Diffstat (limited to 'src/core/memory.cpp')
 src/core/memory.cpp | 86 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 63 insertions(+), 23 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
+#include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }
+
+#ifdef __linux__
+        heap_tracker.emplace(system.DeviceMemory().buffer);
+        buffer = std::addressof(*heap_tracker);
+#else
+        buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms) {
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Map(GetInteger(base),
-                                             GetInteger(target) - DramMemoryMap::Base, size, perms);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+                        separate_heap);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+            buffer->Unmap(GetInteger(base), size, separate_heap);
         }
     }
 
@@ -89,11 +99,6 @@ struct Memory::Impl {
             return;
         }
 
-        const bool is_r = True(perms & Common::MemoryPermission::Read);
-        const bool is_w = True(perms & Common::MemoryPermission::Write);
-        const bool is_x =
-            True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
         u64 protect_bytes{};
         u64 protect_begin{};
         for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
             switch (page_type) {
             case Common::PageType::RasterizerCachedMemory:
                 if (protect_bytes > 0) {
-                    system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
-                                                         is_x);
+                    buffer->Protect(protect_begin, protect_bytes, perms);
                     protect_bytes = 0;
                 }
                 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
         }
 
         if (protect_bytes > 0) {
-            system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+            buffer->Protect(protect_begin, protect_bytes, perms);
         }
     }
 
@@ -486,7 +490,9 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+            const auto perm{debug ? Common::MemoryPermission{}
+                                  : Common::MemoryPermission::ReadWrite};
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            const bool is_read_enable =
-                !Settings::values.use_reactive_flushing.GetValue() || !cached;
-            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+            Common::MemoryPermission perm{};
+            if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+                perm |= Common::MemoryPermission::Read;
+            }
+            if (!cached) {
+                perm |= Common::MemoryPermission::Write;
+            }
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
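Note: the hunk above derives the fastmem protection from the cached state plus the use_reactive_flushing setting. The same logic as a standalone helper, reusing the illustrative MemoryPermission sketched earlier (the function name is hypothetical):

    // Truth table implemented by the hunk above:
    //   cached=true,  reactive flushing on  -> no access  (any touch faults)
    //   cached=true,  reactive flushing off -> read-only  (writes fault)
    //   cached=false, either                -> read/write (fastmem restored)
    MemoryPermission RasterizerCachePermission(bool cached, bool reactive_flushing) {
        MemoryPermission perm = MemoryPermission::None;
        if (!reactive_flushing || !cached) {
            perm = perm | MemoryPermission::Read;
        }
        if (!cached) {
            perm = perm | MemoryPermission::Write;
        }
        return perm;
    }
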
@@ -856,6 +867,13 @@ struct Memory::Impl {
     std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
+
+    std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+    Common::HeapTracker* buffer{};
+#else
+    Common::HostMemory* buffer{};
+#endif
 };
 
 Memory::Memory(Core::System& system_) : system{system_} {
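Note: the new members give Impl a single buffer pointer whose static type differs per platform. This compiles because Common::HeapTracker is expected to mirror the Map/Unmap/Protect surface of Common::HostMemory, so every call site is valid against either type. A stripped-down sketch of that pattern; the types below are stand-ins, not the real classes:

    #include <cstdint>
    #include <memory>
    #include <optional>

    struct HostMemory {
        void Map(uint64_t va, uint64_t size) { /* mmap into the fastmem arena */ }
        void Unmap(uint64_t va, uint64_t size) { /* munmap */ }
    };

    // Forwards to HostMemory while bookkeeping heap pages separately.
    struct HeapTracker {
        explicit HeapTracker(HostMemory& mem_) : mem{mem_} {}
        void Map(uint64_t va, uint64_t size) { mem.Map(va, size); }
        void Unmap(uint64_t va, uint64_t size) { mem.Unmap(va, size); }
        HostMemory& mem;
    };

    struct Impl {
        HostMemory host;
        std::optional<HeapTracker> heap_tracker;
    #ifdef __linux__
        HeapTracker* buffer{};
    #else
        HostMemory* buffer{};
    #endif

        void Init() {
    #ifdef __linux__
            heap_tracker.emplace(host);
            buffer = std::addressof(*heap_tracker);
    #else
            buffer = std::addressof(host);
    #endif
            buffer->Map(0, 0x1000); // identical call shape on both paths
        }
    };
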
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
 }
 
 void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                             Common::PhysicalAddress target, Common::MemoryPermission perms) {
-    impl->MapMemoryRegion(page_table, base, size, target, perms);
+                             Common::PhysicalAddress target, Common::MemoryPermission perms,
+                             bool separate_heap) {
+    impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
-    impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         bool separate_heap) {
+    impl->UnmapRegion(page_table, base, size, separate_heap);
 }
 
 void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
 }
 
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
-    bool mapped = true;
+    [[maybe_unused]] bool mapped = true;
+    [[maybe_unused]] bool rasterizer = false;
+
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                       GetInteger(vaddr));
             mapped = false;
         },
-        [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+        [&] {
+            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+            rasterizer = true;
+        });
+
+#ifdef __linux__
+    if (!rasterizer && mapped) {
+        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+    }
+#endif
+
     return mapped && ptr != nullptr;
 }
 
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+    return false;
+#endif
+}
+
 } // namespace Core::Memory