author    Liam 2023-12-25 23:21:08 -0500
committer Liam 2023-12-25 23:30:56 -0500
commit    ddda76f9b0d16e8a6fbc92db9e26f25843b647ed
tree      3fd209d66b8503ca7f0cf6d15c5c065179c66076 /src/core
parent    Merge pull request #12394 from liamwhite/per-process-memory
core: track separate heap allocation for linux
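
On Linux, the separate heap is now mapped into the host address space lazily: the first access to an untracked heap page raises SIGSEGV, a chained handler hands the fault address to the current process's Memory instance, and Common::HeapTracker maps the page in before execution resumes. To make this possible, the Map/Unmap/Protect paths grow a separate_heap flag so the kernel page table can tell the tracker which regions belong to the tracked heap.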
Diffstat (limited to 'src/core')
-rw-r--r--  src/core/CMakeLists.txt                     1
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.cpp     49
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic.h       20
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp   5
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp   5
-rw-r--r--  src/core/hle/kernel/k_page_table_base.cpp  26
-rw-r--r--  src/core/hle/kernel/k_page_table_base.h     3
-rw-r--r--  src/core/hle/kernel/k_process.cpp           6
-rw-r--r--  src/core/memory.cpp                        86
-rw-r--r--  src/core/memory.h                           7
10 files changed, 171 insertions, 37 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..e960edb47 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
 
 if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
     target_sources(core PRIVATE
+        arm/dynarmic/arm_dynarmic.cpp
         arm/dynarmic/arm_dynarmic.h
         arm/dynarmic/arm_dynarmic_64.cpp
         arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef __linux__
+
+#include "common/signal_chain.h"
+
+#include "core/arm/dynarmic/arm_dynarmic.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/memory.h"
+
+namespace Core {
+
+namespace {
+
+thread_local Core::Memory::Memory* g_current_memory{};
+std::once_flag g_registered{};
+struct sigaction g_old_segv {};
+
+void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
+    if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
+        return;
+    }
+
+    return g_old_segv.sa_sigaction(sig, info, ctx);
+}
+
+} // namespace
+
+ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
+    g_current_memory = std::addressof(process->GetMemory());
+}
+
+ScopedJitExecution::~ScopedJitExecution() {
+    g_current_memory = nullptr;
+}
+
+void ScopedJitExecution::RegisterHandler() {
+    std::call_once(g_registered, [] {
+        struct sigaction sa {};
+        sa.sa_sigaction = &HandleSigSegv;
+        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
+        Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
+    });
+}
+
+} // namespace Core
+
+#endif
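
For context, the handler above follows the usual fastmem fault pattern: try to service the fault yourself, otherwise forward to whichever handler was installed first (Common::SigAction is yuzu's chaining helper). A minimal standalone sketch of that pattern, using plain sigaction and mprotect instead of the HeapTracker machinery:

// Standalone illustration only -- not yuzu code. Lazily maps one page on first touch.
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>

static struct sigaction g_old_segv{};
static char* g_page = nullptr;
static size_t g_page_size = 0;

static void HandleSegv(int sig, siginfo_t* info, void* ctx) {
    char* const addr = static_cast<char*>(info->si_addr);
    if (addr >= g_page && addr < g_page + g_page_size) {
        // Our lazy page: make it accessible and let the faulting instruction retry.
        // (mprotect is not formally async-signal-safe, but this is standard fastmem practice.)
        mprotect(g_page, g_page_size, PROT_READ | PROT_WRITE);
        return;
    }
    // Not ours: chain to the previously installed handler, as HandleSigSegv does above.
    if (g_old_segv.sa_flags & SA_SIGINFO) {
        g_old_segv.sa_sigaction(sig, info, ctx);
    } else {
        std::abort();
    }
}

int main() {
    g_page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    g_page = static_cast<char*>(
        mmap(nullptr, g_page_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));

    struct sigaction sa{};
    sa.sa_sigaction = &HandleSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigaction(SIGSEGV, &sa, &g_old_segv); // keep the old action for chaining

    g_page[0] = 42; // faults once; the handler maps the page and the write retries
    std::printf("%d\n", g_page[0]);
}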
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
     return static_cast<HaltReason>(hr);
 }
 
+#ifdef __linux__
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process);
+    ~ScopedJitExecution();
+    static void RegisterHandler();
+};
+
+#else
+
+class ScopedJitExecution {
+public:
+    explicit ScopedJitExecution(Kernel::KProcess* process) {}
+    ~ScopedJitExecution() {}
+    static void RegisterHandler() {}
+};
+
+#endif
+
 } // namespace Core
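
The non-Linux variant is an empty stub with the same interface, so the RunThread/StepThread call sites below compile unchanged on every platform; the Linux object carries no state of its own and only swaps a thread_local pointer in its constructor and destructor.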
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
 }
 
 HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
       m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
     auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
     m_jit = MakeJit(&page_table_impl);
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
 }
 
 HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Run());
 }
 
 HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
+    ScopedJitExecution sj(thread->GetOwnerProcess());
+
     m_jit->ClearExclusiveState();
     return TranslateHaltReason(m_jit->Step());
 }
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
     auto& page_table = process->GetPageTable().GetBasePageTable();
     auto& page_table_impl = page_table.GetImpl();
     m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
+    ScopedJitExecution::RegisterHandler();
 }
 
 ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
 void KPageTableBase::Finalize() {
     auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
         if (Settings::IsFastmemEnabled()) {
-            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size);
+            m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
         }
     };
 
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                 // Unmap.
                 R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
                                        cur_pages, 0, false, unmap_properties,
-                                       OperationType::Unmap, true));
+                                       OperationType::UnmapPhysical, true));
             }
 
             // Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
                     // Map the papges.
                     R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
                                         cur_pg, map_properties,
-                                        OperationType::MapFirstGroup, false));
+                                        OperationType::MapFirstGroupPhysical, false));
                 }
             }
 
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
 
             // Unmap.
             R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
-                                   unmap_properties, OperationType::Unmap, false));
+                                   unmap_properties, OperationType::UnmapPhysical, false));
         }
 
         // Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
     // or free them to the page list, and so it goes unused (along with page properties).
 
     switch (operation) {
-    case OperationType::Unmap: {
+    case OperationType::Unmap:
+    case OperationType::UnmapPhysical: {
+        const bool separate_heap = operation == OperationType::UnmapPhysical;
+
         // Ensure that any pages we track are closed on exit.
         KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
         SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         this->MakePageGroup(pages_to_close, virt_addr, num_pages);
 
         // Unmap.
-        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize);
+        m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
 
         R_SUCCEED();
     }
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
         ASSERT(virt_addr != 0);
         ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
         m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
-                                  ConvertToMemoryPermission(properties.perm));
+                                  ConvertToMemoryPermission(properties.perm), false);
 
         // Open references to pages, if we should.
         if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
 
     switch (operation) {
     case OperationType::MapGroup:
-    case OperationType::MapFirstGroup: {
+    case OperationType::MapFirstGroup:
+    case OperationType::MapFirstGroupPhysical: {
+        const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
+
         // We want to maintain a new reference to every page in the group.
-        KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup);
+        KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
 
         for (const auto& node : page_group) {
             const size_t size{node.GetNumPages() * PageSize};
 
             // Map the pages.
             m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
-                                      ConvertToMemoryPermission(properties.perm));
+                                      ConvertToMemoryPermission(properties.perm), separate_heap);
 
             virt_addr += size;
         }
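
Net effect of the k_page_table_base.cpp changes: only the MapPhysicalMemory/UnmapPhysicalMemory paths emit the new *Physical operation types, so separate_heap reaches Memory::MapMemoryRegion/UnmapRegion as true exactly for heap mappings, while every other operation keeps the old (false) behavior. Note the KScopedPageGroup condition is rewritten from `operation != OperationType::MapFirstGroup` to `operation == OperationType::MapGroup`, which preserves the old semantics now that a third case shares the block.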
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
         ChangePermissionsAndRefresh = 5,
         ChangePermissionsAndRefreshAndFlush = 6,
         Separate = 7,
+
+        MapFirstGroupPhysical = 65000,
+        UnmapPhysical = 65001,
     };
 
     static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
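
The out-of-band values 65000/65001 presumably keep these host-only operations visibly distinct from the contiguous 0-7 operation types that mirror the real kernel's behavior.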
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
         auto& buffer = m_kernel.System().DeviceMemory().buffer;
         const auto& code = code_set.CodeSegment();
         const auto& patch = code_set.PatchSegment();
-        buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true);
-        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true);
+        buffer.Protect(GetInteger(base_addr + code.addr), code.size,
+                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
+        buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
+                       Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
         ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
     }
 #endif
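
The Protect signature change replaces a (read, write, execute) bool triple with a single permission mask, which is what makes call sites like the one above self-describing. A sketch of the flags-enum idiom involved; names are illustrative rather than the exact Common::MemoryPermission definition:

#include <cstdint>

enum class MemoryPermission : uint32_t {
    None = 0,
    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,
    ReadWrite = (1 << 0) | (1 << 1),
};

constexpr MemoryPermission operator|(MemoryPermission lhs, MemoryPermission rhs) {
    return static_cast<MemoryPermission>(static_cast<uint32_t>(lhs) |
                                         static_cast<uint32_t>(rhs));
}

constexpr MemoryPermission& operator|=(MemoryPermission& lhs, MemoryPermission rhs) {
    return lhs = lhs | rhs;
}

// Protect(addr, size, MemoryPermission::Read | MemoryPermission::Execute) reads
// at the call site, unlike Protect(addr, size, true, false, true).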
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
 #include "common/assert.h"
 #include "common/atomic_ops.h"
 #include "common/common_types.h"
+#include "common/heap_tracker.h"
 #include "common/logging/log.h"
 #include "common/page_table.h"
 #include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
         } else {
             current_page_table->fastmem_arena = nullptr;
         }
+
+#ifdef __linux__
+        heap_tracker.emplace(system.DeviceMemory().buffer);
+        buffer = std::addressof(*heap_tracker);
+#else
+        buffer = std::addressof(system.DeviceMemory().buffer);
+#endif
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms) {
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
                  Common::PageType::Memory);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Map(GetInteger(base),
-                                             GetInteger(target) - DramMemoryMap::Base, size, perms);
+            buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
+                        separate_heap);
         }
     }
 
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap) {
         ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
         ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
         MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
                  Common::PageType::Unmapped);
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Unmap(GetInteger(base), size);
+            buffer->Unmap(GetInteger(base), size, separate_heap);
         }
     }
 
@@ -89,11 +99,6 @@ struct Memory::Impl {
             return;
         }
 
-        const bool is_r = True(perms & Common::MemoryPermission::Read);
-        const bool is_w = True(perms & Common::MemoryPermission::Write);
-        const bool is_x =
-            True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
-
         u64 protect_bytes{};
         u64 protect_begin{};
         for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
             switch (page_type) {
             case Common::PageType::RasterizerCachedMemory:
                 if (protect_bytes > 0) {
-                    system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w,
-                                                         is_x);
+                    buffer->Protect(protect_begin, protect_bytes, perms);
                     protect_bytes = 0;
                 }
                 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
         }
 
         if (protect_bytes > 0) {
-            system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x);
+            buffer->Protect(protect_begin, protect_bytes, perms);
         }
     }
 
@@ -486,7 +490,9 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug);
+            const auto perm{debug ? Common::MemoryPermission{}
+                                  : Common::MemoryPermission::ReadWrite};
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
         }
 
         if (current_page_table->fastmem_arena) {
-            const bool is_read_enable =
-                !Settings::values.use_reactive_flushing.GetValue() || !cached;
-            system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached);
+            Common::MemoryPermission perm{};
+            if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
+                perm |= Common::MemoryPermission::Read;
+            }
+            if (!cached) {
+                perm |= Common::MemoryPermission::Write;
+            }
+            buffer->Protect(vaddr, size, perm);
         }
 
         // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
     std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
     std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
     std::mutex sys_core_guard;
+
+    std::optional<Common::HeapTracker> heap_tracker;
+#ifdef __linux__
+    Common::HeapTracker* buffer{};
+#else
+    Common::HostMemory* buffer{};
+#endif
 };
 
 Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
 }
 
 void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                             Common::PhysicalAddress target, Common::MemoryPermission perms) {
-    impl->MapMemoryRegion(page_table, base, size, target, perms);
+                             Common::PhysicalAddress target, Common::MemoryPermission perms,
+                             bool separate_heap) {
+    impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
 }
 
-void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) {
-    impl->UnmapRegion(page_table, base, size);
+void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                         bool separate_heap) {
+    impl->UnmapRegion(page_table, base, size, separate_heap);
 }
 
 void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
 }
 
 bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
-    bool mapped = true;
+    [[maybe_unused]] bool mapped = true;
+    [[maybe_unused]] bool rasterizer = false;
+
     u8* const ptr = impl->GetPointerImpl(
         GetInteger(vaddr),
         [&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
                       GetInteger(vaddr));
             mapped = false;
         },
-        [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); });
+        [&] {
+            impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
+            rasterizer = true;
+        });
+
+#ifdef __linux__
+    if (!rasterizer && mapped) {
+        impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
+    }
+#endif
+
     return mapped && ptr != nullptr;
 }
 
+bool Memory::InvalidateSeparateHeap(void* fault_address) {
+#ifdef __linux__
+    return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
+#else
+    return false;
+#endif
+}
+
 } // namespace Core::Memory
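
Both recovery paths end at HeapTracker::DeferredMapSeparateHeap: the NCE path passes the guest address it already knows, while the signal path (InvalidateSeparateHeap, called from HandleSigSegv in arm_dynarmic.cpp) passes the raw host fault address, and reports the fault as unhandled on platforms without the tracker so the chained handler takes over.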
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
      * @param perms The permissions to map the memory with.
      */
     void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
-                         Common::PhysicalAddress target, Common::MemoryPermission perms);
+                         Common::PhysicalAddress target, Common::MemoryPermission perms,
+                         bool separate_heap);
 
     /**
      * Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
      * @param base The address to begin unmapping at.
      * @param size The amount of bytes to unmap.
      */
-    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size);
+    void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
+                     bool separate_heap);
 
     /**
      * Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
     void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
     void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
     bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
+    bool InvalidateSeparateHeap(void* fault_address);
     void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
 
 private: