author     Liam   2023-12-25 23:21:08 -0500
committer  Liam   2023-12-25 23:30:56 -0500
commit     ddda76f9b0d16e8a6fbc92db9e26f25843b647ed
tree       3fd209d66b8503ca7f0cf6d15c5c065179c66076 /src
parent     Merge pull request #12394 from liamwhite/per-process-memory
core: track separate heap allocation for linux
Diffstat (limited to 'src')
 src/common/CMakeLists.txt                  |   2
 src/common/heap_tracker.cpp                | 263
 src/common/heap_tracker.h                  |  97
 src/common/host_memory.cpp                 |  10
 src/common/host_memory.h                   |  11
 src/core/CMakeLists.txt                    |   1
 src/core/arm/dynarmic/arm_dynarmic.cpp     |  49
 src/core/arm/dynarmic/arm_dynarmic.h       |  20
 src/core/arm/dynarmic/arm_dynarmic_32.cpp  |   5
 src/core/arm/dynarmic/arm_dynarmic_64.cpp  |   5
 src/core/hle/kernel/k_page_table_base.cpp  |  26
 src/core/hle/kernel/k_page_table_base.h    |   3
 src/core/hle/kernel/k_process.cpp          |   6
 src/core/memory.cpp                        |  86
 src/core/memory.h                          |   7
 src/tests/common/host_memory.cpp           |  99
 16 files changed, 597 insertions, 93 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -64,6 +64,8 @@ add_library(common STATIC
64 fs/path_util.cpp 64 fs/path_util.cpp
65 fs/path_util.h 65 fs/path_util.h
66 hash.h 66 hash.h
67 heap_tracker.cpp
68 heap_tracker.h
67 hex_util.cpp 69 hex_util.cpp
68 hex_util.h 70 hex_util.h
69 host_memory.cpp 71 host_memory.cpp
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..95dc8aa1e
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,263 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include <algorithm>
5#include <vector>
6
7#include "common/heap_tracker.h"
8#include "common/logging/log.h"
9
10namespace Common {
11
12namespace {
13
14constexpr s64 MaxResidentMapCount = 0x8000;
15
16} // namespace
17
18HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
19HeapTracker::~HeapTracker() = default;
20
21void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
22 MemoryPermission perm, bool is_separate_heap) {
23 // When mapping other memory, map pages immediately.
24 if (!is_separate_heap) {
25 m_buffer.Map(virtual_offset, host_offset, length, perm, false);
26 return;
27 }
28
29 {
30 // We are mapping part of a separate heap.
31 std::scoped_lock lk{m_lock};
32
33 auto* const map = new SeparateHeapMap{
34 .vaddr = virtual_offset,
35 .paddr = host_offset,
36 .size = length,
37 .tick = m_tick++,
38 .perm = perm,
39 .is_resident = false,
40 };
41
42 // Insert into mappings.
43 m_map_count++;
44 m_mappings.insert(*map);
45 }
46
47 // Finally, map.
48 this->DeferredMapSeparateHeap(virtual_offset);
49}
50
51void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
52 // If this is a separate heap...
53 if (is_separate_heap) {
54 std::scoped_lock lk{m_lock};
55
56 const SeparateHeapMap key{
57 .vaddr = virtual_offset,
58 };
59
60 // Split at the boundaries of the region we are removing.
61 this->SplitHeapMapLocked(virtual_offset);
62 this->SplitHeapMapLocked(virtual_offset + size);
63
64 // Erase all mappings in range.
65 auto it = m_mappings.find(key);
66 while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
67 // Get underlying item.
68 auto* const item = std::addressof(*it);
69
70 // If resident, erase from resident map.
71 if (item->is_resident) {
72 ASSERT(--m_resident_map_count >= 0);
73 m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
74 }
75
76 // Erase from map.
77 it = m_mappings.erase(it);
78 ASSERT(--m_map_count >= 0);
79
80 // Free the item.
81 delete item;
82 }
83 }
84
85 // Unmap pages.
86 m_buffer.Unmap(virtual_offset, size, false);
87}
88
89void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
90 // Ensure no rebuild occurs while reprotecting.
91 std::shared_lock lk{m_rebuild_lock};
92
93 // Split at the boundaries of the region we are reprotecting.
94 this->SplitHeapMap(virtual_offset, size);
95
96 // Declare tracking variables.
97 VAddr cur = virtual_offset;
98 VAddr end = virtual_offset + size;
99
100 while (cur < end) {
101 VAddr next = cur;
102 bool should_protect = false;
103
104 {
105 std::scoped_lock lk2{m_lock};
106
107 const SeparateHeapMap key{
108 .vaddr = next,
109 };
110
111 // Try to get the next mapping corresponding to this address.
112 const auto it = m_mappings.nfind(key);
113
114 if (it == m_mappings.end()) {
115 // There are no separate heap mappings remaining.
116 next = end;
117 should_protect = true;
118 } else if (it->vaddr == cur) {
119 // We are in range.
120 // Update permission bits.
121 it->perm = perm;
122
123 // Determine next address and whether we should protect.
124 next = cur + it->size;
125 should_protect = it->is_resident;
126 } else /* if (it->vaddr > cur) */ {
127 // We weren't in range, but there is a block coming up that will be.
128 next = it->vaddr;
129 should_protect = true;
130 }
131 }
132
133 // Clamp to end.
134 next = std::min(next, end);
135
136 // Reprotect, if we need to.
137 if (should_protect) {
138 m_buffer.Protect(cur, next - cur, perm);
139 }
140
141 // Advance.
142 cur = next;
143 }
144}
145
146bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
147 if (m_buffer.IsInVirtualRange(fault_address)) {
148 return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
149 }
150
151 return false;
152}
153
154bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
155 bool rebuild_required = false;
156
157 {
158 std::scoped_lock lk{m_lock};
159
160 // Check to ensure this was a non-resident separate heap mapping.
161 const auto it = this->GetNearestHeapMapLocked(virtual_offset);
162 if (it == m_mappings.end() || it->is_resident) {
163 return false;
164 }
165
166 // Update tick before possible rebuild.
167 it->tick = m_tick++;
168
169 // Check if we need to rebuild.
170 if (m_resident_map_count > MaxResidentMapCount) {
171 rebuild_required = true;
172 }
173
174 // Map the area.
175 m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);
176
177 // This map is now resident.
178 it->is_resident = true;
179 m_resident_map_count++;
180 m_resident_mappings.insert(*it);
181 }
182
183 if (rebuild_required) {
184 // A rebuild was required, so perform it now.
185 this->RebuildSeparateHeapAddressSpace();
186 }
187
188 return true;
189}
190
191void HeapTracker::RebuildSeparateHeapAddressSpace() {
192 std::scoped_lock lk{m_rebuild_lock, m_lock};
193
194 ASSERT(!m_resident_mappings.empty());
195
196 // Unmap so we have at least 4 maps available.
197 const size_t desired_count = std::min(m_resident_map_count, MaxResidentMapCount - 4);
198 const size_t evict_count = m_resident_map_count - desired_count;
199 auto it = m_resident_mappings.begin();
200
201 for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
202 // Unmark and unmap.
203 it->is_resident = false;
204 m_buffer.Unmap(it->vaddr, it->size, false);
205
206 // Advance.
207 ASSERT(--m_resident_map_count >= 0);
208 it = m_resident_mappings.erase(it);
209 }
210}
211
212void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
213 std::scoped_lock lk{m_lock};
214
215 this->SplitHeapMapLocked(offset);
216 this->SplitHeapMapLocked(offset + size);
217}
218
219void HeapTracker::SplitHeapMapLocked(VAddr offset) {
220 const auto it = this->GetNearestHeapMapLocked(offset);
221 if (it == m_mappings.end() || it->vaddr == offset) {
222 // Not contained or no split required.
223 return;
224 }
225
226 // Cache the original values.
227 auto* const left = std::addressof(*it);
228 const size_t orig_size = left->size;
229
230 // Adjust the left map.
231 const size_t left_size = offset - left->vaddr;
232 left->size = left_size;
233
234 // Create the new right map.
235 auto* const right = new SeparateHeapMap{
236 .vaddr = left->vaddr + left_size,
237 .paddr = left->paddr + left_size,
238 .size = orig_size - left_size,
239 .tick = left->tick,
240 .perm = left->perm,
241 .is_resident = left->is_resident,
242 };
243
244 // Insert the new right map.
245 m_map_count++;
246 m_mappings.insert(*right);
247
248 // If resident, also insert into resident map.
249 if (right->is_resident) {
250 m_resident_mappings.insert(*right);
251 m_resident_map_count++;
252 }
253}
254
255HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
256 const SeparateHeapMap key{
257 .vaddr = offset,
258 };
259
260 return m_mappings.find(key);
261}
262
263} // namespace Common
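
The tracker above defers host mapping of separate-heap pages until the first access faults, stamps each resident mapping with a monotonically increasing tick, and evicts the oldest resident mappings once more than MaxResidentMapCount (0x8000) are live. The following self-contained sketch models that policy with ordinary std::map containers instead of the intrusive red-black trees used above; all names here (ToyTracker, FakeBuffer, kMaxResident) are illustrative only and are not part of the yuzu codebase.

#include <cstddef>
#include <iostream>
#include <map>

// Stand-in for Common::HostMemory: just logs what would be mapped/unmapped.
struct FakeBuffer {
    void Map(std::size_t vaddr, std::size_t size) {
        std::cout << "map   0x" << std::hex << vaddr << "+0x" << size << std::dec << "\n";
    }
    void Unmap(std::size_t vaddr, std::size_t size) {
        std::cout << "unmap 0x" << std::hex << vaddr << "+0x" << size << std::dec << "\n";
    }
};

class ToyTracker {
public:
    explicit ToyTracker(FakeBuffer& buffer) : m_buffer(buffer) {}

    // Register a separate-heap mapping without touching the host buffer yet.
    void MapSeparateHeap(std::size_t vaddr, std::size_t size) {
        m_mappings[vaddr] = Entry{size, 0, false};
    }

    // Fault path: make the containing mapping resident, evicting the least
    // recently faulted mappings when the resident count exceeds the budget.
    bool DeferredMap(std::size_t fault_vaddr) {
        auto it = m_mappings.upper_bound(fault_vaddr);
        if (it == m_mappings.begin()) {
            return false;
        }
        --it;
        Entry& e = it->second;
        if (fault_vaddr >= it->first + e.size || e.resident) {
            return false;
        }
        e.tick = m_tick++;
        m_buffer.Map(it->first, e.size);
        e.resident = true;
        m_resident.emplace(e.tick, it->first);
        if (m_resident.size() > kMaxResident) {
            // Evict down to kMaxResident - 4, mirroring RebuildSeparateHeapAddressSpace.
            EvictOldest(m_resident.size() - (kMaxResident - 4));
        }
        return true;
    }

private:
    struct Entry {
        std::size_t size;
        std::size_t tick;
        bool resident;
    };

    void EvictOldest(std::size_t count) {
        auto it = m_resident.begin(); // lowest tick first
        for (std::size_t i = 0; i < count && it != m_resident.end(); ++i) {
            Entry& e = m_mappings.at(it->second);
            m_buffer.Unmap(it->second, e.size);
            e.resident = false;
            it = m_resident.erase(it);
        }
    }

    static constexpr std::size_t kMaxResident = 8; // 0x8000 in the real tracker

    FakeBuffer& m_buffer;
    std::map<std::size_t, Entry> m_mappings;             // keyed by vaddr
    std::multimap<std::size_t, std::size_t> m_resident;  // tick -> vaddr
    std::size_t m_tick = 1;
};

int main() {
    FakeBuffer buffer;
    ToyTracker tracker(buffer);
    for (std::size_t i = 0; i < 12; ++i) {
        tracker.MapSeparateHeap(i * 0x1000, 0x1000);
    }
    for (std::size_t i = 0; i < 12; ++i) {
        tracker.DeferredMap(i * 0x1000); // each first access maps, possibly evicting
    }
}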
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..cc16041d9
--- /dev/null
+++ b/src/common/heap_tracker.h
@@ -0,0 +1,97 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <atomic>
7#include <mutex>
8#include <set>
9#include <shared_mutex>
10
11#include "common/host_memory.h"
12#include "common/intrusive_red_black_tree.h"
13
14namespace Common {
15
16struct SeparateHeapMap {
17 Common::IntrusiveRedBlackTreeNode addr_node{};
18 Common::IntrusiveRedBlackTreeNode tick_node{};
19 VAddr vaddr{};
20 PAddr paddr{};
21 size_t size{};
22 size_t tick{};
23 MemoryPermission perm{};
24 bool is_resident{};
25};
26
27struct SeparateHeapMapAddrComparator {
28 static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
29 if (lhs.vaddr < rhs.vaddr) {
30 return -1;
31 } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) {
32 return 0;
33 } else {
34 return 1;
35 }
36 }
37};
38
39struct SeparateHeapMapTickComparator {
40 static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) {
41 if (lhs.tick < rhs.tick) {
42 return -1;
43 } else if (lhs.tick > rhs.tick) {
44 return 1;
45 } else {
46 return SeparateHeapMapAddrComparator::Compare(lhs, rhs);
47 }
48 }
49};
50
51class HeapTracker {
52public:
53 explicit HeapTracker(Common::HostMemory& buffer);
54 ~HeapTracker();
55
56 void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm,
57 bool is_separate_heap);
58 void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap);
59 void Protect(size_t virtual_offset, size_t length, MemoryPermission perm);
60 u8* VirtualBasePointer() {
61 return m_buffer.VirtualBasePointer();
62 }
63
64 bool DeferredMapSeparateHeap(u8* fault_address);
65 bool DeferredMapSeparateHeap(size_t virtual_offset);
66
67private:
68 using AddrTreeTraits =
69 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>;
70 using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>;
71
72 using TickTreeTraits =
73 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>;
74 using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>;
75
76 AddrTree m_mappings{};
77 TickTree m_resident_mappings{};
78
79private:
80 void SplitHeapMap(VAddr offset, size_t size);
81 void SplitHeapMapLocked(VAddr offset);
82
83 AddrTree::iterator GetNearestHeapMapLocked(VAddr offset);
84
85 void RebuildSeparateHeapAddressSpace();
86
87private:
88 Common::HostMemory& m_buffer;
89
90 std::shared_mutex m_rebuild_lock{};
91 std::mutex m_lock{};
92 s64 m_map_count{};
93 s64 m_resident_map_count{};
94 size_t m_tick{};
95};
96
97} // namespace Common
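
A minimal usage sketch of the interface declared above, assuming a Linux host and the yuzu common library; the constructor arguments, offsets, and sizes are illustrative, and u8 comes from the common type headers.

#include "common/heap_tracker.h"
#include "common/host_memory.h"

void Example() {
    Common::HostMemory buffer(4ULL << 30 /*backing*/, 1ULL << 39 /*virtual*/);
    Common::HeapTracker tracker(buffer);

    // Separate-heap mappings go through the tracker's bookkeeping; other
    // mappings are forwarded straight to HostMemory.
    tracker.Map(0x5000, 0x8000, 0x1000, Common::MemoryPermission::ReadWrite,
                /*is_separate_heap=*/true);

    volatile u8* const data = tracker.VirtualBasePointer() + 0x5000;
    data[0] = 50;

    // Reprotecting only touches host pages for resident separate-heap maps.
    tracker.Protect(0x5000, 0x1000, Common::MemoryPermission::Read);

    tracker.Unmap(0x5000, 0x1000, /*is_separate_heap=*/true);
}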
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
@@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default;
679HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default; 679HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default;
680 680
681void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, 681void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
682 MemoryPermission perms) { 682 MemoryPermission perms, bool separate_heap) {
683 ASSERT(virtual_offset % PageAlignment == 0); 683 ASSERT(virtual_offset % PageAlignment == 0);
684 ASSERT(host_offset % PageAlignment == 0); 684 ASSERT(host_offset % PageAlignment == 0);
685 ASSERT(length % PageAlignment == 0); 685 ASSERT(length % PageAlignment == 0);
@@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length,
691 impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms); 691 impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms);
692} 692}
693 693
694void HostMemory::Unmap(size_t virtual_offset, size_t length) { 694void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) {
695 ASSERT(virtual_offset % PageAlignment == 0); 695 ASSERT(virtual_offset % PageAlignment == 0);
696 ASSERT(length % PageAlignment == 0); 696 ASSERT(length % PageAlignment == 0);
697 ASSERT(virtual_offset + length <= virtual_size); 697 ASSERT(virtual_offset + length <= virtual_size);
@@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) {
701 impl->Unmap(virtual_offset + virtual_base_offset, length); 701 impl->Unmap(virtual_offset + virtual_base_offset, length);
702} 702}
703 703
704void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write, 704void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) {
705 bool execute) {
706 ASSERT(virtual_offset % PageAlignment == 0); 705 ASSERT(virtual_offset % PageAlignment == 0);
707 ASSERT(length % PageAlignment == 0); 706 ASSERT(length % PageAlignment == 0);
708 ASSERT(virtual_offset + length <= virtual_size); 707 ASSERT(virtual_offset + length <= virtual_size);
709 if (length == 0 || !virtual_base || !impl) { 708 if (length == 0 || !virtual_base || !impl) {
710 return; 709 return;
711 } 710 }
711 const bool read = True(perm & MemoryPermission::Read);
712 const bool write = True(perm & MemoryPermission::Write);
713 const bool execute = True(perm & MemoryPermission::Execute);
712 impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute); 714 impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute);
713} 715}
714 716
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
@@ -40,11 +40,12 @@ public:
40 HostMemory(HostMemory&& other) noexcept; 40 HostMemory(HostMemory&& other) noexcept;
41 HostMemory& operator=(HostMemory&& other) noexcept; 41 HostMemory& operator=(HostMemory&& other) noexcept;
42 42
43 void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms); 43 void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms,
44 bool separate_heap);
44 45
45 void Unmap(size_t virtual_offset, size_t length); 46 void Unmap(size_t virtual_offset, size_t length, bool separate_heap);
46 47
47 void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false); 48 void Protect(size_t virtual_offset, size_t length, MemoryPermission perms);
48 49
49 void EnableDirectMappedAddress(); 50 void EnableDirectMappedAddress();
50 51
@@ -64,6 +65,10 @@ public:
64 return virtual_base; 65 return virtual_base;
65 } 66 }
66 67
68 bool IsInVirtualRange(void* address) const noexcept {
69 return address >= virtual_base && address < virtual_base + virtual_size;
70 }
71
67private: 72private:
68 size_t backing_size{}; 73 size_t backing_size{};
69 size_t virtual_size{}; 74 size_t virtual_size{};
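
A hedged caller-side sketch of the updated signatures above: Protect now takes MemoryPermission flags instead of three booleans, and IsInVirtualRange lets fault handlers filter addresses outside the arena. The function names (ReprotectCode, OwnsFault) are illustrative only.

#include <cstddef>

#include "common/host_memory.h"

void ReprotectCode(Common::HostMemory& buffer, std::size_t offset, std::size_t size) {
    using Common::MemoryPermission;
    // Permissions are passed as combined flags, as in KProcess::LoadModule below.
    buffer.Protect(offset, size, MemoryPermission::Read | MemoryPermission::Execute);
}

bool OwnsFault(const Common::HostMemory& buffer, void* fault_address) {
    // New helper used on the fault path to ignore foreign addresses.
    return buffer.IsInVirtualRange(fault_address);
}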
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 96ab39cb8..e960edb47 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -978,6 +978,7 @@ endif()
978 978
979if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) 979if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64)
980 target_sources(core PRIVATE 980 target_sources(core PRIVATE
981 arm/dynarmic/arm_dynarmic.cpp
981 arm/dynarmic/arm_dynarmic.h 982 arm/dynarmic/arm_dynarmic.h
982 arm/dynarmic/arm_dynarmic_64.cpp 983 arm/dynarmic/arm_dynarmic_64.cpp
983 arm/dynarmic/arm_dynarmic_64.h 984 arm/dynarmic/arm_dynarmic_64.h
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
@@ -0,0 +1,49 @@
1// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#ifdef __linux__
5
6#include "common/signal_chain.h"
7
8#include "core/arm/dynarmic/arm_dynarmic.h"
9#include "core/hle/kernel/k_process.h"
10#include "core/memory.h"
11
12namespace Core {
13
14namespace {
15
16thread_local Core::Memory::Memory* g_current_memory{};
17std::once_flag g_registered{};
18struct sigaction g_old_segv {};
19
20void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
21 if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) {
22 return;
23 }
24
25 return g_old_segv.sa_sigaction(sig, info, ctx);
26}
27
28} // namespace
29
30ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) {
31 g_current_memory = std::addressof(process->GetMemory());
32}
33
34ScopedJitExecution::~ScopedJitExecution() {
35 g_current_memory = nullptr;
36}
37
38void ScopedJitExecution::RegisterHandler() {
39 std::call_once(g_registered, [] {
40 struct sigaction sa {};
41 sa.sa_sigaction = &HandleSigSegv;
42 sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
43 Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv));
44 });
45}
46
47} // namespace Core
48
49#endif
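
For reference, a self-contained sketch of the SIGSEGV chaining pattern the handler above relies on, without yuzu's Common::SigAction wrapper. Linux/POSIX only; the fallback branches for non-SA_SIGINFO previous handlers are an addition for robustness and are not present in the code above.

#include <signal.h>

namespace {

struct sigaction g_old_segv{};

// Stand-in for Memory::InvalidateSeparateHeap: returns true if the fault was
// resolved by mapping the page, so the faulting instruction can be retried.
bool TryHandleFault(void* addr) {
    (void)addr;
    return false;
}

void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
    if (TryHandleFault(info->si_addr)) {
        return;
    }
    // Not ours: forward to whatever handler was installed before us.
    if (g_old_segv.sa_flags & SA_SIGINFO) {
        g_old_segv.sa_sigaction(sig, info, ctx);
    } else if (g_old_segv.sa_handler == SIG_DFL || g_old_segv.sa_handler == SIG_IGN) {
        signal(sig, SIG_DFL);
        raise(sig);
    } else {
        g_old_segv.sa_handler(sig);
    }
}

} // namespace

void RegisterSegvHandler() {
    struct sigaction sa{};
    sa.sa_sigaction = &HandleSigSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, &g_old_segv);
}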
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
@@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) {
26 return static_cast<HaltReason>(hr); 26 return static_cast<HaltReason>(hr);
27} 27}
28 28
29#ifdef __linux__
30
31class ScopedJitExecution {
32public:
33 explicit ScopedJitExecution(Kernel::KProcess* process);
34 ~ScopedJitExecution();
35 static void RegisterHandler();
36};
37
38#else
39
40class ScopedJitExecution {
41public:
42 explicit ScopedJitExecution(Kernel::KProcess* process) {}
43 ~ScopedJitExecution() {}
44 static void RegisterHandler() {}
45};
46
47#endif
48
29} // namespace Core 49} // namespace Core
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const {
331} 331}
332 332
333HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) { 333HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) {
334 ScopedJitExecution sj(thread->GetOwnerProcess());
335
334 m_jit->ClearExclusiveState(); 336 m_jit->ClearExclusiveState();
335 return TranslateHaltReason(m_jit->Run()); 337 return TranslateHaltReason(m_jit->Run());
336} 338}
337 339
338HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) { 340HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) {
341 ScopedJitExecution sj(thread->GetOwnerProcess());
342
339 m_jit->ClearExclusiveState(); 343 m_jit->ClearExclusiveState();
340 return TranslateHaltReason(m_jit->Step()); 344 return TranslateHaltReason(m_jit->Step());
341} 345}
@@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc
377 m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} { 381 m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} {
378 auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl(); 382 auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl();
379 m_jit = MakeJit(&page_table_impl); 383 m_jit = MakeJit(&page_table_impl);
384 ScopedJitExecution::RegisterHandler();
380} 385}
381 386
382ArmDynarmic32::~ArmDynarmic32() = default; 387ArmDynarmic32::~ArmDynarmic32() = default;
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa
362} 362}
363 363
364HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) { 364HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) {
365 ScopedJitExecution sj(thread->GetOwnerProcess());
366
365 m_jit->ClearExclusiveState(); 367 m_jit->ClearExclusiveState();
366 return TranslateHaltReason(m_jit->Run()); 368 return TranslateHaltReason(m_jit->Run());
367} 369}
368 370
369HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) { 371HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) {
372 ScopedJitExecution sj(thread->GetOwnerProcess());
373
370 m_jit->ClearExclusiveState(); 374 m_jit->ClearExclusiveState();
371 return TranslateHaltReason(m_jit->Step()); 375 return TranslateHaltReason(m_jit->Step());
372} 376}
@@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc
406 auto& page_table = process->GetPageTable().GetBasePageTable(); 410 auto& page_table = process->GetPageTable().GetBasePageTable();
407 auto& page_table_impl = page_table.GetImpl(); 411 auto& page_table_impl = page_table.GetImpl();
408 m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth()); 412 m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth());
413 ScopedJitExecution::RegisterHandler();
409} 414}
410 415
411ArmDynarmic64::~ArmDynarmic64() = default; 416ArmDynarmic64::~ArmDynarmic64() = default;
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
@@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool
434void KPageTableBase::Finalize() { 434void KPageTableBase::Finalize() {
435 auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { 435 auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) {
436 if (Settings::IsFastmemEnabled()) { 436 if (Settings::IsFastmemEnabled()) {
437 m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); 437 m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false);
438 } 438 }
439 }; 439 };
440 440
@@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
5243 // Unmap. 5243 // Unmap.
5244 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, 5244 R_ASSERT(this->Operate(updater.GetPageList(), cur_address,
5245 cur_pages, 0, false, unmap_properties, 5245 cur_pages, 0, false, unmap_properties,
5246 OperationType::Unmap, true)); 5246 OperationType::UnmapPhysical, true));
5247 } 5247 }
5248 5248
5249 // Check if we're done. 5249 // Check if we're done.
@@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
5326 // Map the papges. 5326 // Map the papges.
5327 R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, 5327 R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages,
5328 cur_pg, map_properties, 5328 cur_pg, map_properties,
5329 OperationType::MapFirstGroup, false)); 5329 OperationType::MapFirstGroupPhysical, false));
5330 } 5330 }
5331 } 5331 }
5332 5332
@@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size)
5480 5480
5481 // Unmap. 5481 // Unmap.
5482 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, 5482 R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false,
5483 unmap_properties, OperationType::Unmap, false)); 5483 unmap_properties, OperationType::UnmapPhysical, false));
5484 } 5484 }
5485 5485
5486 // Check if we're done. 5486 // Check if we're done.
@@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
5655 // or free them to the page list, and so it goes unused (along with page properties). 5655 // or free them to the page list, and so it goes unused (along with page properties).
5656 5656
5657 switch (operation) { 5657 switch (operation) {
5658 case OperationType::Unmap: { 5658 case OperationType::Unmap:
5659 case OperationType::UnmapPhysical: {
5660 const bool separate_heap = operation == OperationType::UnmapPhysical;
5661
5659 // Ensure that any pages we track are closed on exit. 5662 // Ensure that any pages we track are closed on exit.
5660 KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); 5663 KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager());
5661 SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); 5664 SCOPE_EXIT({ pages_to_close.CloseAndReset(); });
@@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
5664 this->MakePageGroup(pages_to_close, virt_addr, num_pages); 5667 this->MakePageGroup(pages_to_close, virt_addr, num_pages);
5665 5668
5666 // Unmap. 5669 // Unmap.
5667 m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); 5670 m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap);
5668 5671
5669 R_SUCCEED(); 5672 R_SUCCEED();
5670 } 5673 }
@@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
5672 ASSERT(virt_addr != 0); 5675 ASSERT(virt_addr != 0);
5673 ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); 5676 ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize));
5674 m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr, 5677 m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr,
5675 ConvertToMemoryPermission(properties.perm)); 5678 ConvertToMemoryPermission(properties.perm), false);
5676 5679
5677 // Open references to pages, if we should. 5680 // Open references to pages, if we should.
5678 if (this->IsHeapPhysicalAddress(phys_addr)) { 5681 if (this->IsHeapPhysicalAddress(phys_addr)) {
@@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a
5711 5714
5712 switch (operation) { 5715 switch (operation) {
5713 case OperationType::MapGroup: 5716 case OperationType::MapGroup:
5714 case OperationType::MapFirstGroup: { 5717 case OperationType::MapFirstGroup:
5718 case OperationType::MapFirstGroupPhysical: {
5719 const bool separate_heap = operation == OperationType::MapFirstGroupPhysical;
5720
5715 // We want to maintain a new reference to every page in the group. 5721 // We want to maintain a new reference to every page in the group.
5716 KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); 5722 KScopedPageGroup spg(page_group, operation == OperationType::MapGroup);
5717 5723
5718 for (const auto& node : page_group) { 5724 for (const auto& node : page_group) {
5719 const size_t size{node.GetNumPages() * PageSize}; 5725 const size_t size{node.GetNumPages() * PageSize};
5720 5726
5721 // Map the pages. 5727 // Map the pages.
5722 m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(), 5728 m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(),
5723 ConvertToMemoryPermission(properties.perm)); 5729 ConvertToMemoryPermission(properties.perm), separate_heap);
5724 5730
5725 virt_addr += size; 5731 virt_addr += size;
5726 } 5732 }
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
@@ -104,6 +104,9 @@ protected:
104 ChangePermissionsAndRefresh = 5, 104 ChangePermissionsAndRefresh = 5,
105 ChangePermissionsAndRefreshAndFlush = 6, 105 ChangePermissionsAndRefreshAndFlush = 6,
106 Separate = 7, 106 Separate = 7,
107
108 MapFirstGroupPhysical = 65000,
109 UnmapPhysical = 65001,
107 }; 110 };
108 111
109 static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; 112 static constexpr size_t MaxPhysicalMapAlignment = 1_GiB;
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) {
1237 auto& buffer = m_kernel.System().DeviceMemory().buffer; 1237 auto& buffer = m_kernel.System().DeviceMemory().buffer;
1238 const auto& code = code_set.CodeSegment(); 1238 const auto& code = code_set.CodeSegment();
1239 const auto& patch = code_set.PatchSegment(); 1239 const auto& patch = code_set.PatchSegment();
1240 buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true); 1240 buffer.Protect(GetInteger(base_addr + code.addr), code.size,
1241 buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true); 1241 Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
1242 buffer.Protect(GetInteger(base_addr + patch.addr), patch.size,
1243 Common::MemoryPermission::Read | Common::MemoryPermission::Execute);
1242 ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None); 1244 ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None);
1243 } 1245 }
1244#endif 1246#endif
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -10,6 +10,7 @@
10#include "common/assert.h" 10#include "common/assert.h"
11#include "common/atomic_ops.h" 11#include "common/atomic_ops.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/heap_tracker.h"
13#include "common/logging/log.h" 14#include "common/logging/log.h"
14#include "common/page_table.h" 15#include "common/page_table.h"
15#include "common/scope_exit.h" 16#include "common/scope_exit.h"
@@ -52,10 +53,18 @@ struct Memory::Impl {
52 } else { 53 } else {
53 current_page_table->fastmem_arena = nullptr; 54 current_page_table->fastmem_arena = nullptr;
54 } 55 }
56
57#ifdef __linux__
58 heap_tracker.emplace(system.DeviceMemory().buffer);
59 buffer = std::addressof(*heap_tracker);
60#else
61 buffer = std::addressof(system.DeviceMemory().buffer);
62#endif
55 } 63 }
56 64
57 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 65 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
58 Common::PhysicalAddress target, Common::MemoryPermission perms) { 66 Common::PhysicalAddress target, Common::MemoryPermission perms,
67 bool separate_heap) {
59 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); 68 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
60 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); 69 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
61 ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", 70 ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}",
@@ -64,19 +73,20 @@ struct Memory::Impl {
64 Common::PageType::Memory); 73 Common::PageType::Memory);
65 74
66 if (current_page_table->fastmem_arena) { 75 if (current_page_table->fastmem_arena) {
67 system.DeviceMemory().buffer.Map(GetInteger(base), 76 buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms,
68 GetInteger(target) - DramMemoryMap::Base, size, perms); 77 separate_heap);
69 } 78 }
70 } 79 }
71 80
72 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { 81 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
82 bool separate_heap) {
73 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); 83 ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
74 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); 84 ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base));
75 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, 85 MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
76 Common::PageType::Unmapped); 86 Common::PageType::Unmapped);
77 87
78 if (current_page_table->fastmem_arena) { 88 if (current_page_table->fastmem_arena) {
79 system.DeviceMemory().buffer.Unmap(GetInteger(base), size); 89 buffer->Unmap(GetInteger(base), size, separate_heap);
80 } 90 }
81 } 91 }
82 92
@@ -89,11 +99,6 @@ struct Memory::Impl {
89 return; 99 return;
90 } 100 }
91 101
92 const bool is_r = True(perms & Common::MemoryPermission::Read);
93 const bool is_w = True(perms & Common::MemoryPermission::Write);
94 const bool is_x =
95 True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled();
96
97 u64 protect_bytes{}; 102 u64 protect_bytes{};
98 u64 protect_begin{}; 103 u64 protect_begin{};
99 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { 104 for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) {
@@ -102,8 +107,7 @@ struct Memory::Impl {
102 switch (page_type) { 107 switch (page_type) {
103 case Common::PageType::RasterizerCachedMemory: 108 case Common::PageType::RasterizerCachedMemory:
104 if (protect_bytes > 0) { 109 if (protect_bytes > 0) {
105 system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, 110 buffer->Protect(protect_begin, protect_bytes, perms);
106 is_x);
107 protect_bytes = 0; 111 protect_bytes = 0;
108 } 112 }
109 break; 113 break;
@@ -116,7 +120,7 @@ struct Memory::Impl {
116 } 120 }
117 121
118 if (protect_bytes > 0) { 122 if (protect_bytes > 0) {
119 system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x); 123 buffer->Protect(protect_begin, protect_bytes, perms);
120 } 124 }
121 } 125 }
122 126
@@ -486,7 +490,9 @@ struct Memory::Impl {
486 } 490 }
487 491
488 if (current_page_table->fastmem_arena) { 492 if (current_page_table->fastmem_arena) {
489 system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug); 493 const auto perm{debug ? Common::MemoryPermission{}
494 : Common::MemoryPermission::ReadWrite};
495 buffer->Protect(vaddr, size, perm);
490 } 496 }
491 497
492 // Iterate over a contiguous CPU address space, marking/unmarking the region. 498 // Iterate over a contiguous CPU address space, marking/unmarking the region.
@@ -543,9 +549,14 @@ struct Memory::Impl {
543 } 549 }
544 550
545 if (current_page_table->fastmem_arena) { 551 if (current_page_table->fastmem_arena) {
546 const bool is_read_enable = 552 Common::MemoryPermission perm{};
547 !Settings::values.use_reactive_flushing.GetValue() || !cached; 553 if (!Settings::values.use_reactive_flushing.GetValue() || !cached) {
548 system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); 554 perm |= Common::MemoryPermission::Read;
555 }
556 if (!cached) {
557 perm |= Common::MemoryPermission::Write;
558 }
559 buffer->Protect(vaddr, size, perm);
549 } 560 }
550 561
551 // Iterate over a contiguous CPU address space, which corresponds to the specified GPU 562 // Iterate over a contiguous CPU address space, which corresponds to the specified GPU
@@ -856,6 +867,13 @@ struct Memory::Impl {
856 std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; 867 std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{};
857 std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; 868 std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers;
858 std::mutex sys_core_guard; 869 std::mutex sys_core_guard;
870
871 std::optional<Common::HeapTracker> heap_tracker;
872#ifdef __linux__
873 Common::HeapTracker* buffer{};
874#else
875 Common::HostMemory* buffer{};
876#endif
859}; 877};
860 878
861Memory::Memory(Core::System& system_) : system{system_} { 879Memory::Memory(Core::System& system_) : system{system_} {
@@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) {
873} 891}
874 892
875void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 893void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
876 Common::PhysicalAddress target, Common::MemoryPermission perms) { 894 Common::PhysicalAddress target, Common::MemoryPermission perms,
877 impl->MapMemoryRegion(page_table, base, size, target, perms); 895 bool separate_heap) {
896 impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap);
878} 897}
879 898
880void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { 899void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
881 impl->UnmapRegion(page_table, base, size); 900 bool separate_heap) {
901 impl->UnmapRegion(page_table, base, size, separate_heap);
882} 902}
883 903
884void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size, 904void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size,
@@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) {
1048} 1068}
1049 1069
1050bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { 1070bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
1051 bool mapped = true; 1071 [[maybe_unused]] bool mapped = true;
1072 [[maybe_unused]] bool rasterizer = false;
1073
1052 u8* const ptr = impl->GetPointerImpl( 1074 u8* const ptr = impl->GetPointerImpl(
1053 GetInteger(vaddr), 1075 GetInteger(vaddr),
1054 [&] { 1076 [&] {
@@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) {
1056 GetInteger(vaddr)); 1078 GetInteger(vaddr));
1057 mapped = false; 1079 mapped = false;
1058 }, 1080 },
1059 [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); }); 1081 [&] {
1082 impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size);
1083 rasterizer = true;
1084 });
1085
1086#ifdef __linux__
1087 if (!rasterizer && mapped) {
1088 impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr));
1089 }
1090#endif
1091
1060 return mapped && ptr != nullptr; 1092 return mapped && ptr != nullptr;
1061} 1093}
1062 1094
1095bool Memory::InvalidateSeparateHeap(void* fault_address) {
1096#ifdef __linux__
1097 return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address));
1098#else
1099 return false;
1100#endif
1101}
1102
1063} // namespace Core::Memory 1103} // namespace Core::Memory
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -86,7 +86,8 @@ public:
86 * @param perms The permissions to map the memory with. 86 * @param perms The permissions to map the memory with.
87 */ 87 */
88 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, 88 void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
89 Common::PhysicalAddress target, Common::MemoryPermission perms); 89 Common::PhysicalAddress target, Common::MemoryPermission perms,
90 bool separate_heap);
90 91
91 /** 92 /**
92 * Unmaps a region of the emulated process address space. 93 * Unmaps a region of the emulated process address space.
@@ -95,7 +96,8 @@ public:
95 * @param base The address to begin unmapping at. 96 * @param base The address to begin unmapping at.
96 * @param size The amount of bytes to unmap. 97 * @param size The amount of bytes to unmap.
97 */ 98 */
98 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size); 99 void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size,
100 bool separate_heap);
99 101
100 /** 102 /**
101 * Protects a region of the emulated process address space with the new permissions. 103 * Protects a region of the emulated process address space with the new permissions.
@@ -486,6 +488,7 @@ public:
486 void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); 488 void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers);
487 void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); 489 void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size);
488 bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); 490 bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size);
491 bool InvalidateSeparateHeap(void* fault_address);
489 void FlushRegion(Common::ProcessAddress dest_addr, size_t size); 492 void FlushRegion(Common::ProcessAddress dest_addr, size_t size);
490 493
491private: 494private:
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
@@ -12,6 +12,7 @@ using namespace Common::Literals;
12static constexpr size_t VIRTUAL_SIZE = 1ULL << 39; 12static constexpr size_t VIRTUAL_SIZE = 1ULL << 39;
13static constexpr size_t BACKING_SIZE = 4_GiB; 13static constexpr size_t BACKING_SIZE = 4_GiB;
14static constexpr auto PERMS = Common::MemoryPermission::ReadWrite; 14static constexpr auto PERMS = Common::MemoryPermission::ReadWrite;
15static constexpr auto HEAP = false;
15 16
16TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") { 17TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
17 { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); } 18 { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); }
@@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") {
20 21
21TEST_CASE("HostMemory: Simple map", "[common]") { 22TEST_CASE("HostMemory: Simple map", "[common]") {
22 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 23 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
23 mem.Map(0x5000, 0x8000, 0x1000, PERMS); 24 mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP);
24 25
25 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 26 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
26 data[0] = 50; 27 data[0] = 50;
@@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") {
29 30
30TEST_CASE("HostMemory: Simple mirror map", "[common]") { 31TEST_CASE("HostMemory: Simple mirror map", "[common]") {
31 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 32 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
32 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 33 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
33 mem.Map(0x8000, 0x4000, 0x1000, PERMS); 34 mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP);
34 35
35 volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000; 36 volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000;
36 volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000; 37 volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000;
@@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") {
40 41
41TEST_CASE("HostMemory: Simple unmap", "[common]") { 42TEST_CASE("HostMemory: Simple unmap", "[common]") {
42 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 43 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
43 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 44 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
44 45
45 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 46 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
46 data[75] = 50; 47 data[75] = 50;
47 REQUIRE(data[75] == 50); 48 REQUIRE(data[75] == 50);
48 49
49 mem.Unmap(0x5000, 0x2000); 50 mem.Unmap(0x5000, 0x2000, HEAP);
50} 51}
51 52
52TEST_CASE("HostMemory: Simple unmap and remap", "[common]") { 53TEST_CASE("HostMemory: Simple unmap and remap", "[common]") {
53 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 54 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
54 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 55 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
55 56
56 volatile u8* const data = mem.VirtualBasePointer() + 0x5000; 57 volatile u8* const data = mem.VirtualBasePointer() + 0x5000;
57 data[0] = 50; 58 data[0] = 50;
58 REQUIRE(data[0] == 50); 59 REQUIRE(data[0] == 50);
59 60
60 mem.Unmap(0x5000, 0x2000); 61 mem.Unmap(0x5000, 0x2000, HEAP);
61 62
62 mem.Map(0x5000, 0x3000, 0x2000, PERMS); 63 mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP);
63 REQUIRE(data[0] == 50); 64 REQUIRE(data[0] == 50);
64 65
65 mem.Map(0x7000, 0x2000, 0x5000, PERMS); 66 mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP);
66 REQUIRE(data[0x3000] == 50); 67 REQUIRE(data[0x3000] == 50);
67} 68}
68 69
69TEST_CASE("HostMemory: Nieche allocation", "[common]") { 70TEST_CASE("HostMemory: Nieche allocation", "[common]") {
70 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 71 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
71 mem.Map(0x0000, 0, 0x20000, PERMS); 72 mem.Map(0x0000, 0, 0x20000, PERMS, HEAP);
72 mem.Unmap(0x0000, 0x4000); 73 mem.Unmap(0x0000, 0x4000, HEAP);
73 mem.Map(0x1000, 0, 0x2000, PERMS); 74 mem.Map(0x1000, 0, 0x2000, PERMS, HEAP);
74 mem.Map(0x3000, 0, 0x1000, PERMS); 75 mem.Map(0x3000, 0, 0x1000, PERMS, HEAP);
75 mem.Map(0, 0, 0x1000, PERMS); 76 mem.Map(0, 0, 0x1000, PERMS, HEAP);
76} 77}
77 78
78TEST_CASE("HostMemory: Full unmap", "[common]") { 79TEST_CASE("HostMemory: Full unmap", "[common]") {
79 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 80 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
80 mem.Map(0x8000, 0, 0x4000, PERMS); 81 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
81 mem.Unmap(0x8000, 0x4000); 82 mem.Unmap(0x8000, 0x4000, HEAP);
82 mem.Map(0x6000, 0, 0x16000, PERMS); 83 mem.Map(0x6000, 0, 0x16000, PERMS, HEAP);
83} 84}
84 85
85TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") { 86TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") {
86 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 87 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
87 mem.Map(0x0000, 0, 0x4000, PERMS); 88 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
88 mem.Unmap(0x2000, 0x4000); 89 mem.Unmap(0x2000, 0x4000, HEAP);
89 mem.Map(0x2000, 0x80000, 0x4000, PERMS); 90 mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP);
90} 91}
91 92
92TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") { 93TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") {
93 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 94 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
94 mem.Map(0x8000, 0, 0x4000, PERMS); 95 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
95 mem.Unmap(0x6000, 0x4000); 96 mem.Unmap(0x6000, 0x4000, HEAP);
96 mem.Map(0x8000, 0, 0x2000, PERMS); 97 mem.Map(0x8000, 0, 0x2000, PERMS, HEAP);
97} 98}
98 99
99TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") { 100TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") {
100 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 101 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
101 mem.Map(0x0000, 0, 0x4000, PERMS); 102 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
102 mem.Map(0x4000, 0, 0x1b000, PERMS); 103 mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP);
103 mem.Unmap(0x3000, 0x1c000); 104 mem.Unmap(0x3000, 0x1c000, HEAP);
104 mem.Map(0x3000, 0, 0x20000, PERMS); 105 mem.Map(0x3000, 0, 0x20000, PERMS, HEAP);
105} 106}
106 107
107TEST_CASE("HostMemory: Unmap between placeholders", "[common]") { 108TEST_CASE("HostMemory: Unmap between placeholders", "[common]") {
108 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 109 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
109 mem.Map(0x0000, 0, 0x4000, PERMS); 110 mem.Map(0x0000, 0, 0x4000, PERMS, HEAP);
110 mem.Map(0x4000, 0, 0x4000, PERMS); 111 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
111 mem.Unmap(0x2000, 0x4000); 112 mem.Unmap(0x2000, 0x4000, HEAP);
112 mem.Map(0x2000, 0, 0x4000, PERMS); 113 mem.Map(0x2000, 0, 0x4000, PERMS, HEAP);
113} 114}
114 115
115TEST_CASE("HostMemory: Unmap to origin", "[common]") { 116TEST_CASE("HostMemory: Unmap to origin", "[common]") {
116 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 117 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
117 mem.Map(0x4000, 0, 0x4000, PERMS); 118 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
118 mem.Map(0x8000, 0, 0x4000, PERMS); 119 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
119 mem.Unmap(0x4000, 0x4000); 120 mem.Unmap(0x4000, 0x4000, HEAP);
120 mem.Map(0, 0, 0x4000, PERMS); 121 mem.Map(0, 0, 0x4000, PERMS, HEAP);
121 mem.Map(0x4000, 0, 0x4000, PERMS); 122 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
122} 123}
123 124
124TEST_CASE("HostMemory: Unmap to right", "[common]") { 125TEST_CASE("HostMemory: Unmap to right", "[common]") {
125 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 126 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
126 mem.Map(0x4000, 0, 0x4000, PERMS); 127 mem.Map(0x4000, 0, 0x4000, PERMS, HEAP);
127 mem.Map(0x8000, 0, 0x4000, PERMS); 128 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
128 mem.Unmap(0x8000, 0x4000); 129 mem.Unmap(0x8000, 0x4000, HEAP);
129 mem.Map(0x8000, 0, 0x4000, PERMS); 130 mem.Map(0x8000, 0, 0x4000, PERMS, HEAP);
130} 131}
131 132
132TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") { 133TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") {
133 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 134 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
134 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 135 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
135 136
136 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 137 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
137 ptr[0x1000] = 17; 138 ptr[0x1000] = 17;
138 139
139 mem.Unmap(0x6000, 0x2000); 140 mem.Unmap(0x6000, 0x2000, HEAP);
140 141
141 REQUIRE(ptr[0x1000] == 17); 142 REQUIRE(ptr[0x1000] == 17);
142} 143}
143 144
144TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") { 145TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
145 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 146 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
146 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 147 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
147 148
148 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 149 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
149 ptr[0x3000] = 19; 150 ptr[0x3000] = 19;
150 ptr[0x3fff] = 12; 151 ptr[0x3fff] = 12;
151 152
152 mem.Unmap(0x4000, 0x2000); 153 mem.Unmap(0x4000, 0x2000, HEAP);
153 154
154 REQUIRE(ptr[0x3000] == 19); 155 REQUIRE(ptr[0x3000] == 19);
155 REQUIRE(ptr[0x3fff] == 12); 156 REQUIRE(ptr[0x3fff] == 12);
@@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") {
157 158
158TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") { 159TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
159 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 160 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
160 mem.Map(0x4000, 0x10000, 0x4000, PERMS); 161 mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP);
161 162
162 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 163 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
163 ptr[0x0000] = 19; 164 ptr[0x0000] = 19;
164 ptr[0x3fff] = 12; 165 ptr[0x3fff] = 12;
165 166
166 mem.Unmap(0x1000, 0x2000); 167 mem.Unmap(0x1000, 0x2000, HEAP);
167 168
168 REQUIRE(ptr[0x0000] == 19); 169 REQUIRE(ptr[0x0000] == 19);
169 REQUIRE(ptr[0x3fff] == 12); 170 REQUIRE(ptr[0x3fff] == 12);
@@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") {
171 172
172TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") { 173TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") {
173 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); 174 HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
174 mem.Map(0x4000, 0x10000, 0x2000, PERMS); 175 mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP);
175 mem.Map(0x6000, 0x20000, 0x2000, PERMS); 176 mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP);
176 177
177 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; 178 volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000;
178 ptr[0x0000] = 19; 179 ptr[0x0000] = 19;
179 ptr[0x3fff] = 12; 180 ptr[0x3fff] = 12;
180 181
181 mem.Unmap(0x5000, 0x2000); 182 mem.Unmap(0x5000, 0x2000, HEAP);
182 183
183 REQUIRE(ptr[0x0000] == 19); 184 REQUIRE(ptr[0x0000] == 19);
184 REQUIRE(ptr[0x3fff] == 12); 185 REQUIRE(ptr[0x3fff] == 12);
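
A hypothetical companion test for the new tracker, written in the same Catch2 style and reusing the PERMS, BACKING_SIZE, and VIRTUAL_SIZE constants from the file above. It is not part of this commit, additionally needs the heap_tracker header, and assumes the partial-unmap semantics shown for HostMemory also hold when driven through HeapTracker.

#include "common/heap_tracker.h"

TEST_CASE("HeapTracker: Partial separate heap unmap", "[common]") {
    HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE);
    Common::HeapTracker tracker(mem);

    // Separate-heap mappings are tracked and become resident on first use.
    tracker.Map(0x4000, 0x10000, 0x4000, PERMS, true);

    volatile u8* const ptr = tracker.VirtualBasePointer() + 0x4000;
    ptr[0x0000] = 19;
    ptr[0x3fff] = 12;

    // Unmapping the middle splits the tracked mapping at both boundaries.
    tracker.Unmap(0x5000, 0x2000, true);

    REQUIRE(ptr[0x0000] == 19);
    REQUIRE(ptr[0x3fff] == 12);
}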