| author | 2024-01-01 13:56:16 -0600 |
|---|---|
| committer | 2024-01-01 13:56:16 -0600 |
| commit | f0f92edbd0a78abda819251ddc325da4acc14216 (patch) |
| tree | 6a23c1be26148c4137a6f67ebdf926a3f82ce47f /src |
| parent | Merge pull request #12501 from liamwhite/ips (diff) |
| parent | heap_tracker: use linear-time mapping eviction (diff) |
| download | yuzu-f0f92edbd0a78abda819251ddc325da4acc14216.tar.gz yuzu-f0f92edbd0a78abda819251ddc325da4acc14216.tar.xz yuzu-f0f92edbd0a78abda819251ddc325da4acc14216.zip |
Merge pull request #12466 from liamwhite/sh2
core: track separate heap allocation for linux
Diffstat (limited to 'src')
| -rw-r--r-- | src/common/CMakeLists.txt | 2 |
|---|---|---|
| -rw-r--r-- | src/common/heap_tracker.cpp | 281 |
| -rw-r--r-- | src/common/heap_tracker.h | 98 |
| -rw-r--r-- | src/common/host_memory.cpp | 10 |
| -rw-r--r-- | src/common/host_memory.h | 11 |
| -rw-r--r-- | src/core/CMakeLists.txt | 1 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic.cpp | 49 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic.h | 20 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_32.cpp | 5 |
| -rw-r--r-- | src/core/arm/dynarmic/arm_dynarmic_64.cpp | 5 |
| -rw-r--r-- | src/core/hle/kernel/k_page_table_base.cpp | 26 |
| -rw-r--r-- | src/core/hle/kernel/k_page_table_base.h | 3 |
| -rw-r--r-- | src/core/hle/kernel/k_process.cpp | 6 |
| -rw-r--r-- | src/core/memory.cpp | 86 |
| -rw-r--r-- | src/core/memory.h | 7 |
| -rw-r--r-- | src/tests/common/host_memory.cpp | 99 |

16 files changed, 616 insertions(+), 93 deletions(-)
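The idea behind this merge, in brief: Linux caps the number of distinct memory mappings a process may hold (`vm.max_map_count`), and eagerly materializing every separate-heap mapping can exhaust that budget. The new `Common::HeapTracker` therefore records separate-heap mappings without materializing them, maps each one on first touch from a SIGSEGV handler, and bulk-evicts the least-recently-used half once the resident count nears the limit. The following self-contained sketch models only that residency policy with standard containers; `ResidencyModel`, `Touch`, and `EvictHalf` are illustrative names, not the emulator's API.

```cpp
// Minimal model of the residency policy introduced by this merge. The real
// HeapTracker uses intrusive red-black trees and backs each mapping with
// Common::HostMemory; this sketch only demonstrates the LRU-half eviction.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>

struct Mapping {
    std::uint64_t tick = 0; // last access time, for LRU-style eviction
    bool resident = false;  // whether the host mapping currently exists
};

class ResidencyModel {
public:
    explicit ResidencyModel(std::size_t max_resident) : m_max_resident{max_resident} {}

    void Touch(std::uint64_t vaddr) {
        Mapping& m = m_mappings[vaddr];
        m.tick = m_tick++;
        if (!m.resident) {
            m.resident = true; // the real code calls HostMemory::Map here
            ++m_resident;
            if (m_resident > m_max_resident) {
                EvictHalf();
            }
        }
    }

    std::size_t ResidentCount() const { return m_resident; }

private:
    void EvictHalf() {
        // Dump half of the resident mappings in one pass. Evicting in bulk
        // keeps eviction infrequent, the same rationale given in
        // RebuildSeparateHeapAddressSpace() below.
        std::multimap<std::uint64_t, std::uint64_t> by_tick; // tick -> vaddr
        for (const auto& [vaddr, m] : m_mappings) {
            if (m.resident) {
                by_tick.emplace(m.tick, vaddr);
            }
        }
        std::size_t to_evict = m_resident / 2;
        for (auto it = by_tick.begin(); to_evict > 0 && it != by_tick.end(); ++it, --to_evict) {
            m_mappings[it->second].resident = false; // real code: HostMemory::Unmap
            --m_resident;
        }
    }

    std::map<std::uint64_t, Mapping> m_mappings;
    std::size_t m_resident = 0;
    std::size_t m_max_resident;
    std::uint64_t m_tick = 0;
};

int main() {
    ResidencyModel model(4);
    for (std::uint64_t page = 0; page < 10; ++page) {
        model.Touch(page * 0x1000); // page size is illustrative
    }
    std::cout << "resident after 10 touches: " << model.ResidentCount() << '\n';
}
```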
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index b58a7073f..8c57d47c6 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
| @@ -64,6 +64,8 @@ add_library(common STATIC | |||
| 64 | fs/path_util.cpp | 64 | fs/path_util.cpp |
| 65 | fs/path_util.h | 65 | fs/path_util.h |
| 66 | hash.h | 66 | hash.h |
| 67 | heap_tracker.cpp | ||
| 68 | heap_tracker.h | ||
| 67 | hex_util.cpp | 69 | hex_util.cpp |
| 68 | hex_util.h | 70 | hex_util.h |
| 69 | host_memory.cpp | 71 | host_memory.cpp |
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..683208795
--- /dev/null
+++ b/src/common/heap_tracker.cpp
| @@ -0,0 +1,281 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #include <fstream> | ||
| 5 | #include <vector> | ||
| 6 | |||
| 7 | #include "common/heap_tracker.h" | ||
| 8 | #include "common/logging/log.h" | ||
| 9 | |||
| 10 | namespace Common { | ||
| 11 | |||
| 12 | namespace { | ||
| 13 | |||
| 14 | s64 GetMaxPermissibleResidentMapCount() { | ||
| 15 | // Default value. | ||
| 16 | s64 value = 65530; | ||
| 17 | |||
| 18 | // Try to read how many mappings we can make. | ||
| 19 | std::ifstream s("/proc/sys/vm/max_map_count"); | ||
| 20 | s >> value; | ||
| 21 | |||
| 22 | // Print, for debug. | ||
| 23 | LOG_INFO(HW_Memory, "Current maximum map count: {}", value); | ||
| 24 | |||
| 25 | // Allow 20000 maps for other code and to account for split inaccuracy. | ||
| 26 | return std::max<s64>(value - 20000, 0); | ||
| 27 | } | ||
| 28 | |||
| 29 | } // namespace | ||
| 30 | |||
| 31 | HeapTracker::HeapTracker(Common::HostMemory& buffer) | ||
| 32 | : m_buffer(buffer), m_max_resident_map_count(GetMaxPermissibleResidentMapCount()) {} | ||
| 33 | HeapTracker::~HeapTracker() = default; | ||
| 34 | |||
| 35 | void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length, | ||
| 36 | MemoryPermission perm, bool is_separate_heap) { | ||
| 37 | // When mapping other memory, map pages immediately. | ||
| 38 | if (!is_separate_heap) { | ||
| 39 | m_buffer.Map(virtual_offset, host_offset, length, perm, false); | ||
| 40 | return; | ||
| 41 | } | ||
| 42 | |||
| 43 | { | ||
| 44 | // We are mapping part of a separate heap. | ||
| 45 | std::scoped_lock lk{m_lock}; | ||
| 46 | |||
| 47 | auto* const map = new SeparateHeapMap{ | ||
| 48 | .vaddr = virtual_offset, | ||
| 49 | .paddr = host_offset, | ||
| 50 | .size = length, | ||
| 51 | .tick = m_tick++, | ||
| 52 | .perm = perm, | ||
| 53 | .is_resident = false, | ||
| 54 | }; | ||
| 55 | |||
| 56 | // Insert into mappings. | ||
| 57 | m_map_count++; | ||
| 58 | m_mappings.insert(*map); | ||
| 59 | } | ||
| 60 | |||
| 61 | // Finally, map. | ||
| 62 | this->DeferredMapSeparateHeap(virtual_offset); | ||
| 63 | } | ||
| 64 | |||
| 65 | void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) { | ||
| 66 | // If this is a separate heap... | ||
| 67 | if (is_separate_heap) { | ||
| 68 | std::scoped_lock lk{m_lock}; | ||
| 69 | |||
| 70 | const SeparateHeapMap key{ | ||
| 71 | .vaddr = virtual_offset, | ||
| 72 | }; | ||
| 73 | |||
| 74 | // Split at the boundaries of the region we are removing. | ||
| 75 | this->SplitHeapMapLocked(virtual_offset); | ||
| 76 | this->SplitHeapMapLocked(virtual_offset + size); | ||
| 77 | |||
| 78 | // Erase all mappings in range. | ||
| 79 | auto it = m_mappings.find(key); | ||
| 80 | while (it != m_mappings.end() && it->vaddr < virtual_offset + size) { | ||
| 81 | // Get underlying item. | ||
| 82 | auto* const item = std::addressof(*it); | ||
| 83 | |||
| 84 | // If resident, erase from resident map. | ||
| 85 | if (item->is_resident) { | ||
| 86 | ASSERT(--m_resident_map_count >= 0); | ||
| 87 | m_resident_mappings.erase(m_resident_mappings.iterator_to(*item)); | ||
| 88 | } | ||
| 89 | |||
| 90 | // Erase from map. | ||
| 91 | ASSERT(--m_map_count >= 0); | ||
| 92 | it = m_mappings.erase(it); | ||
| 93 | |||
| 94 | // Free the item. | ||
| 95 | delete item; | ||
| 96 | } | ||
| 97 | } | ||
| 98 | |||
| 99 | // Unmap pages. | ||
| 100 | m_buffer.Unmap(virtual_offset, size, false); | ||
| 101 | } | ||
| 102 | |||
| 103 | void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) { | ||
| 104 | // Ensure no rebuild occurs while reprotecting. | ||
| 105 | std::shared_lock lk{m_rebuild_lock}; | ||
| 106 | |||
| 107 | // Split at the boundaries of the region we are reprotecting. | ||
| 108 | this->SplitHeapMap(virtual_offset, size); | ||
| 109 | |||
| 110 | // Declare tracking variables. | ||
| 111 | const VAddr end = virtual_offset + size; | ||
| 112 | VAddr cur = virtual_offset; | ||
| 113 | |||
| 114 | while (cur < end) { | ||
| 115 | VAddr next = cur; | ||
| 116 | bool should_protect = false; | ||
| 117 | |||
| 118 | { | ||
| 119 | std::scoped_lock lk2{m_lock}; | ||
| 120 | |||
| 121 | const SeparateHeapMap key{ | ||
| 122 | .vaddr = next, | ||
| 123 | }; | ||
| 124 | |||
| 125 | // Try to get the next mapping corresponding to this address. | ||
| 126 | const auto it = m_mappings.nfind(key); | ||
| 127 | |||
| 128 | if (it == m_mappings.end()) { | ||
| 129 | // There are no separate heap mappings remaining. | ||
| 130 | next = end; | ||
| 131 | should_protect = true; | ||
| 132 | } else if (it->vaddr == cur) { | ||
| 133 | // We are in range. | ||
| 134 | // Update permission bits. | ||
| 135 | it->perm = perm; | ||
| 136 | |||
| 137 | // Determine next address and whether we should protect. | ||
| 138 | next = cur + it->size; | ||
| 139 | should_protect = it->is_resident; | ||
| 140 | } else /* if (it->vaddr > cur) */ { | ||
| 141 | // We weren't in range, but there is a block coming up that will be. | ||
| 142 | next = it->vaddr; | ||
| 143 | should_protect = true; | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | // Clamp to end. | ||
| 148 | next = std::min(next, end); | ||
| 149 | |||
| 150 | // Reprotect, if we need to. | ||
| 151 | if (should_protect) { | ||
| 152 | m_buffer.Protect(cur, next - cur, perm); | ||
| 153 | } | ||
| 154 | |||
| 155 | // Advance. | ||
| 156 | cur = next; | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 160 | bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) { | ||
| 161 | if (m_buffer.IsInVirtualRange(fault_address)) { | ||
| 162 | return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer()); | ||
| 163 | } | ||
| 164 | |||
| 165 | return false; | ||
| 166 | } | ||
| 167 | |||
| 168 | bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) { | ||
| 169 | bool rebuild_required = false; | ||
| 170 | |||
| 171 | { | ||
| 172 | std::scoped_lock lk{m_lock}; | ||
| 173 | |||
| 174 | // Check to ensure this was a non-resident separate heap mapping. | ||
| 175 | const auto it = this->GetNearestHeapMapLocked(virtual_offset); | ||
| 176 | if (it == m_mappings.end() || it->is_resident) { | ||
| 177 | return false; | ||
| 178 | } | ||
| 179 | |||
| 180 | // Update tick before possible rebuild. | ||
| 181 | it->tick = m_tick++; | ||
| 182 | |||
| 183 | // Check if we need to rebuild. | ||
| 184 | if (m_resident_map_count > m_max_resident_map_count) { | ||
| 185 | rebuild_required = true; | ||
| 186 | } | ||
| 187 | |||
| 188 | // Map the area. | ||
| 189 | m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false); | ||
| 190 | |||
| 191 | // This map is now resident. | ||
| 192 | it->is_resident = true; | ||
| 193 | m_resident_map_count++; | ||
| 194 | m_resident_mappings.insert(*it); | ||
| 195 | } | ||
| 196 | |||
| 197 | if (rebuild_required) { | ||
| 198 | // A rebuild was required, so perform it now. | ||
| 199 | this->RebuildSeparateHeapAddressSpace(); | ||
| 200 | } | ||
| 201 | |||
| 202 | return true; | ||
| 203 | } | ||
| 204 | |||
| 205 | void HeapTracker::RebuildSeparateHeapAddressSpace() { | ||
| 206 | std::scoped_lock lk{m_rebuild_lock, m_lock}; | ||
| 207 | |||
| 208 | ASSERT(!m_resident_mappings.empty()); | ||
| 209 | |||
| 210 | // Dump half of the mappings. | ||
| 211 | // | ||
| 212 | // Despite being worse in theory, this has proven to be better in practice than more | ||
| 213 | // regularly dumping a smaller amount, because it significantly reduces average case | ||
| 214 | // lock contention. | ||
| 215 | const size_t desired_count = std::min(m_resident_map_count, m_max_resident_map_count) / 2; | ||
| 216 | const size_t evict_count = m_resident_map_count - desired_count; | ||
| 217 | auto it = m_resident_mappings.begin(); | ||
| 218 | |||
| 219 | for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) { | ||
| 220 | // Unmark and unmap. | ||
| 221 | it->is_resident = false; | ||
| 222 | m_buffer.Unmap(it->vaddr, it->size, false); | ||
| 223 | |||
| 224 | // Advance. | ||
| 225 | ASSERT(--m_resident_map_count >= 0); | ||
| 226 | it = m_resident_mappings.erase(it); | ||
| 227 | } | ||
| 228 | } | ||
| 229 | |||
| 230 | void HeapTracker::SplitHeapMap(VAddr offset, size_t size) { | ||
| 231 | std::scoped_lock lk{m_lock}; | ||
| 232 | |||
| 233 | this->SplitHeapMapLocked(offset); | ||
| 234 | this->SplitHeapMapLocked(offset + size); | ||
| 235 | } | ||
| 236 | |||
| 237 | void HeapTracker::SplitHeapMapLocked(VAddr offset) { | ||
| 238 | const auto it = this->GetNearestHeapMapLocked(offset); | ||
| 239 | if (it == m_mappings.end() || it->vaddr == offset) { | ||
| 240 | // Not contained or no split required. | ||
| 241 | return; | ||
| 242 | } | ||
| 243 | |||
| 244 | // Cache the original values. | ||
| 245 | auto* const left = std::addressof(*it); | ||
| 246 | const size_t orig_size = left->size; | ||
| 247 | |||
| 248 | // Adjust the left map. | ||
| 249 | const size_t left_size = offset - left->vaddr; | ||
| 250 | left->size = left_size; | ||
| 251 | |||
| 252 | // Create the new right map. | ||
| 253 | auto* const right = new SeparateHeapMap{ | ||
| 254 | .vaddr = left->vaddr + left_size, | ||
| 255 | .paddr = left->paddr + left_size, | ||
| 256 | .size = orig_size - left_size, | ||
| 257 | .tick = left->tick, | ||
| 258 | .perm = left->perm, | ||
| 259 | .is_resident = left->is_resident, | ||
| 260 | }; | ||
| 261 | |||
| 262 | // Insert the new right map. | ||
| 263 | m_map_count++; | ||
| 264 | m_mappings.insert(*right); | ||
| 265 | |||
| 266 | // If resident, also insert into resident map. | ||
| 267 | if (right->is_resident) { | ||
| 268 | m_resident_map_count++; | ||
| 269 | m_resident_mappings.insert(*right); | ||
| 270 | } | ||
| 271 | } | ||
| 272 | |||
| 273 | HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) { | ||
| 274 | const SeparateHeapMap key{ | ||
| 275 | .vaddr = offset, | ||
| 276 | }; | ||
| 277 | |||
| 278 | return m_mappings.find(key); | ||
| 279 | } | ||
| 280 | |||
| 281 | } // namespace Common | ||
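The resident-map budget above comes straight from the `vm.max_map_count` sysctl, minus 20000 maps of headroom. A minimal standalone version of that computation follows; note that it parses into a temporary first, since under C++11 stream semantics a failed extraction (for example, when `/proc` is unavailable) writes 0 into the target, which would silently zero the default.

```cpp
// Standalone sketch of GetMaxPermissibleResidentMapCount(): read the sysctl,
// keep the kernel default if the read fails, and reserve headroom.
#include <algorithm>
#include <cstdint>
#include <fstream>
#include <iostream>

std::int64_t MaxResidentMaps() {
    std::int64_t value = 65530; // kernel default for vm.max_map_count
    std::ifstream s("/proc/sys/vm/max_map_count");
    std::int64_t parsed{};
    if (s >> parsed) {
        value = parsed; // only overwrite the default on a successful read
    }
    // Reserve maps for other code and to absorb split inaccuracy, as the
    // tracker does.
    return std::max<std::int64_t>(value - 20000, 0);
}

int main() {
    std::cout << "resident map budget: " << MaxResidentMaps() << '\n';
}
```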
diff --git a/src/common/heap_tracker.h b/src/common/heap_tracker.h
new file mode 100644
index 000000000..ee5b0bf43
--- /dev/null
+++ b/src/common/heap_tracker.h
| @@ -0,0 +1,98 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #pragma once | ||
| 5 | |||
| 6 | #include <atomic> | ||
| 7 | #include <mutex> | ||
| 8 | #include <set> | ||
| 9 | #include <shared_mutex> | ||
| 10 | |||
| 11 | #include "common/host_memory.h" | ||
| 12 | #include "common/intrusive_red_black_tree.h" | ||
| 13 | |||
| 14 | namespace Common { | ||
| 15 | |||
| 16 | struct SeparateHeapMap { | ||
| 17 | Common::IntrusiveRedBlackTreeNode addr_node{}; | ||
| 18 | Common::IntrusiveRedBlackTreeNode tick_node{}; | ||
| 19 | VAddr vaddr{}; | ||
| 20 | PAddr paddr{}; | ||
| 21 | size_t size{}; | ||
| 22 | size_t tick{}; | ||
| 23 | MemoryPermission perm{}; | ||
| 24 | bool is_resident{}; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct SeparateHeapMapAddrComparator { | ||
| 28 | static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) { | ||
| 29 | if (lhs.vaddr < rhs.vaddr) { | ||
| 30 | return -1; | ||
| 31 | } else if (lhs.vaddr <= (rhs.vaddr + rhs.size - 1)) { | ||
| 32 | return 0; | ||
| 33 | } else { | ||
| 34 | return 1; | ||
| 35 | } | ||
| 36 | } | ||
| 37 | }; | ||
| 38 | |||
| 39 | struct SeparateHeapMapTickComparator { | ||
| 40 | static constexpr int Compare(const SeparateHeapMap& lhs, const SeparateHeapMap& rhs) { | ||
| 41 | if (lhs.tick < rhs.tick) { | ||
| 42 | return -1; | ||
| 43 | } else if (lhs.tick > rhs.tick) { | ||
| 44 | return 1; | ||
| 45 | } else { | ||
| 46 | return SeparateHeapMapAddrComparator::Compare(lhs, rhs); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | }; | ||
| 50 | |||
| 51 | class HeapTracker { | ||
| 52 | public: | ||
| 53 | explicit HeapTracker(Common::HostMemory& buffer); | ||
| 54 | ~HeapTracker(); | ||
| 55 | |||
| 56 | void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perm, | ||
| 57 | bool is_separate_heap); | ||
| 58 | void Unmap(size_t virtual_offset, size_t size, bool is_separate_heap); | ||
| 59 | void Protect(size_t virtual_offset, size_t length, MemoryPermission perm); | ||
| 60 | u8* VirtualBasePointer() { | ||
| 61 | return m_buffer.VirtualBasePointer(); | ||
| 62 | } | ||
| 63 | |||
| 64 | bool DeferredMapSeparateHeap(u8* fault_address); | ||
| 65 | bool DeferredMapSeparateHeap(size_t virtual_offset); | ||
| 66 | |||
| 67 | private: | ||
| 68 | using AddrTreeTraits = | ||
| 69 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::addr_node>; | ||
| 70 | using AddrTree = AddrTreeTraits::TreeType<SeparateHeapMapAddrComparator>; | ||
| 71 | |||
| 72 | using TickTreeTraits = | ||
| 73 | Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&SeparateHeapMap::tick_node>; | ||
| 74 | using TickTree = TickTreeTraits::TreeType<SeparateHeapMapTickComparator>; | ||
| 75 | |||
| 76 | AddrTree m_mappings{}; | ||
| 77 | TickTree m_resident_mappings{}; | ||
| 78 | |||
| 79 | private: | ||
| 80 | void SplitHeapMap(VAddr offset, size_t size); | ||
| 81 | void SplitHeapMapLocked(VAddr offset); | ||
| 82 | |||
| 83 | AddrTree::iterator GetNearestHeapMapLocked(VAddr offset); | ||
| 84 | |||
| 85 | void RebuildSeparateHeapAddressSpace(); | ||
| 86 | |||
| 87 | private: | ||
| 88 | Common::HostMemory& m_buffer; | ||
| 89 | const s64 m_max_resident_map_count; | ||
| 90 | |||
| 91 | std::shared_mutex m_rebuild_lock{}; | ||
| 92 | std::mutex m_lock{}; | ||
| 93 | s64 m_map_count{}; | ||
| 94 | s64 m_resident_map_count{}; | ||
| 95 | size_t m_tick{}; | ||
| 96 | }; | ||
| 97 | |||
| 98 | } // namespace Common | ||
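`SeparateHeapMapAddrComparator` compares a lookup key as equal to any mapping whose address range contains it, so an ordinary `find()` doubles as a point-in-interval query; this only works because the tracker splits mappings so that stored ranges never overlap. Below is a sketch of the same trick using `std::set`; `Interval` and `IntervalLess` are hypothetical names, not the intrusive tree used here.

```cpp
// Point-in-interval lookup via an ordering in which a point key compares
// equivalent to the interval containing it. Requires that stored intervals
// never overlap (the tracker guarantees this by splitting mappings).
#include <cstdint>
#include <iostream>
#include <set>

struct Interval {
    std::uint64_t vaddr;
    std::uint64_t size; // zero for lookup keys
};

struct IntervalLess {
    bool operator()(const Interval& lhs, const Interval& rhs) const {
        // lhs sorts strictly before rhs iff it ends before rhs begins;
        // a zero-size key is treated as a one-byte point.
        return lhs.vaddr + (lhs.size ? lhs.size : 1) <= rhs.vaddr;
    }
};

int main() {
    std::set<Interval, IntervalLess> tree{
        {0x1000, 0x2000}, // covers [0x1000, 0x3000)
        {0x5000, 0x1000}, // covers [0x5000, 0x6000)
    };
    const auto it = tree.find(Interval{0x2abc, 0});
    if (it != tree.end()) {
        std::cout << "0x2abc lies inside the mapping at 0x" << std::hex << it->vaddr << '\n';
    }
}
```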
diff --git a/src/common/host_memory.cpp b/src/common/host_memory.cpp
index e540375b8..860c39e6a 100644
--- a/src/common/host_memory.cpp
+++ b/src/common/host_memory.cpp
| @@ -679,7 +679,7 @@ HostMemory::HostMemory(HostMemory&&) noexcept = default; | |||
| 679 | HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default; | 679 | HostMemory& HostMemory::operator=(HostMemory&&) noexcept = default; |
| 680 | 680 | ||
| 681 | void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, | 681 | void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, |
| 682 | MemoryPermission perms) { | 682 | MemoryPermission perms, bool separate_heap) { |
| 683 | ASSERT(virtual_offset % PageAlignment == 0); | 683 | ASSERT(virtual_offset % PageAlignment == 0); |
| 684 | ASSERT(host_offset % PageAlignment == 0); | 684 | ASSERT(host_offset % PageAlignment == 0); |
| 685 | ASSERT(length % PageAlignment == 0); | 685 | ASSERT(length % PageAlignment == 0); |
| @@ -691,7 +691,7 @@ void HostMemory::Map(size_t virtual_offset, size_t host_offset, size_t length, | |||
| 691 | impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms); | 691 | impl->Map(virtual_offset + virtual_base_offset, host_offset, length, perms); |
| 692 | } | 692 | } |
| 693 | 693 | ||
| 694 | void HostMemory::Unmap(size_t virtual_offset, size_t length) { | 694 | void HostMemory::Unmap(size_t virtual_offset, size_t length, bool separate_heap) { |
| 695 | ASSERT(virtual_offset % PageAlignment == 0); | 695 | ASSERT(virtual_offset % PageAlignment == 0); |
| 696 | ASSERT(length % PageAlignment == 0); | 696 | ASSERT(length % PageAlignment == 0); |
| 697 | ASSERT(virtual_offset + length <= virtual_size); | 697 | ASSERT(virtual_offset + length <= virtual_size); |
| @@ -701,14 +701,16 @@ void HostMemory::Unmap(size_t virtual_offset, size_t length) { | |||
| 701 | impl->Unmap(virtual_offset + virtual_base_offset, length); | 701 | impl->Unmap(virtual_offset + virtual_base_offset, length); |
| 702 | } | 702 | } |
| 703 | 703 | ||
| 704 | void HostMemory::Protect(size_t virtual_offset, size_t length, bool read, bool write, | 704 | void HostMemory::Protect(size_t virtual_offset, size_t length, MemoryPermission perm) { |
| 705 | bool execute) { | ||
| 706 | ASSERT(virtual_offset % PageAlignment == 0); | 705 | ASSERT(virtual_offset % PageAlignment == 0); |
| 707 | ASSERT(length % PageAlignment == 0); | 706 | ASSERT(length % PageAlignment == 0); |
| 708 | ASSERT(virtual_offset + length <= virtual_size); | 707 | ASSERT(virtual_offset + length <= virtual_size); |
| 709 | if (length == 0 || !virtual_base || !impl) { | 708 | if (length == 0 || !virtual_base || !impl) { |
| 710 | return; | 709 | return; |
| 711 | } | 710 | } |
| 711 | const bool read = True(perm & MemoryPermission::Read); | ||
| 712 | const bool write = True(perm & MemoryPermission::Write); | ||
| 713 | const bool execute = True(perm & MemoryPermission::Execute); | ||
| 712 | impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute); | 714 | impl->Protect(virtual_offset + virtual_base_offset, length, read, write, execute); |
| 713 | } | 715 | } |
| 714 | 716 | ||
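`Protect()` now takes a single `MemoryPermission` bitmask instead of three booleans and decodes it with the `True()` helper. A minimal sketch of that scoped-enum flag pattern is below, with the operators written out by hand (yuzu generates comparable operators with a helper macro); the enum values here are assumptions for illustration.

```cpp
// Scoped-enum bit flags plus a True() helper, as used by the new Protect().
#include <cstdint>
#include <iostream>

enum class MemoryPermission : std::uint32_t {
    Read = 1 << 0,
    Write = 1 << 1,
    Execute = 1 << 2,
    ReadWrite = (1 << 0) | (1 << 1),
};

constexpr MemoryPermission operator|(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) |
                                         static_cast<std::uint32_t>(b));
}

constexpr MemoryPermission operator&(MemoryPermission a, MemoryPermission b) {
    return static_cast<MemoryPermission>(static_cast<std::uint32_t>(a) &
                                         static_cast<std::uint32_t>(b));
}

// True() collapses a masked value to bool, as in the new Protect() body.
constexpr bool True(MemoryPermission p) {
    return static_cast<std::uint32_t>(p) != 0;
}

int main() {
    const MemoryPermission perm = MemoryPermission::Read | MemoryPermission::Execute;
    std::cout << "r=" << True(perm & MemoryPermission::Read)
              << " w=" << True(perm & MemoryPermission::Write)
              << " x=" << True(perm & MemoryPermission::Execute) << '\n';
}
```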
diff --git a/src/common/host_memory.h b/src/common/host_memory.h
index 747c5850c..72fbb05af 100644
--- a/src/common/host_memory.h
+++ b/src/common/host_memory.h
| @@ -40,11 +40,12 @@ public: | |||
| 40 | HostMemory(HostMemory&& other) noexcept; | 40 | HostMemory(HostMemory&& other) noexcept; |
| 41 | HostMemory& operator=(HostMemory&& other) noexcept; | 41 | HostMemory& operator=(HostMemory&& other) noexcept; |
| 42 | 42 | ||
| 43 | void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms); | 43 | void Map(size_t virtual_offset, size_t host_offset, size_t length, MemoryPermission perms, |
| 44 | bool separate_heap); | ||
| 44 | 45 | ||
| 45 | void Unmap(size_t virtual_offset, size_t length); | 46 | void Unmap(size_t virtual_offset, size_t length, bool separate_heap); |
| 46 | 47 | ||
| 47 | void Protect(size_t virtual_offset, size_t length, bool read, bool write, bool execute = false); | 48 | void Protect(size_t virtual_offset, size_t length, MemoryPermission perms); |
| 48 | 49 | ||
| 49 | void EnableDirectMappedAddress(); | 50 | void EnableDirectMappedAddress(); |
| 50 | 51 | ||
| @@ -64,6 +65,10 @@ public: | |||
| 64 | return virtual_base; | 65 | return virtual_base; |
| 65 | } | 66 | } |
| 66 | 67 | ||
| 68 | bool IsInVirtualRange(void* address) const noexcept { | ||
| 69 | return address >= virtual_base && address < virtual_base + virtual_size; | ||
| 70 | } | ||
| 71 | |||
| 67 | private: | 72 | private: |
| 68 | size_t backing_size{}; | 73 | size_t backing_size{}; |
| 69 | size_t virtual_size{}; | 74 | size_t virtual_size{}; |
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index a8b3d480c..367d01dc7 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
| @@ -978,6 +978,7 @@ endif() | |||
| 978 | 978 | ||
| 979 | if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) | 979 | if (ARCHITECTURE_x86_64 OR ARCHITECTURE_arm64) |
| 980 | target_sources(core PRIVATE | 980 | target_sources(core PRIVATE |
| 981 | arm/dynarmic/arm_dynarmic.cpp | ||
| 981 | arm/dynarmic/arm_dynarmic.h | 982 | arm/dynarmic/arm_dynarmic.h |
| 982 | arm/dynarmic/arm_dynarmic_64.cpp | 983 | arm/dynarmic/arm_dynarmic_64.cpp |
| 983 | arm/dynarmic/arm_dynarmic_64.h | 984 | arm/dynarmic/arm_dynarmic_64.h |
diff --git a/src/core/arm/dynarmic/arm_dynarmic.cpp b/src/core/arm/dynarmic/arm_dynarmic.cpp
new file mode 100644
index 000000000..e6e9fc45b
--- /dev/null
+++ b/src/core/arm/dynarmic/arm_dynarmic.cpp
| @@ -0,0 +1,49 @@ | |||
| 1 | // SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project | ||
| 2 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
| 3 | |||
| 4 | #ifdef __linux__ | ||
| 5 | |||
| 6 | #include "common/signal_chain.h" | ||
| 7 | |||
| 8 | #include "core/arm/dynarmic/arm_dynarmic.h" | ||
| 9 | #include "core/hle/kernel/k_process.h" | ||
| 10 | #include "core/memory.h" | ||
| 11 | |||
| 12 | namespace Core { | ||
| 13 | |||
| 14 | namespace { | ||
| 15 | |||
| 16 | thread_local Core::Memory::Memory* g_current_memory{}; | ||
| 17 | std::once_flag g_registered{}; | ||
| 18 | struct sigaction g_old_segv {}; | ||
| 19 | |||
| 20 | void HandleSigSegv(int sig, siginfo_t* info, void* ctx) { | ||
| 21 | if (g_current_memory && g_current_memory->InvalidateSeparateHeap(info->si_addr)) { | ||
| 22 | return; | ||
| 23 | } | ||
| 24 | |||
| 25 | return g_old_segv.sa_sigaction(sig, info, ctx); | ||
| 26 | } | ||
| 27 | |||
| 28 | } // namespace | ||
| 29 | |||
| 30 | ScopedJitExecution::ScopedJitExecution(Kernel::KProcess* process) { | ||
| 31 | g_current_memory = std::addressof(process->GetMemory()); | ||
| 32 | } | ||
| 33 | |||
| 34 | ScopedJitExecution::~ScopedJitExecution() { | ||
| 35 | g_current_memory = nullptr; | ||
| 36 | } | ||
| 37 | |||
| 38 | void ScopedJitExecution::RegisterHandler() { | ||
| 39 | std::call_once(g_registered, [] { | ||
| 40 | struct sigaction sa {}; | ||
| 41 | sa.sa_sigaction = &HandleSigSegv; | ||
| 42 | sa.sa_flags = SA_SIGINFO | SA_ONSTACK; | ||
| 43 | Common::SigAction(SIGSEGV, std::addressof(sa), std::addressof(g_old_segv)); | ||
| 44 | }); | ||
| 45 | } | ||
| 46 | |||
| 47 | } // namespace Core | ||
| 48 | |||
| 49 | #endif | ||
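The handler above claims faults it can resolve and forwards everything else to the previously installed handler; `Common::SigAction` takes care of registering into the process's signal chain. A standalone sketch of the same chaining idea using raw `sigaction` follows. Unlike the version above, which assumes the displaced handler was installed with `SA_SIGINFO`, the sketch also handles the one-argument and default dispositions; `TryResolveFault` is a hypothetical stand-in for `Memory::InvalidateSeparateHeap`.

```cpp
// Linux-only sketch of chaining a SIGSEGV handler: claim faults we can
// resolve, forward everything else to the handler we displaced.
#ifdef __linux__
#include <cstdio>
#include <signal.h>

namespace {

struct sigaction g_old_segv{};

bool TryResolveFault(void* fault_address) {
    // Stand-in for Memory::InvalidateSeparateHeap(); always declines here.
    (void)fault_address;
    return false;
}

void HandleSigSegv(int sig, siginfo_t* info, void* ctx) {
    if (TryResolveFault(info->si_addr)) {
        return; // fault repaired; the faulting instruction is retried
    }
    // Forward to whatever handler we displaced.
    if (g_old_segv.sa_flags & SA_SIGINFO) {
        g_old_segv.sa_sigaction(sig, info, ctx);
    } else if (g_old_segv.sa_handler == SIG_DFL || g_old_segv.sa_handler == SIG_IGN) {
        signal(sig, SIG_DFL); // reinstate and re-raise the default action
        raise(sig);
    } else {
        g_old_segv.sa_handler(sig);
    }
}

} // namespace

int main() {
    struct sigaction sa{};
    sa.sa_sigaction = &HandleSigSegv;
    sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
    sigemptyset(&sa.sa_mask);
    sigaction(SIGSEGV, &sa, &g_old_segv);
    std::puts("SIGSEGV handler installed");
}

#else
int main() {}
#endif
```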
diff --git a/src/core/arm/dynarmic/arm_dynarmic.h b/src/core/arm/dynarmic/arm_dynarmic.h
index eef7c3116..53dd18815 100644
--- a/src/core/arm/dynarmic/arm_dynarmic.h
+++ b/src/core/arm/dynarmic/arm_dynarmic.h
| @@ -26,4 +26,24 @@ constexpr HaltReason TranslateHaltReason(Dynarmic::HaltReason hr) { | |||
| 26 | return static_cast<HaltReason>(hr); | 26 | return static_cast<HaltReason>(hr); |
| 27 | } | 27 | } |
| 28 | 28 | ||
| 29 | #ifdef __linux__ | ||
| 30 | |||
| 31 | class ScopedJitExecution { | ||
| 32 | public: | ||
| 33 | explicit ScopedJitExecution(Kernel::KProcess* process); | ||
| 34 | ~ScopedJitExecution(); | ||
| 35 | static void RegisterHandler(); | ||
| 36 | }; | ||
| 37 | |||
| 38 | #else | ||
| 39 | |||
| 40 | class ScopedJitExecution { | ||
| 41 | public: | ||
| 42 | explicit ScopedJitExecution(Kernel::KProcess* process) {} | ||
| 43 | ~ScopedJitExecution() {} | ||
| 44 | static void RegisterHandler() {} | ||
| 45 | }; | ||
| 46 | |||
| 47 | #endif | ||
| 48 | |||
| 29 | } // namespace Core | 49 | } // namespace Core |
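`ScopedJitExecution` publishes the executing process's memory in a `thread_local` for the duration of a JIT run, so the SIGSEGV handler can find it, and collapses to a no-op type off Linux. A generic sketch of that scoped thread-local publication pattern follows; the names are illustrative, not the emulator's.

```cpp
// Scoped thread-local publication: a guard makes a per-thread pointer
// visible to out-of-band code (here, a signal handler) only while the
// guarded region runs.
#include <iostream>

struct Memory { int id; };

thread_local Memory* g_current_memory = nullptr;

class ScopedPublish {
public:
    explicit ScopedPublish(Memory* m) { g_current_memory = m; }
    ~ScopedPublish() { g_current_memory = nullptr; }
    ScopedPublish(const ScopedPublish&) = delete;
    ScopedPublish& operator=(const ScopedPublish&) = delete;
};

int main() {
    Memory mem{42};
    {
        ScopedPublish scope(&mem);
        // Anything running on this thread (including a signal handler that
        // interrupts it) can now see g_current_memory.
        std::cout << g_current_memory->id << '\n';
    }
    std::cout << (g_current_memory == nullptr) << '\n'; // reset on scope exit
}
```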
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index c78cfd528..36478f722 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
| @@ -331,11 +331,15 @@ bool ArmDynarmic32::IsInThumbMode() const { | |||
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) { | 333 | HaltReason ArmDynarmic32::RunThread(Kernel::KThread* thread) { |
| 334 | ScopedJitExecution sj(thread->GetOwnerProcess()); | ||
| 335 | |||
| 334 | m_jit->ClearExclusiveState(); | 336 | m_jit->ClearExclusiveState(); |
| 335 | return TranslateHaltReason(m_jit->Run()); | 337 | return TranslateHaltReason(m_jit->Run()); |
| 336 | } | 338 | } |
| 337 | 339 | ||
| 338 | HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) { | 340 | HaltReason ArmDynarmic32::StepThread(Kernel::KThread* thread) { |
| 341 | ScopedJitExecution sj(thread->GetOwnerProcess()); | ||
| 342 | |||
| 339 | m_jit->ClearExclusiveState(); | 343 | m_jit->ClearExclusiveState(); |
| 340 | return TranslateHaltReason(m_jit->Step()); | 344 | return TranslateHaltReason(m_jit->Step()); |
| 341 | } | 345 | } |
| @@ -377,6 +381,7 @@ ArmDynarmic32::ArmDynarmic32(System& system, bool uses_wall_clock, Kernel::KProc | |||
| 377 | m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} { | 381 | m_cp15(std::make_shared<DynarmicCP15>(*this)), m_core_index{core_index} { |
| 378 | auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl(); | 382 | auto& page_table_impl = process->GetPageTable().GetBasePageTable().GetImpl(); |
| 379 | m_jit = MakeJit(&page_table_impl); | 383 | m_jit = MakeJit(&page_table_impl); |
| 384 | ScopedJitExecution::RegisterHandler(); | ||
| 380 | } | 385 | } |
| 381 | 386 | ||
| 382 | ArmDynarmic32::~ArmDynarmic32() = default; | 387 | ArmDynarmic32::~ArmDynarmic32() = default; |
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index f351b13d9..c811c8ad5 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
| @@ -362,11 +362,15 @@ std::shared_ptr<Dynarmic::A64::Jit> ArmDynarmic64::MakeJit(Common::PageTable* pa | |||
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) { | 364 | HaltReason ArmDynarmic64::RunThread(Kernel::KThread* thread) { |
| 365 | ScopedJitExecution sj(thread->GetOwnerProcess()); | ||
| 366 | |||
| 365 | m_jit->ClearExclusiveState(); | 367 | m_jit->ClearExclusiveState(); |
| 366 | return TranslateHaltReason(m_jit->Run()); | 368 | return TranslateHaltReason(m_jit->Run()); |
| 367 | } | 369 | } |
| 368 | 370 | ||
| 369 | HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) { | 371 | HaltReason ArmDynarmic64::StepThread(Kernel::KThread* thread) { |
| 372 | ScopedJitExecution sj(thread->GetOwnerProcess()); | ||
| 373 | |||
| 370 | m_jit->ClearExclusiveState(); | 374 | m_jit->ClearExclusiveState(); |
| 371 | return TranslateHaltReason(m_jit->Step()); | 375 | return TranslateHaltReason(m_jit->Step()); |
| 372 | } | 376 | } |
| @@ -406,6 +410,7 @@ ArmDynarmic64::ArmDynarmic64(System& system, bool uses_wall_clock, Kernel::KProc | |||
| 406 | auto& page_table = process->GetPageTable().GetBasePageTable(); | 410 | auto& page_table = process->GetPageTable().GetBasePageTable(); |
| 407 | auto& page_table_impl = page_table.GetImpl(); | 411 | auto& page_table_impl = page_table.GetImpl(); |
| 408 | m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth()); | 412 | m_jit = MakeJit(&page_table_impl, page_table.GetAddressSpaceWidth()); |
| 413 | ScopedJitExecution::RegisterHandler(); | ||
| 409 | } | 414 | } |
| 410 | 415 | ||
| 411 | ArmDynarmic64::~ArmDynarmic64() = default; | 416 | ArmDynarmic64::~ArmDynarmic64() = default; |
diff --git a/src/core/hle/kernel/k_page_table_base.cpp b/src/core/hle/kernel/k_page_table_base.cpp
index 423289145..8c1549559 100644
--- a/src/core/hle/kernel/k_page_table_base.cpp
+++ b/src/core/hle/kernel/k_page_table_base.cpp
| @@ -434,7 +434,7 @@ Result KPageTableBase::InitializeForProcess(Svc::CreateProcessFlag as_type, bool | |||
| 434 | void KPageTableBase::Finalize() { | 434 | void KPageTableBase::Finalize() { |
| 435 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { | 435 | auto HostUnmapCallback = [&](KProcessAddress addr, u64 size) { |
| 436 | if (Settings::IsFastmemEnabled()) { | 436 | if (Settings::IsFastmemEnabled()) { |
| 437 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size); | 437 | m_system.DeviceMemory().buffer.Unmap(GetInteger(addr), size, false); |
| 438 | } | 438 | } |
| 439 | }; | 439 | }; |
| 440 | 440 | ||
| @@ -5243,7 +5243,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { | |||
| 5243 | // Unmap. | 5243 | // Unmap. |
| 5244 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, | 5244 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, |
| 5245 | cur_pages, 0, false, unmap_properties, | 5245 | cur_pages, 0, false, unmap_properties, |
| 5246 | OperationType::Unmap, true)); | 5246 | OperationType::UnmapPhysical, true)); |
| 5247 | } | 5247 | } |
| 5248 | 5248 | ||
| 5249 | // Check if we're done. | 5249 | // Check if we're done. |
| @@ -5326,7 +5326,7 @@ Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { | |||
| 5326 | // Map the pages. | 5326 | // Map the pages. |
| 5327 | R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, | 5327 | R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, |
| 5328 | cur_pg, map_properties, | 5328 | cur_pg, map_properties, |
| 5329 | OperationType::MapFirstGroup, false)); | 5329 | OperationType::MapFirstGroupPhysical, false)); |
| 5330 | } | 5330 | } |
| 5331 | } | 5331 | } |
| 5332 | 5332 | ||
| @@ -5480,7 +5480,7 @@ Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) | |||
| 5480 | 5480 | ||
| 5481 | // Unmap. | 5481 | // Unmap. |
| 5482 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, | 5482 | R_ASSERT(this->Operate(updater.GetPageList(), cur_address, cur_pages, 0, false, |
| 5483 | unmap_properties, OperationType::Unmap, false)); | 5483 | unmap_properties, OperationType::UnmapPhysical, false)); |
| 5484 | } | 5484 | } |
| 5485 | 5485 | ||
| 5486 | // Check if we're done. | 5486 | // Check if we're done. |
| @@ -5655,7 +5655,10 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a | |||
| 5655 | // or free them to the page list, and so it goes unused (along with page properties). | 5655 | // or free them to the page list, and so it goes unused (along with page properties). |
| 5656 | 5656 | ||
| 5657 | switch (operation) { | 5657 | switch (operation) { |
| 5658 | case OperationType::Unmap: { | 5658 | case OperationType::Unmap: |
| 5659 | case OperationType::UnmapPhysical: { | ||
| 5660 | const bool separate_heap = operation == OperationType::UnmapPhysical; | ||
| 5661 | |||
| 5659 | // Ensure that any pages we track are closed on exit. | 5662 | // Ensure that any pages we track are closed on exit. |
| 5660 | KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); | 5663 | KPageGroup pages_to_close(m_kernel, this->GetBlockInfoManager()); |
| 5661 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); | 5664 | SCOPE_EXIT({ pages_to_close.CloseAndReset(); }); |
| @@ -5664,7 +5667,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a | |||
| 5664 | this->MakePageGroup(pages_to_close, virt_addr, num_pages); | 5667 | this->MakePageGroup(pages_to_close, virt_addr, num_pages); |
| 5665 | 5668 | ||
| 5666 | // Unmap. | 5669 | // Unmap. |
| 5667 | m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize); | 5670 | m_memory->UnmapRegion(*m_impl, virt_addr, num_pages * PageSize, separate_heap); |
| 5668 | 5671 | ||
| 5669 | R_SUCCEED(); | 5672 | R_SUCCEED(); |
| 5670 | } | 5673 | } |
| @@ -5672,7 +5675,7 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a | |||
| 5672 | ASSERT(virt_addr != 0); | 5675 | ASSERT(virt_addr != 0); |
| 5673 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); | 5676 | ASSERT(Common::IsAligned(GetInteger(virt_addr), PageSize)); |
| 5674 | m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr, | 5677 | m_memory->MapMemoryRegion(*m_impl, virt_addr, num_pages * PageSize, phys_addr, |
| 5675 | ConvertToMemoryPermission(properties.perm)); | 5678 | ConvertToMemoryPermission(properties.perm), false); |
| 5676 | 5679 | ||
| 5677 | // Open references to pages, if we should. | 5680 | // Open references to pages, if we should. |
| 5678 | if (this->IsHeapPhysicalAddress(phys_addr)) { | 5681 | if (this->IsHeapPhysicalAddress(phys_addr)) { |
| @@ -5711,16 +5714,19 @@ Result KPageTableBase::Operate(PageLinkedList* page_list, KProcessAddress virt_a | |||
| 5711 | 5714 | ||
| 5712 | switch (operation) { | 5715 | switch (operation) { |
| 5713 | case OperationType::MapGroup: | 5716 | case OperationType::MapGroup: |
| 5714 | case OperationType::MapFirstGroup: { | 5717 | case OperationType::MapFirstGroup: |
| 5718 | case OperationType::MapFirstGroupPhysical: { | ||
| 5719 | const bool separate_heap = operation == OperationType::MapFirstGroupPhysical; | ||
| 5720 | |||
| 5715 | // We want to maintain a new reference to every page in the group. | 5721 | // We want to maintain a new reference to every page in the group. |
| 5716 | KScopedPageGroup spg(page_group, operation != OperationType::MapFirstGroup); | 5722 | KScopedPageGroup spg(page_group, operation == OperationType::MapGroup); |
| 5717 | 5723 | ||
| 5718 | for (const auto& node : page_group) { | 5724 | for (const auto& node : page_group) { |
| 5719 | const size_t size{node.GetNumPages() * PageSize}; | 5725 | const size_t size{node.GetNumPages() * PageSize}; |
| 5720 | 5726 | ||
| 5721 | // Map the pages. | 5727 | // Map the pages. |
| 5722 | m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(), | 5728 | m_memory->MapMemoryRegion(*m_impl, virt_addr, size, node.GetAddress(), |
| 5723 | ConvertToMemoryPermission(properties.perm)); | 5729 | ConvertToMemoryPermission(properties.perm), separate_heap); |
| 5724 | 5730 | ||
| 5725 | virt_addr += size; | 5731 | virt_addr += size; |
| 5726 | } | 5732 | } |
diff --git a/src/core/hle/kernel/k_page_table_base.h b/src/core/hle/kernel/k_page_table_base.h
index 556d230b3..077cafc96 100644
--- a/src/core/hle/kernel/k_page_table_base.h
+++ b/src/core/hle/kernel/k_page_table_base.h
| @@ -104,6 +104,9 @@ protected: | |||
| 104 | ChangePermissionsAndRefresh = 5, | 104 | ChangePermissionsAndRefresh = 5, |
| 105 | ChangePermissionsAndRefreshAndFlush = 6, | 105 | ChangePermissionsAndRefreshAndFlush = 6, |
| 106 | Separate = 7, | 106 | Separate = 7, |
| 107 | |||
| 108 | MapFirstGroupPhysical = 65000, | ||
| 109 | UnmapPhysical = 65001, | ||
| 107 | }; | 110 | }; |
| 108 | 111 | ||
| 109 | static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; | 112 | static constexpr size_t MaxPhysicalMapAlignment = 1_GiB; |
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index d6869c228..068e71dff 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
| @@ -1237,8 +1237,10 @@ void KProcess::LoadModule(CodeSet code_set, KProcessAddress base_addr) { | |||
| 1237 | auto& buffer = m_kernel.System().DeviceMemory().buffer; | 1237 | auto& buffer = m_kernel.System().DeviceMemory().buffer; |
| 1238 | const auto& code = code_set.CodeSegment(); | 1238 | const auto& code = code_set.CodeSegment(); |
| 1239 | const auto& patch = code_set.PatchSegment(); | 1239 | const auto& patch = code_set.PatchSegment(); |
| 1240 | buffer.Protect(GetInteger(base_addr + code.addr), code.size, true, true, true); | 1240 | buffer.Protect(GetInteger(base_addr + code.addr), code.size, |
| 1241 | buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, true, true, true); | 1241 | Common::MemoryPermission::Read | Common::MemoryPermission::Execute); |
| 1242 | buffer.Protect(GetInteger(base_addr + patch.addr), patch.size, | ||
| 1243 | Common::MemoryPermission::Read | Common::MemoryPermission::Execute); | ||
| 1242 | ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None); | 1244 | ReprotectSegment(code_set.PatchSegment(), Svc::MemoryPermission::None); |
| 1243 | } | 1245 | } |
| 1244 | #endif | 1246 | #endif |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index c7eb32c19..8176a41be 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
| @@ -10,6 +10,7 @@ | |||
| 10 | #include "common/assert.h" | 10 | #include "common/assert.h" |
| 11 | #include "common/atomic_ops.h" | 11 | #include "common/atomic_ops.h" |
| 12 | #include "common/common_types.h" | 12 | #include "common/common_types.h" |
| 13 | #include "common/heap_tracker.h" | ||
| 13 | #include "common/logging/log.h" | 14 | #include "common/logging/log.h" |
| 14 | #include "common/page_table.h" | 15 | #include "common/page_table.h" |
| 15 | #include "common/scope_exit.h" | 16 | #include "common/scope_exit.h" |
| @@ -52,10 +53,18 @@ struct Memory::Impl { | |||
| 52 | } else { | 53 | } else { |
| 53 | current_page_table->fastmem_arena = nullptr; | 54 | current_page_table->fastmem_arena = nullptr; |
| 54 | } | 55 | } |
| 56 | |||
| 57 | #ifdef __linux__ | ||
| 58 | heap_tracker.emplace(system.DeviceMemory().buffer); | ||
| 59 | buffer = std::addressof(*heap_tracker); | ||
| 60 | #else | ||
| 61 | buffer = std::addressof(system.DeviceMemory().buffer); | ||
| 62 | #endif | ||
| 55 | } | 63 | } |
| 56 | 64 | ||
| 57 | void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, | 65 | void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 58 | Common::PhysicalAddress target, Common::MemoryPermission perms) { | 66 | Common::PhysicalAddress target, Common::MemoryPermission perms, |
| 67 | bool separate_heap) { | ||
| 59 | ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); | 68 | ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); |
| 60 | ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); | 69 | ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); |
| 61 | ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", | 70 | ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", |
| @@ -64,19 +73,20 @@ struct Memory::Impl { | |||
| 64 | Common::PageType::Memory); | 73 | Common::PageType::Memory); |
| 65 | 74 | ||
| 66 | if (current_page_table->fastmem_arena) { | 75 | if (current_page_table->fastmem_arena) { |
| 67 | system.DeviceMemory().buffer.Map(GetInteger(base), | 76 | buffer->Map(GetInteger(base), GetInteger(target) - DramMemoryMap::Base, size, perms, |
| 68 | GetInteger(target) - DramMemoryMap::Base, size, perms); | 77 | separate_heap); |
| 69 | } | 78 | } |
| 70 | } | 79 | } |
| 71 | 80 | ||
| 72 | void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { | 81 | void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 82 | bool separate_heap) { | ||
| 73 | ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); | 83 | ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size); |
| 74 | ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); | 84 | ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", GetInteger(base)); |
| 75 | MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, | 85 | MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0, |
| 76 | Common::PageType::Unmapped); | 86 | Common::PageType::Unmapped); |
| 77 | 87 | ||
| 78 | if (current_page_table->fastmem_arena) { | 88 | if (current_page_table->fastmem_arena) { |
| 79 | system.DeviceMemory().buffer.Unmap(GetInteger(base), size); | 89 | buffer->Unmap(GetInteger(base), size, separate_heap); |
| 80 | } | 90 | } |
| 81 | } | 91 | } |
| 82 | 92 | ||
| @@ -89,11 +99,6 @@ struct Memory::Impl { | |||
| 89 | return; | 99 | return; |
| 90 | } | 100 | } |
| 91 | 101 | ||
| 92 | const bool is_r = True(perms & Common::MemoryPermission::Read); | ||
| 93 | const bool is_w = True(perms & Common::MemoryPermission::Write); | ||
| 94 | const bool is_x = | ||
| 95 | True(perms & Common::MemoryPermission::Execute) && Settings::IsNceEnabled(); | ||
| 96 | |||
| 97 | u64 protect_bytes{}; | 102 | u64 protect_bytes{}; |
| 98 | u64 protect_begin{}; | 103 | u64 protect_begin{}; |
| 99 | for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { | 104 | for (u64 addr = vaddr; addr < vaddr + size; addr += YUZU_PAGESIZE) { |
| @@ -102,8 +107,7 @@ struct Memory::Impl { | |||
| 102 | switch (page_type) { | 107 | switch (page_type) { |
| 103 | case Common::PageType::RasterizerCachedMemory: | 108 | case Common::PageType::RasterizerCachedMemory: |
| 104 | if (protect_bytes > 0) { | 109 | if (protect_bytes > 0) { |
| 105 | system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, | 110 | buffer->Protect(protect_begin, protect_bytes, perms); |
| 106 | is_x); | ||
| 107 | protect_bytes = 0; | 111 | protect_bytes = 0; |
| 108 | } | 112 | } |
| 109 | break; | 113 | break; |
| @@ -116,7 +120,7 @@ struct Memory::Impl { | |||
| 116 | } | 120 | } |
| 117 | 121 | ||
| 118 | if (protect_bytes > 0) { | 122 | if (protect_bytes > 0) { |
| 119 | system.DeviceMemory().buffer.Protect(protect_begin, protect_bytes, is_r, is_w, is_x); | 123 | buffer->Protect(protect_begin, protect_bytes, perms); |
| 120 | } | 124 | } |
| 121 | } | 125 | } |
| 122 | 126 | ||
| @@ -486,7 +490,9 @@ struct Memory::Impl { | |||
| 486 | } | 490 | } |
| 487 | 491 | ||
| 488 | if (current_page_table->fastmem_arena) { | 492 | if (current_page_table->fastmem_arena) { |
| 489 | system.DeviceMemory().buffer.Protect(vaddr, size, !debug, !debug); | 493 | const auto perm{debug ? Common::MemoryPermission{} |
| 494 | : Common::MemoryPermission::ReadWrite}; | ||
| 495 | buffer->Protect(vaddr, size, perm); | ||
| 490 | } | 496 | } |
| 491 | 497 | ||
| 492 | // Iterate over a contiguous CPU address space, marking/unmarking the region. | 498 | // Iterate over a contiguous CPU address space, marking/unmarking the region. |
| @@ -543,9 +549,14 @@ struct Memory::Impl { | |||
| 543 | } | 549 | } |
| 544 | 550 | ||
| 545 | if (current_page_table->fastmem_arena) { | 551 | if (current_page_table->fastmem_arena) { |
| 546 | const bool is_read_enable = | 552 | Common::MemoryPermission perm{}; |
| 547 | !Settings::values.use_reactive_flushing.GetValue() || !cached; | 553 | if (!Settings::values.use_reactive_flushing.GetValue() || !cached) { |
| 548 | system.DeviceMemory().buffer.Protect(vaddr, size, is_read_enable, !cached); | 554 | perm |= Common::MemoryPermission::Read; |
| 555 | } | ||
| 556 | if (!cached) { | ||
| 557 | perm |= Common::MemoryPermission::Write; | ||
| 558 | } | ||
| 559 | buffer->Protect(vaddr, size, perm); | ||
| 549 | } | 560 | } |
| 550 | 561 | ||
| 551 | // Iterate over a contiguous CPU address space, which corresponds to the specified GPU | 562 | // Iterate over a contiguous CPU address space, which corresponds to the specified GPU |
| @@ -856,6 +867,13 @@ struct Memory::Impl { | |||
| 856 | std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; | 867 | std::array<GPUDirtyState, Core::Hardware::NUM_CPU_CORES> rasterizer_write_areas{}; |
| 857 | std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; | 868 | std::span<Core::GPUDirtyMemoryManager> gpu_dirty_managers; |
| 858 | std::mutex sys_core_guard; | 869 | std::mutex sys_core_guard; |
| 870 | |||
| 871 | std::optional<Common::HeapTracker> heap_tracker; | ||
| 872 | #ifdef __linux__ | ||
| 873 | Common::HeapTracker* buffer{}; | ||
| 874 | #else | ||
| 875 | Common::HostMemory* buffer{}; | ||
| 876 | #endif | ||
| 859 | }; | 877 | }; |
| 860 | 878 | ||
| 861 | Memory::Memory(Core::System& system_) : system{system_} { | 879 | Memory::Memory(Core::System& system_) : system{system_} { |
| @@ -873,12 +891,14 @@ void Memory::SetCurrentPageTable(Kernel::KProcess& process) { | |||
| 873 | } | 891 | } |
| 874 | 892 | ||
| 875 | void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, | 893 | void Memory::MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 876 | Common::PhysicalAddress target, Common::MemoryPermission perms) { | 894 | Common::PhysicalAddress target, Common::MemoryPermission perms, |
| 877 | impl->MapMemoryRegion(page_table, base, size, target, perms); | 895 | bool separate_heap) { |
| 896 | impl->MapMemoryRegion(page_table, base, size, target, perms, separate_heap); | ||
| 878 | } | 897 | } |
| 879 | 898 | ||
| 880 | void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size) { | 899 | void Memory::UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 881 | impl->UnmapRegion(page_table, base, size); | 900 | bool separate_heap) { |
| 901 | impl->UnmapRegion(page_table, base, size, separate_heap); | ||
| 882 | } | 902 | } |
| 883 | 903 | ||
| 884 | void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size, | 904 | void Memory::ProtectRegion(Common::PageTable& page_table, Common::ProcessAddress vaddr, u64 size, |
| @@ -1048,7 +1068,9 @@ void Memory::FlushRegion(Common::ProcessAddress dest_addr, size_t size) { | |||
| 1048 | } | 1068 | } |
| 1049 | 1069 | ||
| 1050 | bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { | 1070 | bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { |
| 1051 | bool mapped = true; | 1071 | [[maybe_unused]] bool mapped = true; |
| 1072 | [[maybe_unused]] bool rasterizer = false; | ||
| 1073 | |||
| 1052 | u8* const ptr = impl->GetPointerImpl( | 1074 | u8* const ptr = impl->GetPointerImpl( |
| 1053 | GetInteger(vaddr), | 1075 | GetInteger(vaddr), |
| 1054 | [&] { | 1076 | [&] { |
| @@ -1056,8 +1078,26 @@ bool Memory::InvalidateNCE(Common::ProcessAddress vaddr, size_t size) { | |||
| 1056 | GetInteger(vaddr)); | 1078 | GetInteger(vaddr)); |
| 1057 | mapped = false; | 1079 | mapped = false; |
| 1058 | }, | 1080 | }, |
| 1059 | [&] { impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); }); | 1081 | [&] { |
| 1082 | impl->system.GPU().InvalidateRegion(GetInteger(vaddr), size); | ||
| 1083 | rasterizer = true; | ||
| 1084 | }); | ||
| 1085 | |||
| 1086 | #ifdef __linux__ | ||
| 1087 | if (!rasterizer && mapped) { | ||
| 1088 | impl->buffer->DeferredMapSeparateHeap(GetInteger(vaddr)); | ||
| 1089 | } | ||
| 1090 | #endif | ||
| 1091 | |||
| 1060 | return mapped && ptr != nullptr; | 1092 | return mapped && ptr != nullptr; |
| 1061 | } | 1093 | } |
| 1062 | 1094 | ||
| 1095 | bool Memory::InvalidateSeparateHeap(void* fault_address) { | ||
| 1096 | #ifdef __linux__ | ||
| 1097 | return impl->buffer->DeferredMapSeparateHeap(static_cast<u8*>(fault_address)); | ||
| 1098 | #else | ||
| 1099 | return false; | ||
| 1100 | #endif | ||
| 1101 | } | ||
| 1102 | |||
| 1063 | } // namespace Core::Memory | 1103 | } // namespace Core::Memory |
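Note how `Memory::Impl` now funnels all fastmem operations through a `buffer` pointer whose static type differs per platform: `Common::HeapTracker*` on Linux, `Common::HostMemory*` elsewhere. Because both classes expose the same `Map`/`Unmap`/`Protect` shape, call sites stay identical. A reduced sketch of that arrangement is below, with toy types standing in for the real interfaces.

```cpp
// Platform-switched backend pointer: only the pointer's static type changes
// per platform; every call site is written once.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <optional>

struct HostMemory {
    void Map(std::size_t va, std::size_t pa, std::size_t len) {
        std::printf("host map: va=%zx pa=%zx len=%zx\n", va, pa, len);
    }
};

struct HeapTracker {
    explicit HeapTracker(HostMemory& host) : m_host{host} {}
    void Map(std::size_t va, std::size_t pa, std::size_t len) {
        // The real tracker may defer this call until the page faults.
        m_host.Map(va, pa, len);
    }
    HostMemory& m_host;
};

struct Impl {
    HostMemory host;
    std::optional<HeapTracker> heap_tracker;
#ifdef __linux__
    HeapTracker* buffer{};
#else
    HostMemory* buffer{};
#endif

    Impl() {
#ifdef __linux__
        heap_tracker.emplace(host);
        buffer = std::addressof(*heap_tracker);
#else
        buffer = std::addressof(host);
#endif
    }
};

int main() {
    Impl impl;
    impl.buffer->Map(0x1000, 0x0, 0x1000); // identical call on every platform
}
```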
diff --git a/src/core/memory.h b/src/core/memory.h
index c1879e78f..3e4d03f57 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
| @@ -86,7 +86,8 @@ public: | |||
| 86 | * @param perms The permissions to map the memory with. | 86 | * @param perms The permissions to map the memory with. |
| 87 | */ | 87 | */ |
| 88 | void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, | 88 | void MapMemoryRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 89 | Common::PhysicalAddress target, Common::MemoryPermission perms); | 89 | Common::PhysicalAddress target, Common::MemoryPermission perms, |
| 90 | bool separate_heap); | ||
| 90 | 91 | ||
| 91 | /** | 92 | /** |
| 92 | * Unmaps a region of the emulated process address space. | 93 | * Unmaps a region of the emulated process address space. |
| @@ -95,7 +96,8 @@ public: | |||
| 95 | * @param base The address to begin unmapping at. | 96 | * @param base The address to begin unmapping at. |
| 96 | * @param size The amount of bytes to unmap. | 97 | * @param size The amount of bytes to unmap. |
| 97 | */ | 98 | */ |
| 98 | void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size); | 99 | void UnmapRegion(Common::PageTable& page_table, Common::ProcessAddress base, u64 size, |
| 100 | bool separate_heap); | ||
| 99 | 101 | ||
| 100 | /** | 102 | /** |
| 101 | * Protects a region of the emulated process address space with the new permissions. | 103 | * Protects a region of the emulated process address space with the new permissions. |
| @@ -486,6 +488,7 @@ public: | |||
| 486 | void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); | 488 | void SetGPUDirtyManagers(std::span<Core::GPUDirtyMemoryManager> managers); |
| 487 | void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); | 489 | void InvalidateRegion(Common::ProcessAddress dest_addr, size_t size); |
| 488 | bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); | 490 | bool InvalidateNCE(Common::ProcessAddress vaddr, size_t size); |
| 491 | bool InvalidateSeparateHeap(void* fault_address); | ||
| 489 | void FlushRegion(Common::ProcessAddress dest_addr, size_t size); | 492 | void FlushRegion(Common::ProcessAddress dest_addr, size_t size); |
| 490 | 493 | ||
| 491 | private: | 494 | private: |
diff --git a/src/tests/common/host_memory.cpp b/src/tests/common/host_memory.cpp
index 1a28e862b..cb040c942 100644
--- a/src/tests/common/host_memory.cpp
+++ b/src/tests/common/host_memory.cpp
| @@ -12,6 +12,7 @@ using namespace Common::Literals; | |||
| 12 | static constexpr size_t VIRTUAL_SIZE = 1ULL << 39; | 12 | static constexpr size_t VIRTUAL_SIZE = 1ULL << 39; |
| 13 | static constexpr size_t BACKING_SIZE = 4_GiB; | 13 | static constexpr size_t BACKING_SIZE = 4_GiB; |
| 14 | static constexpr auto PERMS = Common::MemoryPermission::ReadWrite; | 14 | static constexpr auto PERMS = Common::MemoryPermission::ReadWrite; |
| 15 | static constexpr auto HEAP = false; | ||
| 15 | 16 | ||
| 16 | TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") { | 17 | TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") { |
| 17 | { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); } | 18 | { HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); } |
| @@ -20,7 +21,7 @@ TEST_CASE("HostMemory: Initialize and deinitialize", "[common]") { | |||
| 20 | 21 | ||
| 21 | TEST_CASE("HostMemory: Simple map", "[common]") { | 22 | TEST_CASE("HostMemory: Simple map", "[common]") { |
| 22 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 23 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 23 | mem.Map(0x5000, 0x8000, 0x1000, PERMS); | 24 | mem.Map(0x5000, 0x8000, 0x1000, PERMS, HEAP); |
| 24 | 25 | ||
| 25 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; | 26 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; |
| 26 | data[0] = 50; | 27 | data[0] = 50; |
| @@ -29,8 +30,8 @@ TEST_CASE("HostMemory: Simple map", "[common]") { | |||
| 29 | 30 | ||
| 30 | TEST_CASE("HostMemory: Simple mirror map", "[common]") { | 31 | TEST_CASE("HostMemory: Simple mirror map", "[common]") { |
| 31 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 32 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 32 | mem.Map(0x5000, 0x3000, 0x2000, PERMS); | 33 | mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP); |
| 33 | mem.Map(0x8000, 0x4000, 0x1000, PERMS); | 34 | mem.Map(0x8000, 0x4000, 0x1000, PERMS, HEAP); |
| 34 | 35 | ||
| 35 | volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000; | 36 | volatile u8* const mirror_a = mem.VirtualBasePointer() + 0x5000; |
| 36 | volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000; | 37 | volatile u8* const mirror_b = mem.VirtualBasePointer() + 0x8000; |
| @@ -40,116 +41,116 @@ TEST_CASE("HostMemory: Simple mirror map", "[common]") { | |||
| 40 | 41 | ||
| 41 | TEST_CASE("HostMemory: Simple unmap", "[common]") { | 42 | TEST_CASE("HostMemory: Simple unmap", "[common]") { |
| 42 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 43 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 43 | mem.Map(0x5000, 0x3000, 0x2000, PERMS); | 44 | mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP); |
| 44 | 45 | ||
| 45 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; | 46 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; |
| 46 | data[75] = 50; | 47 | data[75] = 50; |
| 47 | REQUIRE(data[75] == 50); | 48 | REQUIRE(data[75] == 50); |
| 48 | 49 | ||
| 49 | mem.Unmap(0x5000, 0x2000); | 50 | mem.Unmap(0x5000, 0x2000, HEAP); |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 52 | TEST_CASE("HostMemory: Simple unmap and remap", "[common]") { | 53 | TEST_CASE("HostMemory: Simple unmap and remap", "[common]") { |
| 53 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 54 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 54 | mem.Map(0x5000, 0x3000, 0x2000, PERMS); | 55 | mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP); |
| 55 | 56 | ||
| 56 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; | 57 | volatile u8* const data = mem.VirtualBasePointer() + 0x5000; |
| 57 | data[0] = 50; | 58 | data[0] = 50; |
| 58 | REQUIRE(data[0] == 50); | 59 | REQUIRE(data[0] == 50); |
| 59 | 60 | ||
| 60 | mem.Unmap(0x5000, 0x2000); | 61 | mem.Unmap(0x5000, 0x2000, HEAP); |
| 61 | 62 | ||
| 62 | mem.Map(0x5000, 0x3000, 0x2000, PERMS); | 63 | mem.Map(0x5000, 0x3000, 0x2000, PERMS, HEAP); |
| 63 | REQUIRE(data[0] == 50); | 64 | REQUIRE(data[0] == 50); |
| 64 | 65 | ||
| 65 | mem.Map(0x7000, 0x2000, 0x5000, PERMS); | 66 | mem.Map(0x7000, 0x2000, 0x5000, PERMS, HEAP); |
| 66 | REQUIRE(data[0x3000] == 50); | 67 | REQUIRE(data[0x3000] == 50); |
| 67 | } | 68 | } |
| 68 | 69 | ||
| 69 | TEST_CASE("HostMemory: Nieche allocation", "[common]") { | 70 | TEST_CASE("HostMemory: Nieche allocation", "[common]") { |
| 70 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 71 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 71 | mem.Map(0x0000, 0, 0x20000, PERMS); | 72 | mem.Map(0x0000, 0, 0x20000, PERMS, HEAP); |
| 72 | mem.Unmap(0x0000, 0x4000); | 73 | mem.Unmap(0x0000, 0x4000, HEAP); |
| 73 | mem.Map(0x1000, 0, 0x2000, PERMS); | 74 | mem.Map(0x1000, 0, 0x2000, PERMS, HEAP); |
| 74 | mem.Map(0x3000, 0, 0x1000, PERMS); | 75 | mem.Map(0x3000, 0, 0x1000, PERMS, HEAP); |
| 75 | mem.Map(0, 0, 0x1000, PERMS); | 76 | mem.Map(0, 0, 0x1000, PERMS, HEAP); |
| 76 | } | 77 | } |
| 77 | 78 | ||
| 78 | TEST_CASE("HostMemory: Full unmap", "[common]") { | 79 | TEST_CASE("HostMemory: Full unmap", "[common]") { |
| 79 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 80 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 80 | mem.Map(0x8000, 0, 0x4000, PERMS); | 81 | mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); |
| 81 | mem.Unmap(0x8000, 0x4000); | 82 | mem.Unmap(0x8000, 0x4000, HEAP); |
| 82 | mem.Map(0x6000, 0, 0x16000, PERMS); | 83 | mem.Map(0x6000, 0, 0x16000, PERMS, HEAP); |
| 83 | } | 84 | } |
| 84 | 85 | ||
| 85 | TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") { | 86 | TEST_CASE("HostMemory: Right out of bounds unmap", "[common]") { |
| 86 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 87 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 87 | mem.Map(0x0000, 0, 0x4000, PERMS); | 88 | mem.Map(0x0000, 0, 0x4000, PERMS, HEAP); |
| 88 | mem.Unmap(0x2000, 0x4000); | 89 | mem.Unmap(0x2000, 0x4000, HEAP); |
| 89 | mem.Map(0x2000, 0x80000, 0x4000, PERMS); | 90 | mem.Map(0x2000, 0x80000, 0x4000, PERMS, HEAP); |
| 90 | } | 91 | } |
| 91 | 92 | ||
| 92 | TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") { | 93 | TEST_CASE("HostMemory: Left out of bounds unmap", "[common]") { |
| 93 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 94 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 94 | mem.Map(0x8000, 0, 0x4000, PERMS); | 95 | mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); |
| 95 | mem.Unmap(0x6000, 0x4000); | 96 | mem.Unmap(0x6000, 0x4000, HEAP); |
| 96 | mem.Map(0x8000, 0, 0x2000, PERMS); | 97 | mem.Map(0x8000, 0, 0x2000, PERMS, HEAP); |
| 97 | } | 98 | } |
| 98 | 99 | ||
| 99 | TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") { | 100 | TEST_CASE("HostMemory: Multiple placeholder unmap", "[common]") { |
| 100 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 101 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 101 | mem.Map(0x0000, 0, 0x4000, PERMS); | 102 | mem.Map(0x0000, 0, 0x4000, PERMS, HEAP); |
| 102 | mem.Map(0x4000, 0, 0x1b000, PERMS); | 103 | mem.Map(0x4000, 0, 0x1b000, PERMS, HEAP); |
| 103 | mem.Unmap(0x3000, 0x1c000); | 104 | mem.Unmap(0x3000, 0x1c000, HEAP); |
| 104 | mem.Map(0x3000, 0, 0x20000, PERMS); | 105 | mem.Map(0x3000, 0, 0x20000, PERMS, HEAP); |
| 105 | } | 106 | } |
| 106 | 107 | ||
| 107 | TEST_CASE("HostMemory: Unmap between placeholders", "[common]") { | 108 | TEST_CASE("HostMemory: Unmap between placeholders", "[common]") { |
| 108 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 109 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 109 | mem.Map(0x0000, 0, 0x4000, PERMS); | 110 | mem.Map(0x0000, 0, 0x4000, PERMS, HEAP); |
| 110 | mem.Map(0x4000, 0, 0x4000, PERMS); | 111 | mem.Map(0x4000, 0, 0x4000, PERMS, HEAP); |
| 111 | mem.Unmap(0x2000, 0x4000); | 112 | mem.Unmap(0x2000, 0x4000, HEAP); |
| 112 | mem.Map(0x2000, 0, 0x4000, PERMS); | 113 | mem.Map(0x2000, 0, 0x4000, PERMS, HEAP); |
| 113 | } | 114 | } |
| 114 | 115 | ||
| 115 | TEST_CASE("HostMemory: Unmap to origin", "[common]") { | 116 | TEST_CASE("HostMemory: Unmap to origin", "[common]") { |
| 116 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 117 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 117 | mem.Map(0x4000, 0, 0x4000, PERMS); | 118 | mem.Map(0x4000, 0, 0x4000, PERMS, HEAP); |
| 118 | mem.Map(0x8000, 0, 0x4000, PERMS); | 119 | mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); |
| 119 | mem.Unmap(0x4000, 0x4000); | 120 | mem.Unmap(0x4000, 0x4000, HEAP); |
| 120 | mem.Map(0, 0, 0x4000, PERMS); | 121 | mem.Map(0, 0, 0x4000, PERMS, HEAP); |
| 121 | mem.Map(0x4000, 0, 0x4000, PERMS); | 122 | mem.Map(0x4000, 0, 0x4000, PERMS, HEAP); |
| 122 | } | 123 | } |
| 123 | 124 | ||
| 124 | TEST_CASE("HostMemory: Unmap to right", "[common]") { | 125 | TEST_CASE("HostMemory: Unmap to right", "[common]") { |
| 125 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 126 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 126 | mem.Map(0x4000, 0, 0x4000, PERMS); | 127 | mem.Map(0x4000, 0, 0x4000, PERMS, HEAP); |
| 127 | mem.Map(0x8000, 0, 0x4000, PERMS); | 128 | mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); |
| 128 | mem.Unmap(0x8000, 0x4000); | 129 | mem.Unmap(0x8000, 0x4000, HEAP); |
| 129 | mem.Map(0x8000, 0, 0x4000, PERMS); | 130 | mem.Map(0x8000, 0, 0x4000, PERMS, HEAP); |
| 130 | } | 131 | } |
| 131 | 132 | ||
| 132 | TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") { | 133 | TEST_CASE("HostMemory: Partial right unmap check bindings", "[common]") { |
| 133 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 134 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 134 | mem.Map(0x4000, 0x10000, 0x4000, PERMS); | 135 | mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP); |
| 135 | 136 | ||
| 136 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; | 137 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; |
| 137 | ptr[0x1000] = 17; | 138 | ptr[0x1000] = 17; |
| 138 | 139 | ||
| 139 | mem.Unmap(0x6000, 0x2000); | 140 | mem.Unmap(0x6000, 0x2000, HEAP); |
| 140 | 141 | ||
| 141 | REQUIRE(ptr[0x1000] == 17); | 142 | REQUIRE(ptr[0x1000] == 17); |
| 142 | } | 143 | } |
| 143 | 144 | ||
| 144 | TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") { | 145 | TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") { |
| 145 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 146 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 146 | mem.Map(0x4000, 0x10000, 0x4000, PERMS); | 147 | mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP); |
| 147 | 148 | ||
| 148 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; | 149 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; |
| 149 | ptr[0x3000] = 19; | 150 | ptr[0x3000] = 19; |
| 150 | ptr[0x3fff] = 12; | 151 | ptr[0x3fff] = 12; |
| 151 | 152 | ||
| 152 | mem.Unmap(0x4000, 0x2000); | 153 | mem.Unmap(0x4000, 0x2000, HEAP); |
| 153 | 154 | ||
| 154 | REQUIRE(ptr[0x3000] == 19); | 155 | REQUIRE(ptr[0x3000] == 19); |
| 155 | REQUIRE(ptr[0x3fff] == 12); | 156 | REQUIRE(ptr[0x3fff] == 12); |
| @@ -157,13 +158,13 @@ TEST_CASE("HostMemory: Partial left unmap check bindings", "[common]") { | |||
| 157 | 158 | ||
| 158 | TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") { | 159 | TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") { |
| 159 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 160 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 160 | mem.Map(0x4000, 0x10000, 0x4000, PERMS); | 161 | mem.Map(0x4000, 0x10000, 0x4000, PERMS, HEAP); |
| 161 | 162 | ||
| 162 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; | 163 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; |
| 163 | ptr[0x0000] = 19; | 164 | ptr[0x0000] = 19; |
| 164 | ptr[0x3fff] = 12; | 165 | ptr[0x3fff] = 12; |
| 165 | 166 | ||
| 166 | mem.Unmap(0x1000, 0x2000); | 167 | mem.Unmap(0x1000, 0x2000, HEAP); |
| 167 | 168 | ||
| 168 | REQUIRE(ptr[0x0000] == 19); | 169 | REQUIRE(ptr[0x0000] == 19); |
| 169 | REQUIRE(ptr[0x3fff] == 12); | 170 | REQUIRE(ptr[0x3fff] == 12); |
| @@ -171,14 +172,14 @@ TEST_CASE("HostMemory: Partial middle unmap check bindings", "[common]") { | |||
| 171 | 172 | ||
| 172 | TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") { | 173 | TEST_CASE("HostMemory: Partial sparse middle unmap and check bindings", "[common]") { |
| 173 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); | 174 | HostMemory mem(BACKING_SIZE, VIRTUAL_SIZE); |
| 174 | mem.Map(0x4000, 0x10000, 0x2000, PERMS); | 175 | mem.Map(0x4000, 0x10000, 0x2000, PERMS, HEAP); |
| 175 | mem.Map(0x6000, 0x20000, 0x2000, PERMS); | 176 | mem.Map(0x6000, 0x20000, 0x2000, PERMS, HEAP); |
| 176 | 177 | ||
| 177 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; | 178 | volatile u8* const ptr = mem.VirtualBasePointer() + 0x4000; |
| 178 | ptr[0x0000] = 19; | 179 | ptr[0x0000] = 19; |
| 179 | ptr[0x3fff] = 12; | 180 | ptr[0x3fff] = 12; |
| 180 | 181 | ||
| 181 | mem.Unmap(0x5000, 0x2000); | 182 | mem.Unmap(0x5000, 0x2000, HEAP); |
| 182 | 183 | ||
| 183 | REQUIRE(ptr[0x0000] == 19); | 184 | REQUIRE(ptr[0x0000] == 19); |
| 184 | REQUIRE(ptr[0x3fff] == 12); | 185 | REQUIRE(ptr[0x3fff] == 12); |