Diffstat (limited to 'src/common/heap_tracker.cpp')

 -rw-r--r--   src/common/heap_tracker.cpp   263

1 file changed, 263 insertions, 0 deletions
diff --git a/src/common/heap_tracker.cpp b/src/common/heap_tracker.cpp
new file mode 100644
index 000000000..95dc8aa1e
--- /dev/null
+++ b/src/common/heap_tracker.cpp
@@ -0,0 +1,263 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later

#include <algorithm>
#include <vector>

#include "common/heap_tracker.h"
#include "common/logging/log.h"

namespace Common {

namespace {

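// Cap on the number of separate-heap mappings kept resident on the host at
// once. This keeps the process well under typical OS per-process mapping
// limits (e.g. Linux's default vm.max_map_count of 65530).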
constexpr s64 MaxResidentMapCount = 0x8000;

} // namespace

HeapTracker::HeapTracker(Common::HostMemory& buffer) : m_buffer(buffer) {}
HeapTracker::~HeapTracker() = default;

void HeapTracker::Map(size_t virtual_offset, size_t host_offset, size_t length,
                      MemoryPermission perm, bool is_separate_heap) {
    // When mapping other memory, map pages immediately.
    if (!is_separate_heap) {
        m_buffer.Map(virtual_offset, host_offset, length, perm, false);
        return;
    }

    {
        // We are mapping part of a separate heap.
        std::scoped_lock lk{m_lock};

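        // Allocate the tracking record for this region. The tracker owns the
        // allocation; it is freed when the region is unmapped.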
        auto* const map = new SeparateHeapMap{
            .vaddr = virtual_offset,
            .paddr = host_offset,
            .size = length,
            .tick = m_tick++,
            .perm = perm,
            .is_resident = false,
        };

        // Insert into mappings.
        m_map_count++;
        m_mappings.insert(*map);
    }

    // Finally, map through the deferred path, which makes the new region
    // resident and applies its permissions.
    this->DeferredMapSeparateHeap(virtual_offset);
}

void HeapTracker::Unmap(size_t virtual_offset, size_t size, bool is_separate_heap) {
    // If this is a separate heap...
    if (is_separate_heap) {
        std::scoped_lock lk{m_lock};

        const SeparateHeapMap key{
            .vaddr = virtual_offset,
        };

        // Split at the boundaries of the region we are removing.
        this->SplitHeapMapLocked(virtual_offset);
        this->SplitHeapMapLocked(virtual_offset + size);

        // Erase all mappings in range.
        auto it = m_mappings.find(key);
        while (it != m_mappings.end() && it->vaddr < virtual_offset + size) {
            // Get underlying item.
            auto* const item = std::addressof(*it);

            // If resident, erase from resident map.
            if (item->is_resident) {
                ASSERT(--m_resident_map_count >= 0);
                m_resident_mappings.erase(m_resident_mappings.iterator_to(*item));
            }

            // Erase from map.
            it = m_mappings.erase(it);
            ASSERT(--m_map_count >= 0);

            // Free the item.
            delete item;
        }
    }

    // Unmap pages.
    m_buffer.Unmap(virtual_offset, size, false);
}

void HeapTracker::Protect(size_t virtual_offset, size_t size, MemoryPermission perm) {
    // Ensure no rebuild occurs while reprotecting.
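    // (RebuildSeparateHeapAddressSpace() takes m_rebuild_lock exclusively, so
    // holding it shared here blocks eviction without serializing callers.)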
    std::shared_lock lk{m_rebuild_lock};

    // Split at the boundaries of the region we are reprotecting.
    this->SplitHeapMap(virtual_offset, size);

    // Track our position within [virtual_offset, virtual_offset + size).
    VAddr cur = virtual_offset;
    VAddr end = virtual_offset + size;

    while (cur < end) {
        VAddr next = cur;
        bool should_protect = false;

        {
            std::scoped_lock lk2{m_lock};

            const SeparateHeapMap key{
                .vaddr = next,
            };

            // Try to get the next mapping corresponding to this address.
            const auto it = m_mappings.nfind(key);

            if (it == m_mappings.end()) {
                // There are no separate heap mappings remaining.
                next = end;
                should_protect = true;
            } else if (it->vaddr == cur) {
                // We are in range.
                // Update permission bits.
                it->perm = perm;

                // Determine next address and whether we should protect. Non-resident
                // mappings are skipped here; they pick up the updated permissions
                // when they are next mapped in.
                next = cur + it->size;
                should_protect = it->is_resident;
            } else /* if (it->vaddr > cur) */ {
                // We weren't in range, but there is a block coming up that will be.
                next = it->vaddr;
                should_protect = true;
            }
        }

        // Clamp to end.
        next = std::min(next, end);

        // Reprotect, if we need to.
        if (should_protect) {
            m_buffer.Protect(cur, next - cur, perm);
        }

        // Advance.
        cur = next;
    }
}

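// Fault-handler entry point: translates a faulting host pointer back to a
// guest offset before attempting the deferred map.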
bool HeapTracker::DeferredMapSeparateHeap(u8* fault_address) {
    if (m_buffer.IsInVirtualRange(fault_address)) {
        return this->DeferredMapSeparateHeap(fault_address - m_buffer.VirtualBasePointer());
    }

    return false;
}

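// Makes the separate-heap mapping at virtual_offset resident, then rebuilds
// the resident set if it has grown past MaxResidentMapCount. Returns false if
// the offset does not belong to a non-resident separate-heap mapping.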
bool HeapTracker::DeferredMapSeparateHeap(size_t virtual_offset) {
    bool rebuild_required = false;

    {
        std::scoped_lock lk{m_lock};

        // Check to ensure this was a non-resident separate heap mapping.
        const auto it = this->GetNearestHeapMapLocked(virtual_offset);
        if (it == m_mappings.end() || it->is_resident) {
            return false;
        }

        // Update tick before possible rebuild.
        it->tick = m_tick++;

        // Check if we need to rebuild.
        if (m_resident_map_count > MaxResidentMapCount) {
            rebuild_required = true;
        }

        // Map the area.
        m_buffer.Map(it->vaddr, it->paddr, it->size, it->perm, false);

        // This map is now resident.
        it->is_resident = true;
        m_resident_map_count++;
        m_resident_mappings.insert(*it);
    }

    if (rebuild_required) {
        // A rebuild was required, so perform it now.
        this->RebuildSeparateHeapAddressSpace();
    }

    return true;
}

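// Evicts resident mappings until the count is back under budget. Assuming the
// resident tree is ordered by tick (see heap_tracker.h), the least recently
// used mappings are evicted first.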
void HeapTracker::RebuildSeparateHeapAddressSpace() {
    std::scoped_lock lk{m_rebuild_lock, m_lock};

    ASSERT(!m_resident_mappings.empty());

    // Unmap so we have at least 4 maps available.
    const size_t desired_count = std::min(m_resident_map_count, MaxResidentMapCount - 4);
    const size_t evict_count = m_resident_map_count - desired_count;
    auto it = m_resident_mappings.begin();

    for (size_t i = 0; i < evict_count && it != m_resident_mappings.end(); i++) {
        // Unmark and unmap.
        it->is_resident = false;
        m_buffer.Unmap(it->vaddr, it->size, false);

        // Advance.
        ASSERT(--m_resident_map_count >= 0);
        it = m_resident_mappings.erase(it);
    }
}

void HeapTracker::SplitHeapMap(VAddr offset, size_t size) {
    std::scoped_lock lk{m_lock};

    this->SplitHeapMapLocked(offset);
    this->SplitHeapMapLocked(offset + size);
}

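// Splits the mapping containing offset in two at that address; for example,
// splitting [0x1000, 0x3000) at 0x2000 yields [0x1000, 0x2000) and
// [0x2000, 0x3000). No-op if offset is already a mapping boundary.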
void HeapTracker::SplitHeapMapLocked(VAddr offset) {
    const auto it = this->GetNearestHeapMapLocked(offset);
    if (it == m_mappings.end() || it->vaddr == offset) {
        // Not contained or no split required.
        return;
    }

    // Cache the original values.
    auto* const left = std::addressof(*it);
    const size_t orig_size = left->size;

    // Adjust the left map.
    const size_t left_size = offset - left->vaddr;
    left->size = left_size;

    // Create the new right map.
    auto* const right = new SeparateHeapMap{
        .vaddr = left->vaddr + left_size,
        .paddr = left->paddr + left_size,
        .size = orig_size - left_size,
        .tick = left->tick,
        .perm = left->perm,
        .is_resident = left->is_resident,
    };

    // Insert the new right map.
    m_map_count++;
    m_mappings.insert(*right);

    // If resident, also insert into resident map.
    if (right->is_resident) {
        m_resident_mappings.insert(*right);
        m_resident_map_count++;
    }
}

HeapTracker::AddrTree::iterator HeapTracker::GetNearestHeapMapLocked(VAddr offset) {
    const SeparateHeapMap key{
        .vaddr = offset,
    };

    return m_mappings.find(key);
}

} // namespace Common