author    MerryMage 2018-01-27 15:16:39 +0000
committer MerryMage 2018-01-27 15:16:39 +0000
commit    738f91a57da7c129d1ee85b7abbf6858f8669ee3 (patch)
tree      3ef164d8e79c0aea6ab72dc9b8fa78877a82338a /src/core/memory.cpp
parent    externals: Update dynarmic (diff)
memory: Replace all memory hooking with Special regions
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp | 480
1 file changed, 163 insertions(+), 317 deletions(-)
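
The key data-structure change: page_table.special_regions was a flat container of {base, size, handler} records scanned linearly, and becomes a boost::icl::interval_map from virtual-address intervals to sets of hooks. Overlapping insertions aggregate by set union, so an MMIO handler and any number of debug hooks can watch the same pages and all be found in one range query. A minimal standalone sketch of that mechanism follows (the Hook type and the addresses are invented for illustration; the real code stores SpecialRegion values keyed the same way, as the diff below shows):

#include <iostream>
#include <set>
#include <utility>
#include <boost/icl/interval_map.hpp>

using VAddr = unsigned long long;

// Stand-in for the diff's SpecialRegion; only operator< is needed so that
// std::set can hold it and interval_map can aggregate by set union.
struct Hook {
    int id;
    bool operator<(const Hook& other) const { return id < other.id; }
};

int main() {
    boost::icl::interval_map<VAddr, std::set<Hook>> special_regions;

    // add() unions the handler sets wherever intervals overlap, which is why
    // MapIoRegion and AddDebugHook can hook the same pages independently.
    auto iv1 = boost::icl::discrete_interval<VAddr>::closed(0x1000, 0x1fff);
    auto iv2 = boost::icl::discrete_interval<VAddr>::closed(0x1800, 0x2fff);
    special_regions.add(std::make_pair(iv1, std::set<Hook>{Hook{1}}));
    special_regions.add(std::make_pair(iv2, std::set<Hook>{Hook{2}}));

    // equal_range() visits every stored interval overlapping the query,
    // mirroring GetSpecialHandlers() in the diff.
    auto query = boost::icl::discrete_interval<VAddr>::closed(0x1900, 0x1903);
    auto range = special_regions.equal_range(query);
    for (auto it = range.first; it != range.second; ++it)
        for (const Hook& hook : it->second)
            std::cout << "hook " << hook.id << " covers the queried range\n";
    // Prints hooks 1 and 2: both intervals contain 0x1900-0x1903.

    // subtract() removes one handler without disturbing the others,
    // which is how RemoveDebugHook works.
    special_regions.subtract(std::make_pair(iv1, std::set<Hook>{Hook{1}}));
    return 0;
}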
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index a3d2d4951..f658271a5 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -2,8 +2,10 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <algorithm>
 #include <array>
 #include <cstring>
+#include <boost/optional.hpp>
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
@@ -12,7 +14,6 @@
 #include "core/core.h"
 #include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
-#include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
 #include "video_core/renderer_base.h"
@@ -40,16 +41,12 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
-    RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
-                                 FlushMode::FlushAndInvalidate);
-
     VAddr end = base + size;
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
         page_table.attributes[base] = type;
         page_table.pointers[base] = memory;
-        page_table.cached_res_count[base] = 0;
 
         base += 1;
         if (memory != nullptr)
@@ -63,157 +60,110 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) {
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
 
-    page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
+    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+    SpecialRegion region{SpecialRegion::Type::IODevice, mmio_handler};
+    page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
 }
 
 void UnmapRegion(PageTable& page_table, VAddr base, u64 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
-}
-
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process
- */
-static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
-    u8* direct_pointer = nullptr;
 
-    auto& vm_manager = process.vm_manager;
-
-    auto it = vm_manager.FindVMA(vaddr);
-    ASSERT(it != vm_manager.vma_map.end());
-
-    auto& vma = it->second;
-    switch (vma.type) {
-    case Kernel::VMAType::AllocatedMemoryBlock:
-        direct_pointer = vma.backing_block->data() + vma.offset;
-        break;
-    case Kernel::VMAType::BackingMemory:
-        direct_pointer = vma.backing_memory;
-        break;
-    case Kernel::VMAType::Free:
-        return nullptr;
-    default:
-        UNREACHABLE();
-    }
+    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+    page_table.special_regions.erase(interval);
+}
 
-    return direct_pointer + (vaddr - vma.base);
+void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
+    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+    SpecialRegion region{SpecialRegion::Type::DebugHook, hook};
+    page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region}));
 }
 
-/**
- * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
- * using a VMA from the current process.
- */
-static u8* GetPointerFromVMA(VAddr vaddr) {
-    return GetPointerFromVMA(*Kernel::g_current_process, vaddr);
+void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) {
+    auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1);
+    SpecialRegion region{SpecialRegion::Type::DebugHook, hook};
+    page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region}));
 }
 
 /**
  * This function should only be called for virtual addreses with attribute `PageType::Special`.
  */
-static MMIORegionPointer GetMMIOHandler(const PageTable& page_table, VAddr vaddr) {
-    for (const auto& region : page_table.special_regions) {
-        if (vaddr >= region.base && vaddr < (region.base + region.size)) {
-            return region.handler;
+static std::set<MemoryHookPointer> GetSpecialHandlers(const PageTable& page_table, VAddr vaddr,
+                                                      u64 size) {
+    std::set<MemoryHookPointer> result;
+    auto interval = boost::icl::discrete_interval<VAddr>::closed(vaddr, vaddr + size - 1);
+    auto interval_list = page_table.special_regions.equal_range(interval);
+    for (auto it = interval_list.first; it != interval_list.second; ++it) {
+        for (const auto& region : it->second) {
+            result.insert(region.handler);
         }
     }
-    ASSERT_MSG(false, "Mapped IO page without a handler @ %08X", vaddr);
-    return nullptr; // Should never happen
+    return result;
 }
 
-static MMIORegionPointer GetMMIOHandler(VAddr vaddr) {
+static std::set<MemoryHookPointer> GetSpecialHandlers(VAddr vaddr, u64 size) {
     const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
-    return GetMMIOHandler(page_table, vaddr);
+    return GetSpecialHandlers(page_table, vaddr, size);
 }
 
 template <typename T>
-T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr);
+boost::optional<T> ReadSpecial(VAddr addr);
 
 template <typename T>
 T Read(const VAddr vaddr) {
-    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
-        return value;
-    }
-
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%llx", sizeof(T) * 8, vaddr);
+        LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%016llX", sizeof(T) * 8, vaddr);
         return 0;
-    case PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
-        break;
-    case PageType::RasterizerCachedMemory: {
-        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush);
+    case PageType::Special: {
+        if (auto result = ReadSpecial<T>(vaddr))
+            return *result;
+        [[fallthrough]];
+    }
+    case PageType::Memory: {
+        const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+        ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %08X", vaddr);
 
         T value;
-        std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T));
+        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
         return value;
     }
-    case PageType::Special:
-        return ReadMMIO<T>(GetMMIOHandler(vaddr), vaddr);
-    case PageType::RasterizerCachedSpecial: {
-        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush);
-        return ReadMMIO<T>(GetMMIOHandler(vaddr), vaddr);
-    }
-    default:
-        UNREACHABLE();
     }
+    UNREACHABLE();
+    return 0;
 }
 
 template <typename T>
-void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data);
+bool WriteSpecial(VAddr addr, const T data);
 
 template <typename T>
 void Write(const VAddr vaddr, const T data) {
-    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-    if (page_pointer) {
-        // NOTE: Avoid adding any extra logic to this fast-path block
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
-        return;
-    }
-
-    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
-
-    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
         LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
                   vaddr);
         return;
-    case PageType::Memory:
-        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
-        break;
-    case PageType::RasterizerCachedMemory: {
-        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::FlushAndInvalidate);
-        std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T));
-        break;
+    case PageType::Special: {
+        if (WriteSpecial<T>(vaddr, data))
+            return;
+        [[fallthrough]];
     }
-    case PageType::Special:
-        WriteMMIO<T>(GetMMIOHandler(vaddr), vaddr, data);
-        break;
-    case PageType::RasterizerCachedSpecial: {
-        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::FlushAndInvalidate);
-        WriteMMIO<T>(GetMMIOHandler(vaddr), vaddr, data);
-        break;
+    case PageType::Memory: {
+        u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+        ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %08X", vaddr);
+        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        return;
     }
-    default:
-        UNREACHABLE();
     }
+    UNREACHABLE();
 }
 
 bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
@@ -222,21 +172,20 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES)
         return false;
 
-    const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
-    if (page_pointer)
-        return true;
-
-    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
-        return true;
-
-    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    switch (type) {
+    case PageType::Unmapped:
         return false;
-
-    MMIORegionPointer mmio_region = GetMMIOHandler(page_table, vaddr);
-    if (mmio_region) {
-        return mmio_region->IsValidAddress(vaddr);
+    case PageType::Memory:
+        return true;
+    case PageType::Special: {
+        for (auto handler : GetSpecialHandlers(page_table, vaddr, 1))
+            if (auto result = handler->IsValidAddress(vaddr))
+                return *result;
+        return current_page_table->pointers[vaddr >> PAGE_BITS] != nullptr;
     }
-
+    }
+    UNREACHABLE();
     return false;
 }
 
@@ -254,10 +203,6 @@ u8* GetPointer(const VAddr vaddr) {
         return page_pointer + (vaddr & PAGE_MASK);
     }
 
-    if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
-        return GetPointerFromVMA(vaddr);
-    }
-
     LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
     return nullptr;
 }
@@ -335,97 +280,6 @@ u8* GetPhysicalPointer(PAddr address) {
     return target_pointer;
 }
 
-void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) {
-    if (start == 0) {
-        return;
-    }
-
-    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
-    PAddr paddr = start;
-
-    for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
-        boost::optional<VAddr> maybe_vaddr = PhysicalToVirtualAddress(paddr);
-        // While the physical <-> virtual mapping is 1:1 for the regions supported by the cache,
-        // some games (like Pokemon Super Mystery Dungeon) will try to use textures that go beyond
-        // the end address of VRAM, causing the Virtual->Physical translation to fail when flushing
-        // parts of the texture.
-        if (!maybe_vaddr) {
-            LOG_ERROR(HW_Memory,
-                      "Trying to flush a cached region to an invalid physical address %08X", paddr);
-            continue;
-        }
-        VAddr vaddr = *maybe_vaddr;
-
-        u8& res_count = current_page_table->cached_res_count[vaddr >> PAGE_BITS];
-        ASSERT_MSG(count_delta <= UINT8_MAX - res_count,
-                   "Rasterizer resource cache counter overflow!");
-        ASSERT_MSG(count_delta >= -res_count, "Rasterizer resource cache counter underflow!");
-
-        // Switch page type to cached if now cached
-        if (res_count == 0) {
-            PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
-            switch (page_type) {
-            case PageType::Unmapped:
-                // It is not necessary for a process to have this region mapped into its address
-                // space, for example, a system module need not have a VRAM mapping.
-                break;
-            case PageType::Memory:
-                page_type = PageType::RasterizerCachedMemory;
-                current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
-                break;
-            case PageType::Special:
-                page_type = PageType::RasterizerCachedSpecial;
-                break;
-            default:
-                UNREACHABLE();
-            }
-        }
-
-        res_count += count_delta;
-
-        // Switch page type to uncached if now uncached
-        if (res_count == 0) {
-            PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
-            switch (page_type) {
-            case PageType::Unmapped:
-                // It is not necessary for a process to have this region mapped into its address
-                // space, for example, a system module need not have a VRAM mapping.
-                break;
-            case PageType::RasterizerCachedMemory: {
-                u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
-                if (pointer == nullptr) {
-                    // It's possible that this function has called been while updating the pagetable
-                    // after unmapping a VMA. In that case the underlying VMA will no longer exist,
-                    // and we should just leave the pagetable entry blank.
-                    page_type = PageType::Unmapped;
-                } else {
-                    page_type = PageType::Memory;
-                    current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
-                }
-                break;
-            }
-            case PageType::RasterizerCachedSpecial:
-                page_type = PageType::Special;
-                break;
-            default:
-                UNREACHABLE();
-            }
-        }
-    }
-}
-
-void RasterizerFlushRegion(PAddr start, u64 size) {}
-
-void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) {
-    // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
-    // null here
-}
-
-void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
-    // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
-    // null here
-}
-
 u8 Read8(const VAddr addr) {
     return Read<u8>(addr);
 }
@@ -442,6 +296,17 @@ u64 Read64(const VAddr addr) {
     return Read<u64_le>(addr);
 }
 
+static bool ReadSpecialBlock(const Kernel::Process& process, const VAddr src_addr,
+                             void* dest_buffer, const size_t size) {
+    auto& page_table = process.vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, src_addr, size)) {
+        if (handler->ReadBlock(src_addr, dest_buffer, size)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
                const size_t size) {
     auto& page_table = process.vm_manager.page_table;
@@ -455,11 +320,15 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case PageType::Unmapped:
            LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0xllx, size = %zu)",
                       current_vaddr, src_addr, size);
             std::memset(dest_buffer, 0, copy_amount);
             break;
+        case PageType::Special: {
+            if (ReadSpecialBlock(process, current_vaddr, dest_buffer, copy_amount))
+                break;
+            [[fallthrough]];
         }
         case PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
@@ -468,26 +337,6 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
             std::memcpy(dest_buffer, src_ptr, copy_amount);
             break;
         }
-        case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
-            DEBUG_ASSERT(handler);
-            handler->ReadBlock(current_vaddr, dest_buffer, copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-            std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedSpecial: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
-            DEBUG_ASSERT(handler);
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-            handler->ReadBlock(current_vaddr, dest_buffer, copy_amount);
-            break;
-        }
         default:
             UNREACHABLE();
         }
@@ -519,6 +368,17 @@ void Write64(const VAddr addr, const u64 data) {
     Write<u64_le>(addr, data);
 }
 
+static bool WriteSpecialBlock(const Kernel::Process& process, const VAddr dest_addr,
+                              const void* src_buffer, const size_t size) {
+    auto& page_table = process.vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, dest_addr, size)) {
+        if (handler->WriteBlock(dest_addr, src_buffer, size)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
                 const size_t size) {
     auto& page_table = process.vm_manager.page_table;
@@ -531,12 +391,15 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped: {
+        case PageType::Unmapped:
             LOG_ERROR(HW_Memory,
                       "unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, dest_addr, size);
             break;
-        }
+        case PageType::Special:
+            if (WriteSpecialBlock(process, current_vaddr, src_buffer, copy_amount))
+                break;
+            [[fallthrough]];
         case PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
@@ -544,26 +407,6 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
             std::memcpy(dest_ptr, src_buffer, copy_amount);
             break;
         }
-        case PageType::Special: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
-            DEBUG_ASSERT(handler);
-            handler->WriteBlock(current_vaddr, src_buffer, copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::FlushAndInvalidate);
-            std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedSpecial: {
-            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
-            DEBUG_ASSERT(handler);
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::FlushAndInvalidate);
-            handler->WriteBlock(current_vaddr, src_buffer, copy_amount);
-            break;
-        }
         default:
             UNREACHABLE();
         }
@@ -580,6 +423,8 @@ void WriteBlock(const VAddr dest_addr, const void* src_buffer, const size_t size
 }
 
 void ZeroBlock(const VAddr dest_addr, const size_t size) {
+    const auto& process = *Kernel::g_current_process;
+
     size_t remaining_size = size;
     size_t page_index = dest_addr >> PAGE_BITS;
     size_t page_offset = dest_addr & PAGE_MASK;
@@ -591,11 +436,14 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) {
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (current_page_table->attributes[page_index]) {
-        case PageType::Unmapped: {
+        case PageType::Unmapped:
             LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, dest_addr, size);
             break;
-        }
+        case PageType::Special:
+            if (WriteSpecialBlock(process, current_vaddr, zeros.data(), copy_amount))
+                break;
+            [[fallthrough]];
         case PageType::Memory: {
             DEBUG_ASSERT(current_page_table->pointers[page_index]);
 
@@ -603,25 +451,6 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) {
             std::memset(dest_ptr, 0, copy_amount);
             break;
         }
-        case PageType::Special: {
-            DEBUG_ASSERT(GetMMIOHandler(current_vaddr));
-
-            GetMMIOHandler(current_vaddr)->WriteBlock(current_vaddr, zeros.data(), copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::FlushAndInvalidate);
-            std::memset(GetPointerFromVMA(current_vaddr), 0, copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedSpecial: {
-            DEBUG_ASSERT(GetMMIOHandler(current_vaddr));
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::FlushAndInvalidate);
-            GetMMIOHandler(current_vaddr)->WriteBlock(current_vaddr, zeros.data(), copy_amount);
-            break;
-        }
         default:
             UNREACHABLE();
         }
@@ -633,6 +462,8 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) {
 }
 
 void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
+    const auto& process = *Kernel::g_current_process;
+
     size_t remaining_size = size;
     size_t page_index = src_addr >> PAGE_BITS;
     size_t page_offset = src_addr & PAGE_MASK;
@@ -642,11 +473,18 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (current_page_table->attributes[page_index]) {
-        case PageType::Unmapped: {
+        case PageType::Unmapped:
             LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, src_addr, size);
             ZeroBlock(dest_addr, copy_amount);
             break;
+        case PageType::Special: {
+            std::vector<u8> buffer(copy_amount);
+            if (ReadSpecialBlock(process, current_vaddr, buffer.data(), buffer.size())) {
+                WriteBlock(dest_addr, buffer.data(), buffer.size());
+                break;
+            }
+            [[fallthrough]];
         }
         case PageType::Memory: {
             DEBUG_ASSERT(current_page_table->pointers[page_index]);
@@ -654,30 +492,6 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
             WriteBlock(dest_addr, src_ptr, copy_amount);
             break;
         }
-        case PageType::Special: {
-            DEBUG_ASSERT(GetMMIOHandler(current_vaddr));
-
-            std::vector<u8> buffer(copy_amount);
-            GetMMIOHandler(current_vaddr)->ReadBlock(current_vaddr, buffer.data(), buffer.size());
-            WriteBlock(dest_addr, buffer.data(), buffer.size());
-            break;
-        }
-        case PageType::RasterizerCachedMemory: {
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-            WriteBlock(dest_addr, GetPointerFromVMA(current_vaddr), copy_amount);
-            break;
-        }
-        case PageType::RasterizerCachedSpecial: {
-            DEBUG_ASSERT(GetMMIOHandler(current_vaddr));
-            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
-                                         FlushMode::Flush);
-
-            std::vector<u8> buffer(copy_amount);
-            GetMMIOHandler(current_vaddr)->ReadBlock(current_vaddr, buffer.data(), buffer.size());
-            WriteBlock(dest_addr, buffer.data(), buffer.size());
-            break;
-        }
         default:
             UNREACHABLE();
         }
@@ -691,43 +505,75 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
 }
 
 template <>
-u8 ReadMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr) {
-    return mmio_handler->Read8(addr);
+boost::optional<u8> ReadSpecial<u8>(VAddr addr) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8)))
+        if (auto result = handler->Read8(addr))
+            return *result;
+    return {};
 }
 
 template <>
-u16 ReadMMIO<u16>(MMIORegionPointer mmio_handler, VAddr addr) {
-    return mmio_handler->Read16(addr);
+boost::optional<u16> ReadSpecial<u16>(VAddr addr) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16)))
+        if (auto result = handler->Read16(addr))
+            return *result;
+    return {};
 }
 
 template <>
-u32 ReadMMIO<u32>(MMIORegionPointer mmio_handler, VAddr addr) {
-    return mmio_handler->Read32(addr);
+boost::optional<u32> ReadSpecial<u32>(VAddr addr) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32)))
+        if (auto result = handler->Read32(addr))
+            return *result;
+    return {};
 }
 
 template <>
-u64 ReadMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr) {
-    return mmio_handler->Read64(addr);
+boost::optional<u64> ReadSpecial<u64>(VAddr addr) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64)))
+        if (auto result = handler->Read64(addr))
+            return *result;
+    return {};
 }
 
 template <>
-void WriteMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr, const u8 data) {
-    mmio_handler->Write8(addr, data);
+bool WriteSpecial<u8>(VAddr addr, const u8 data) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8)))
+        if (handler->Write8(addr, data))
+            return true;
+    return false;
 }
 
 template <>
-void WriteMMIO<u16>(MMIORegionPointer mmio_handler, VAddr addr, const u16 data) {
-    mmio_handler->Write16(addr, data);
+bool WriteSpecial<u16>(VAddr addr, const u16 data) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16)))
+        if (handler->Write16(addr, data))
+            return true;
+    return false;
 }
 
 template <>
-void WriteMMIO<u32>(MMIORegionPointer mmio_handler, VAddr addr, const u32 data) {
-    mmio_handler->Write32(addr, data);
+bool WriteSpecial<u32>(VAddr addr, const u32 data) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32)))
+        if (handler->Write32(addr, data))
+            return true;
+    return false;
 }
 
 template <>
-void WriteMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr, const u64 data) {
-    mmio_handler->Write64(addr, data);
+bool WriteSpecial<u64>(VAddr addr, const u64 data) {
+    const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table;
+    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64)))
+        if (handler->Write64(addr, data))
+            return true;
+    return false;
 }
 
 boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) {
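
Throughout the diff, hooks return boost::optional (reads) or bool (writes) so a handler can decline an access and let it fall through to ordinary backing memory. The interface below is a hypothetical reconstruction pieced together purely from the call sites above; the real declaration lives in another file not shown on this page, and the shared_ptr alias is an assumption:

#include <cstdint>
#include <memory>
#include <boost/optional.hpp>

using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;
using VAddr = u64; // assumption: wide enough for the emulated address space

class MemoryHook {
public:
    virtual ~MemoryHook() = default;

    // boost::none / false mean "not handled": the caller falls through to
    // normal memory, as Read<T>() and Write<T>() do above.
    virtual boost::optional<bool> IsValidAddress(VAddr addr) = 0;

    virtual boost::optional<u8> Read8(VAddr addr) = 0;
    virtual boost::optional<u16> Read16(VAddr addr) = 0;
    virtual boost::optional<u32> Read32(VAddr addr) = 0;
    virtual boost::optional<u64> Read64(VAddr addr) = 0;
    virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, std::size_t size) = 0;

    virtual bool Write8(VAddr addr, u8 data) = 0;
    virtual bool Write16(VAddr addr, u16 data) = 0;
    virtual bool Write32(VAddr addr, u32 data) = 0;
    virtual bool Write64(VAddr addr, u64 data) = 0;
    virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, std::size_t size) = 0;
};

using MemoryHookPointer = std::shared_ptr<MemoryHook>;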