| author    | 2018-03-24 22:21:14 -0400 |
|-----------|---------------------------|
| committer | 2018-03-26 21:16:57 -0400 |
| commit    | d732142b667a650e7418ff5d6c985fa333e04c38 (patch) |
| tree      | 2ba4559b10bccff73e975e057b86d38da05de26f /src/core/memory.cpp |
| parent    | gl_rasterizer: Implement SetupVertexArray. (diff) |
| download  | yuzu-d732142b667a650e7418ff5d6c985fa333e04c38.tar.gz, yuzu-d732142b667a650e7418ff5d6c985fa333e04c38.tar.xz, yuzu-d732142b667a650e7418ff5d6c985fa333e04c38.zip |
memory: Add RasterizerMarkRegionCached code and cleanup.
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 390 |
1 file changed, 190 insertions, 200 deletions
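For orientation before the diff: this commit introduces the PageType::RasterizerCachedMemory page type and the RasterizerMarkRegionCached entry point that toggles pages into and out of it, while the Read/Write and block-copy paths learn to flush or invalidate the rasterizer cache when they touch such a page. A minimal sketch of how a GPU-side surface cache might call the new entry point, assuming it is exposed from core/memory.h inside the Memory namespace like the file's other functions (the CacheSurface/ReleaseSurface wrappers are hypothetical and not part of this commit):

```cpp
#include "core/memory.h"

// Hypothetical caller sketch; only Memory::RasterizerMarkRegionCached comes from this commit.
void CacheSurface(VAddr addr, u64 size_in_bytes) {
    // Marking the range cached nulls the fast-path page pointers, so CPU accesses to it
    // fall into the slow path that flushes/invalidates the rasterizer cache first.
    Memory::RasterizerMarkRegionCached(addr, size_in_bytes, true);
}

void ReleaseSurface(VAddr addr, u64 size_in_bytes) {
    // Unmarking restores the direct page pointers and the fast path.
    Memory::RasterizerMarkRegionCached(addr, size_in_bytes, false);
}
```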
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index d6469dd3d..47c4828f7 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/hle/kernel/memory.h"
 #include "core/hle/kernel/process.h"
+#include "core/hle/lock.h"
 #include "core/memory.h"
 #include "core/memory_setup.h"
 #include "video_core/renderer_base.h"
@@ -115,91 +116,120 @@ static std::set<MemoryHookPointer> GetSpecialHandlers(VAddr vaddr, u64 size) {
     return GetSpecialHandlers(page_table, vaddr, size);
 }
 
-template <typename T>
-boost::optional<T> ReadSpecial(VAddr addr);
+/**
+ * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+ * using a VMA from the current process
+ */
+static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) {
+    u8* direct_pointer = nullptr;
+
+    auto& vm_manager = process.vm_manager;
+
+    auto it = vm_manager.FindVMA(vaddr);
+    ASSERT(it != vm_manager.vma_map.end());
+
+    auto& vma = it->second;
+    switch (vma.type) {
+    case Kernel::VMAType::AllocatedMemoryBlock:
+        direct_pointer = vma.backing_block->data() + vma.offset;
+        break;
+    case Kernel::VMAType::BackingMemory:
+        direct_pointer = vma.backing_memory;
+        break;
+    case Kernel::VMAType::Free:
+        return nullptr;
+    default:
+        UNREACHABLE();
+    }
+
+    return direct_pointer + (vaddr - vma.base);
+}
+
+/**
+ * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned)
+ * using a VMA from the current process.
+ */
+static u8* GetPointerFromVMA(VAddr vaddr) {
+    return GetPointerFromVMA(*Core::CurrentProcess(), vaddr);
+}
 
 template <typename T>
 T Read(const VAddr vaddr) {
-    if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES) {
-        LOG_ERROR(HW_Memory, "Read%lu after page table @ 0x%016" PRIX64, sizeof(T) * 8, vaddr);
-        return 0;
+    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    if (page_pointer) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        T value;
+        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        return value;
     }
 
-    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+
+    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Read%zu @ 0x%016" PRIX64, sizeof(T) * 8, vaddr);
+        LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr);
         return 0;
-    case PageType::Special: {
-        if (auto result = ReadSpecial<T>(vaddr))
-            return *result;
-        [[fallthrough]];
-    }
-    case PageType::Memory: {
-        const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-        ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
+    case PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
+        break;
+    case PageType::RasterizerCachedMemory: {
+        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush);
 
         T value;
-        std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T));
+        std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T));
         return value;
     }
+    default:
+        UNREACHABLE();
     }
-    UNREACHABLE();
-    return 0;
 }
 
 template <typename T>
-bool WriteSpecial(VAddr addr, const T data);
-
-template <typename T>
 void Write(const VAddr vaddr, const T data) {
-    if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES) {
-        LOG_ERROR(HW_Memory, "Write%lu after page table 0x%08X @ 0x%016" PRIX64, sizeof(data) * 8,
-                  (u32)data, vaddr);
+    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
+    if (page_pointer) {
+        // NOTE: Avoid adding any extra logic to this fast-path block
+        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
         return;
     }
 
-    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
+    // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
+
+    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Write%zu 0x%08X @ 0x%016" PRIX64, sizeof(data) * 8,
-                  static_cast<u32>(data), vaddr);
-        return;
-    case PageType::Special: {
-        if (WriteSpecial<T>(vaddr, data))
-            return;
-        [[fallthrough]];
-    }
-    case PageType::Memory: {
-        u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
-        ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %016" PRIX64, vaddr);
-        std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T));
+        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
+                  vaddr);
         return;
+    case PageType::Memory:
+        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
+        break;
+    case PageType::RasterizerCachedMemory: {
+        RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate);
+        std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T));
+        break;
     }
+    default:
+        UNREACHABLE();
     }
-    UNREACHABLE();
 }
 
 bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) {
     auto& page_table = process.vm_manager.page_table;
 
-    if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES)
-        return false;
+    const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS];
+    if (page_pointer)
+        return true;
 
-    const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
-    switch (type) {
-    case PageType::Unmapped:
-        return false;
-    case PageType::Memory:
+    if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory)
         return true;
-    case PageType::Special: {
-        for (auto handler : GetSpecialHandlers(page_table, vaddr, 1))
-            if (auto result = handler->IsValidAddress(vaddr))
-                return *result;
-        return current_page_table->pointers[vaddr >> PAGE_BITS] != nullptr;
-    }
-    }
-    UNREACHABLE();
+
+    if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special)
+        return false;
+
     return false;
 }
 
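The pattern in the hunk above is the core of the change: a page with a non-null pointer takes the fast path and is copied through directly, while a page marked RasterizerCachedMemory has its pointer nulled out, so the access falls through to the locked slow path, which flushes (on read) or invalidates (on write) the GPU cache for the accessed bytes and then reaches memory through the VMA-backed pointer. A self-contained toy model of that dispatch follows; all names and types are simplified stand-ins for illustration, not yuzu's real code:

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Toy model: a page either has a direct fast-path pointer, or (when rasterizer-cached)
// a null pointer, in which case the access falls through to a slow path that would flush
// the GPU cache and then go through a VMA-style lookup.
constexpr std::uint64_t kPageBits = 12;
constexpr std::uint64_t kPageSize = 1 << kPageBits;
constexpr std::uint64_t kPageMask = kPageSize - 1;

enum class PageType { Unmapped, Memory, RasterizerCachedMemory };

std::vector<std::uint8_t> backing(4 * kPageSize);  // stand-in for the VMA backing block
std::array<std::uint8_t*, 4> pointers{};           // fast-path pointers, one per page
std::array<PageType, 4> attributes{};              // page attributes

std::uint8_t* GetPointerFromBacking(std::uint64_t vaddr) {
    return backing.data() + vaddr;                 // real code walks the process's VMA map
}

template <typename T>
T Read(std::uint64_t vaddr) {
    if (const std::uint8_t* page_pointer = pointers[vaddr >> kPageBits]) {
        T value;                                   // fast path: straight memcpy
        std::memcpy(&value, &page_pointer[vaddr & kPageMask], sizeof(T));
        return value;
    }
    if (attributes[vaddr >> kPageBits] == PageType::RasterizerCachedMemory) {
        T value;                                   // slow path: a real flush would happen here
        std::memcpy(&value, GetPointerFromBacking(vaddr), sizeof(T));
        return value;
    }
    return 0;                                      // unmapped
}

int main() {
    pointers[0] = backing.data();                  // page 0: normal memory
    attributes[0] = PageType::Memory;
    attributes[1] = PageType::RasterizerCachedMemory;  // page 1: cached, pointer stays null

    backing[0x10] = 0x42;
    backing[kPageSize + 0x10] = 0x99;
    std::cout << +Read<std::uint8_t>(0x10) << ' '
              << +Read<std::uint8_t>(kPageSize + 0x10) << '\n';  // prints "66 153"
}
```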
@@ -217,7 +247,11 @@ u8* GetPointer(const VAddr vaddr) {
         return page_pointer + (vaddr & PAGE_MASK);
     }
 
-    LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%016" PRIx64, vaddr);
+    if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) {
+        return GetPointerFromVMA(vaddr);
+    }
+
+    LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
     return nullptr;
 }
 
@@ -291,6 +325,58 @@ u8* GetPhysicalPointer(PAddr address) {
     return target_pointer;
 }
 
+void RasterizerMarkRegionCached(VAddr start, u64 size, bool cached) {
+    if (start == 0) {
+        return;
+    }
+
+    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    VAddr vaddr = start;
+
+    for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS];
+
+        if (cached) {
+            // Switch page type to cached if now cached
+            switch (page_type) {
+            case PageType::Unmapped:
+                // It is not necessary for a process to have this region mapped into its address
+                // space, for example, a system module need not have a VRAM mapping.
+                break;
+            case PageType::Memory:
+                page_type = PageType::RasterizerCachedMemory;
+                current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr;
+                break;
+            default:
+                UNREACHABLE();
+            }
+        } else {
+            // Switch page type to uncached if now uncached
+            switch (page_type) {
+            case PageType::Unmapped:
+                // It is not necessary for a process to have this region mapped into its address
+                // space, for example, a system module need not have a VRAM mapping.
+                break;
+            case PageType::RasterizerCachedMemory: {
+                u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK);
+                if (pointer == nullptr) {
+                    // It's possible that this function has been called while updating the pagetable
+                    // after unmapping a VMA. In that case the underlying VMA will no longer exist,
+                    // and we should just leave the pagetable entry blank.
+                    page_type = PageType::Unmapped;
+                } else {
+                    page_type = PageType::Memory;
+                    current_page_table->pointers[vaddr >> PAGE_BITS] = pointer;
+                }
+                break;
+            }
+            default:
+                UNREACHABLE();
+            }
+        }
+    }
+}
+
 void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
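One detail in the hunk above worth spelling out: num_pages is the index of the page holding the region's last byte minus the index of the page holding its first byte, plus one, so even a tiny region that straddles a page boundary marks both pages. A quick worked check of that formula, assuming the usual 4 KiB pages (the specific constants here are illustrative only):

```cpp
#include <cstdint>

// Worked example of the num_pages formula from RasterizerMarkRegionCached, assuming
// 4 KiB pages. A 0x20-byte region starting at 0x1FF0 ends at byte 0x200F, so it touches
// pages 1 and 2 even though it is far smaller than a page.
constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t start = 0x1FF0;
constexpr std::uint64_t size = 0x20;
constexpr std::uint64_t num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
static_assert(num_pages == 2, "a 0x20-byte range straddling 0x2000 spans two pages");
```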
@@ -344,17 +430,6 @@ u64 Read64(const VAddr addr) {
     return Read<u64_le>(addr);
 }
 
-static bool ReadSpecialBlock(const Kernel::Process& process, const VAddr src_addr,
-                             void* dest_buffer, const size_t size) {
-    auto& page_table = process.vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, src_addr, size)) {
-        if (handler->ReadBlock(src_addr, dest_buffer, size)) {
-            return true;
-        }
-    }
-    return false;
-}
-
 void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer,
                const size_t size) {
     auto& page_table = process.vm_manager.page_table;
@@ -364,21 +439,15 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
     size_t page_offset = src_addr & PAGE_MASK;
 
     while (remaining_size > 0) {
-        const size_t copy_amount = std::min<size_t>(PAGE_SIZE - page_offset, remaining_size);
+        const size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped:
-            LOG_ERROR(HW_Memory,
-                      "unmapped ReadBlock @ 0x%016" PRIX64 " (start address = 0x%" PRIx64
-                      ", size = %zu)",
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, src_addr, size);
             std::memset(dest_buffer, 0, copy_amount);
             break;
-        case PageType::Special: {
-            if (ReadSpecialBlock(process, current_vaddr, dest_buffer, copy_amount))
-                break;
-            [[fallthrough]];
         }
         case PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
@@ -387,6 +456,12 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_
             std::memcpy(dest_buffer, src_ptr, copy_amount);
             break;
         }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Flush);
+            std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount);
+            break;
+        }
         default:
             UNREACHABLE();
         }
@@ -418,17 +493,6 @@ void Write64(const VAddr addr, const u64 data) {
     Write<u64_le>(addr, data);
 }
 
-static bool WriteSpecialBlock(const Kernel::Process& process, const VAddr dest_addr,
-                              const void* src_buffer, const size_t size) {
-    auto& page_table = process.vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, dest_addr, size)) {
-        if (handler->WriteBlock(dest_addr, src_buffer, size)) {
-            return true;
-        }
-    }
-    return false;
-}
-
 void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer,
                 const size_t size) {
     auto& page_table = process.vm_manager.page_table;
@@ -437,20 +501,16 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
     size_t page_offset = dest_addr & PAGE_MASK;
 
     while (remaining_size > 0) {
-        const size_t copy_amount = std::min<size_t>(PAGE_SIZE - page_offset, remaining_size);
+        const size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
         switch (page_table.attributes[page_index]) {
-        case PageType::Unmapped:
+        case PageType::Unmapped: {
             LOG_ERROR(HW_Memory,
-                      "unmapped WriteBlock @ 0x%016" PRIX64 " (start address = 0x%016" PRIX64
-                      ", size = %zu)",
+                      "unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, dest_addr, size);
             break;
-        case PageType::Special:
-            if (WriteSpecialBlock(process, current_vaddr, src_buffer, copy_amount))
-                break;
-            [[fallthrough]];
+        }
         case PageType::Memory: {
             DEBUG_ASSERT(page_table.pointers[page_index]);
 
@@ -458,6 +518,12 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi
             std::memcpy(dest_ptr, src_buffer, copy_amount);
             break;
         }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Invalidate);
+            std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount);
+            break;
+        }
         default:
             UNREACHABLE();
         }
@@ -473,9 +539,8 @@ void WriteBlock(const VAddr dest_addr, const void* src_buffer, const size_t size
     WriteBlock(*Core::CurrentProcess(), dest_addr, src_buffer, size);
 }
 
-void ZeroBlock(const VAddr dest_addr, const size_t size) {
-    const auto& process = *Core::CurrentProcess();
-
+void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const size_t size) {
+    auto& page_table = process.vm_manager.page_table;
     size_t remaining_size = size;
     size_t page_index = dest_addr >> PAGE_BITS;
     size_t page_offset = dest_addr & PAGE_MASK;
@@ -483,27 +548,28 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) {
     static const std::array<u8, PAGE_SIZE> zeros = {};
 
     while (remaining_size > 0) {
-        const size_t copy_amount = std::min<size_t>(PAGE_SIZE - page_offset, remaining_size);
+        const size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-        switch (current_page_table->attributes[page_index]) {
-        case PageType::Unmapped:
-            LOG_ERROR(HW_Memory,
-                      "unmapped ZeroBlock @ 0x%016" PRIX64 " (start address = 0x%016" PRIX64
-                      ", size = %zu)",
+        switch (page_table.attributes[page_index]) {
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, dest_addr, size);
             break;
-        case PageType::Special:
-            if (WriteSpecialBlock(process, current_vaddr, zeros.data(), copy_amount))
-                break;
-            [[fallthrough]];
+        }
         case PageType::Memory: {
-            DEBUG_ASSERT(current_page_table->pointers[page_index]);
+            DEBUG_ASSERT(page_table.pointers[page_index]);
 
-            u8* dest_ptr = current_page_table->pointers[page_index] + page_offset;
+            u8* dest_ptr = page_table.pointers[page_index] + page_offset;
             std::memset(dest_ptr, 0, copy_amount);
             break;
         }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Invalidate);
+            std::memset(GetPointerFromVMA(process, current_vaddr), 0, copy_amount);
+            break;
+        }
         default:
             UNREACHABLE();
         }
@@ -514,37 +580,33 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) {
     }
 }
 
-void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
-    const auto& process = *Core::CurrentProcess();
-
+void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, const size_t size) {
+    auto& page_table = process.vm_manager.page_table;
     size_t remaining_size = size;
     size_t page_index = src_addr >> PAGE_BITS;
     size_t page_offset = src_addr & PAGE_MASK;
 
     while (remaining_size > 0) {
-        const size_t copy_amount = std::min<size_t>(PAGE_SIZE - page_offset, remaining_size);
+        const size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
         const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
 
-        switch (current_page_table->attributes[page_index]) {
-        case PageType::Unmapped:
-            LOG_ERROR(HW_Memory,
-                      "unmapped CopyBlock @ 0x%016" PRIX64 " (start address = 0x%016" PRIX64
-                      ", size = %zu)",
+        switch (page_table.attributes[page_index]) {
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)",
                       current_vaddr, src_addr, size);
-            ZeroBlock(dest_addr, copy_amount);
+            ZeroBlock(process, dest_addr, copy_amount);
             break;
-        case PageType::Special: {
-            std::vector<u8> buffer(copy_amount);
-            if (ReadSpecialBlock(process, current_vaddr, buffer.data(), buffer.size())) {
-                WriteBlock(dest_addr, buffer.data(), buffer.size());
-                break;
-            }
-            [[fallthrough]];
         }
         case PageType::Memory: {
-            DEBUG_ASSERT(current_page_table->pointers[page_index]);
-            const u8* src_ptr = current_page_table->pointers[page_index] + page_offset;
-            WriteBlock(dest_addr, src_ptr, copy_amount);
+            DEBUG_ASSERT(page_table.pointers[page_index]);
+            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            WriteBlock(process, dest_addr, src_ptr, copy_amount);
+            break;
+        }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Flush);
+            WriteBlock(process, dest_addr, GetPointerFromVMA(process, current_vaddr), copy_amount);
             break;
         }
         default:
@@ -559,78 +621,6 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) {
     }
 }
 
-template <>
-boost::optional<u8> ReadSpecial<u8>(VAddr addr) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8)))
-        if (auto result = handler->Read8(addr))
-            return *result;
-    return {};
-}
-
-template <>
-boost::optional<u16> ReadSpecial<u16>(VAddr addr) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16)))
-        if (auto result = handler->Read16(addr))
-            return *result;
-    return {};
-}
-
-template <>
-boost::optional<u32> ReadSpecial<u32>(VAddr addr) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32)))
-        if (auto result = handler->Read32(addr))
-            return *result;
-    return {};
-}
-
-template <>
-boost::optional<u64> ReadSpecial<u64>(VAddr addr) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64)))
-        if (auto result = handler->Read64(addr))
-            return *result;
-    return {};
-}
-
-template <>
-bool WriteSpecial<u8>(VAddr addr, const u8 data) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8)))
-        if (handler->Write8(addr, data))
-            return true;
-    return false;
-}
-
-template <>
-bool WriteSpecial<u16>(VAddr addr, const u16 data) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16)))
-        if (handler->Write16(addr, data))
-            return true;
-    return false;
-}
-
-template <>
-bool WriteSpecial<u32>(VAddr addr, const u32 data) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32)))
-        if (handler->Write32(addr, data))
-            return true;
-    return false;
-}
-
-template <>
-bool WriteSpecial<u64>(VAddr addr, const u64 data) {
-    const PageTable& page_table = Core::CurrentProcess()->vm_manager.page_table;
-    for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64)))
-        if (handler->Write64(addr, data))
-            return true;
-    return false;
-}
-
 boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) {
     if (addr == 0) {
         return 0;