| author | 2020-04-08 22:50:46 -0400 |
|---|---|
| committer | 2020-04-17 00:59:34 -0400 |
| commit | f7c03610e150e49632354e838052d210c8c6075b (patch) |
| tree | eaa5c3ee35eb1f6a968dda22b7191f7355bba789 /src/core/memory.cpp |
| parent | common: page_table: Update to use VirtualBuffer and simplify. (diff) |
core: memory: Updates for new VMM.
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 152 |
1 file changed, 52 insertions(+), 100 deletions(-)
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 72d1caf73..fd892b762 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
| @@ -14,9 +14,10 @@ | |||
| 14 | #include "common/swap.h" | 14 | #include "common/swap.h" |
| 15 | #include "core/arm/arm_interface.h" | 15 | #include "core/arm/arm_interface.h" |
| 16 | #include "core/core.h" | 16 | #include "core/core.h" |
| 17 | #include "core/device_memory.h" | ||
| 18 | #include "core/hle/kernel/memory/page_table.h" | ||
| 17 | #include "core/hle/kernel/physical_memory.h" | 19 | #include "core/hle/kernel/physical_memory.h" |
| 18 | #include "core/hle/kernel/process.h" | 20 | #include "core/hle/kernel/process.h" |
| 19 | #include "core/hle/kernel/vm_manager.h" | ||
| 20 | #include "core/memory.h" | 21 | #include "core/memory.h" |
| 21 | #include "video_core/gpu.h" | 22 | #include "video_core/gpu.h" |
| 22 | 23 | ||
| @@ -29,9 +30,9 @@ struct Memory::Impl { | |||
| 29 | explicit Impl(Core::System& system_) : system{system_} {} | 30 | explicit Impl(Core::System& system_) : system{system_} {} |
| 30 | 31 | ||
| 31 | void SetCurrentPageTable(Kernel::Process& process) { | 32 | void SetCurrentPageTable(Kernel::Process& process) { |
| 32 | current_page_table = &process.VMManager().page_table; | 33 | current_page_table = &process.PageTable().PageTableImpl(); |
| 33 | 34 | ||
| 34 | const std::size_t address_space_width = process.VMManager().GetAddressSpaceWidth(); | 35 | const std::size_t address_space_width = process.PageTable().GetAddressSpaceWidth(); |
| 35 | 36 | ||
| 36 | system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); | 37 | system.ArmInterface(0).PageTableChanged(*current_page_table, address_space_width); |
| 37 | system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); | 38 | system.ArmInterface(1).PageTableChanged(*current_page_table, address_space_width); |
| @@ -39,12 +40,7 @@ struct Memory::Impl { | |||
| 39 | system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); | 40 | system.ArmInterface(3).PageTableChanged(*current_page_table, address_space_width); |
| 40 | } | 41 | } |
| 41 | 42 | ||
| 42 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, | 43 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { |
| 43 | Kernel::PhysicalMemory& memory, VAddr offset) { | ||
| 44 | MapMemoryRegion(page_table, base, size, memory.data() + offset); | ||
| 45 | } | ||
| 46 | |||
| 47 | void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { | ||
| 48 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 44 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 49 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 45 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 50 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); | 46 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory); |
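MapMemoryRegion now takes a `PAddr` target straight from the new VMM instead of a `Kernel::PhysicalMemory` buffer plus offset. Below is a minimal, self-contained sketch of the alignment checks and page-count math it performs before handing off to MapPages; the type aliases and the `printf` are stand-ins for the real yuzu types and the real MapPages call, not the actual implementation.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

using VAddr = std::uint64_t;
using PAddr = std::uint64_t;

constexpr std::uint64_t PAGE_BITS = 12;              // 4 KiB pages
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;

void MapMemoryRegion(VAddr base, std::uint64_t size, PAddr target) {
    assert((size & PAGE_MASK) == 0 && "non-page aligned size");
    assert((base & PAGE_MASK) == 0 && "non-page aligned base");
    // The real code forwards to MapPages(page_table, base / PAGE_SIZE,
    // size / PAGE_SIZE, target, Common::PageType::Memory); here we just report it.
    std::printf("map %llu pages: vaddr %#llx -> paddr %#llx\n",
                static_cast<unsigned long long>(size / PAGE_SIZE),
                static_cast<unsigned long long>(base),
                static_cast<unsigned long long>(target));
}

int main() {
    MapMemoryRegion(0x10000000, 4 * PAGE_SIZE, 0x80000000);
}
```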
| @@ -52,46 +48,27 @@ struct Memory::Impl { | |||
| 52 | 48 | ||
| 53 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, | 49 | void MapIoRegion(Common::PageTable& page_table, VAddr base, u64 size, |
| 54 | Common::MemoryHookPointer mmio_handler) { | 50 | Common::MemoryHookPointer mmio_handler) { |
| 55 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 51 | UNIMPLEMENTED(); |
| 56 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | ||
| 57 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, | ||
| 58 | Common::PageType::Special); | ||
| 59 | |||
| 60 | const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | ||
| 61 | const Common::SpecialRegion region{Common::SpecialRegion::Type::IODevice, | ||
| 62 | std::move(mmio_handler)}; | ||
| 63 | page_table.special_regions.add( | ||
| 64 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 65 | } | 52 | } |
| 66 | 53 | ||
| 67 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { | 54 | void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) { |
| 68 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); | 55 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size); |
| 69 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); | 56 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base); |
| 70 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, | 57 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped); |
| 71 | Common::PageType::Unmapped); | ||
| 72 | |||
| 73 | const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | ||
| 74 | page_table.special_regions.erase(interval); | ||
| 75 | } | 58 | } |
| 76 | 59 | ||
| 77 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, | 60 | void AddDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 78 | Common::MemoryHookPointer hook) { | 61 | Common::MemoryHookPointer hook) { |
| 79 | const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 62 | UNIMPLEMENTED(); |
| 80 | const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; | ||
| 81 | page_table.special_regions.add( | ||
| 82 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 83 | } | 63 | } |
| 84 | 64 | ||
| 85 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, | 65 | void RemoveDebugHook(Common::PageTable& page_table, VAddr base, u64 size, |
| 86 | Common::MemoryHookPointer hook) { | 66 | Common::MemoryHookPointer hook) { |
| 87 | const auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | 67 | UNIMPLEMENTED(); |
| 88 | const Common::SpecialRegion region{Common::SpecialRegion::Type::DebugHook, std::move(hook)}; | ||
| 89 | page_table.special_regions.subtract( | ||
| 90 | std::make_pair(interval, std::set<Common::SpecialRegion>{region})); | ||
| 91 | } | 68 | } |
| 92 | 69 | ||
| 93 | bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { | 70 | bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) const { |
| 94 | const auto& page_table = process.VMManager().page_table; | 71 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 95 | 72 | ||
| 96 | const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; | 73 | const u8* const page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; |
| 97 | if (page_pointer != nullptr) { | 74 | if (page_pointer != nullptr) { |
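For address validation, the lookup itself is unchanged: the virtual address is shifted down by PAGE_BITS to index the page-pointer array. The toy below models that check with simplified stand-in types (not the real `Common::PageTable`); the tail of IsValidVirtualAddress lies outside this hunk, and the sketch assumes it also accepts rasterizer-cached pages, matching the GetPointer path further down.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

enum class PageType { Unmapped, Memory, RasterizerCachedMemory };

struct ToyPageTable {
    std::vector<std::uint8_t*> pointers;
    std::vector<PageType> attributes;
};

constexpr std::uint64_t PAGE_BITS = 12; // 4 KiB pages

bool IsValidVirtualAddress(const ToyPageTable& pt, std::uint64_t vaddr) {
    const std::size_t page = vaddr >> PAGE_BITS;
    if (page >= pt.pointers.size()) {
        return false; // outside the table entirely
    }
    if (pt.pointers[page] != nullptr) {
        return true;  // directly mapped host memory
    }
    // Pages handed to the rasterizer have a null pointer here but are still valid.
    return pt.attributes[page] == PageType::RasterizerCachedMemory;
}
```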
| @@ -113,55 +90,28 @@ struct Memory::Impl { | |||
| 113 | return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); | 90 | return IsValidVirtualAddress(*system.CurrentProcess(), vaddr); |
| 114 | } | 91 | } |
| 115 | 92 | ||
| 116 | /** | 93 | u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const { |
| 117 | * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) | 94 | const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]}; |
| 118 | * using a VMA from the current process | ||
| 119 | */ | ||
| 120 | u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { | ||
| 121 | const auto& vm_manager = process.VMManager(); | ||
| 122 | |||
| 123 | const auto it = vm_manager.FindVMA(vaddr); | ||
| 124 | DEBUG_ASSERT(vm_manager.IsValidHandle(it)); | ||
| 125 | 95 | ||
| 126 | u8* direct_pointer = nullptr; | 96 | if (!paddr) { |
| 127 | const auto& vma = it->second; | 97 | return {}; |
| 128 | switch (vma.type) { | ||
| 129 | case Kernel::VMAType::AllocatedMemoryBlock: | ||
| 130 | direct_pointer = vma.backing_block->data() + vma.offset; | ||
| 131 | break; | ||
| 132 | case Kernel::VMAType::BackingMemory: | ||
| 133 | direct_pointer = vma.backing_memory; | ||
| 134 | break; | ||
| 135 | case Kernel::VMAType::Free: | ||
| 136 | return nullptr; | ||
| 137 | default: | ||
| 138 | UNREACHABLE(); | ||
| 139 | } | 98 | } |
| 140 | 99 | ||
| 141 | return direct_pointer + (vaddr - vma.base); | 100 | return system.DeviceMemory().GetPointer(paddr) + vaddr; |
| 142 | } | 101 | } |
| 143 | 102 | ||
| 144 | /** | 103 | u8* GetPointer(const VAddr vaddr) const { |
| 145 | * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) | 104 | u8* const page_pointer{current_page_table->pointers[vaddr >> PAGE_BITS]}; |
| 146 | * using a VMA from the current process. | 105 | if (page_pointer) { |
| 147 | */ | ||
| 148 | u8* GetPointerFromVMA(VAddr vaddr) { | ||
| 149 | return GetPointerFromVMA(*system.CurrentProcess(), vaddr); | ||
| 150 | } | ||
| 151 | |||
| 152 | u8* GetPointer(const VAddr vaddr) { | ||
| 153 | u8* const page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; | ||
| 154 | if (page_pointer != nullptr) { | ||
| 155 | return page_pointer + vaddr; | 106 | return page_pointer + vaddr; |
| 156 | } | 107 | } |
| 157 | 108 | ||
| 158 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == | 109 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == |
| 159 | Common::PageType::RasterizerCachedMemory) { | 110 | Common::PageType::RasterizerCachedMemory) { |
| 160 | return GetPointerFromVMA(vaddr); | 111 | return GetPointerFromRasterizerCachedMemory(vaddr); |
| 161 | } | 112 | } |
| 162 | 113 | ||
| 163 | LOG_ERROR(HW_Memory, "Unknown GetPointer @ 0x{:016X}", vaddr); | 114 | return {}; |
| 164 | return nullptr; | ||
| 165 | } | 115 | } |
| 166 | 116 | ||
| 167 | u8 Read8(const VAddr addr) { | 117 | u8 Read8(const VAddr addr) { |
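The old GetPointerFromVMA walk over VMAs is replaced by a flat lookup: `backing_addr` holds, per page, the physical address pre-biased by that page's virtual base, so adding the full vaddr lands on the right byte inside device memory. A short sketch of that pointer math under simplified types; `ToyDeviceMemory` is an assumption for illustration (one flat host buffer indexed by PAddr), not the real core/device_memory API.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr std::uint64_t PAGE_BITS = 12;

enum class PageType { Unmapped, Memory, RasterizerCachedMemory };

struct ToyDeviceMemory {
    std::vector<std::uint8_t> buffer;
    std::uint8_t* GetPointer(std::uint64_t paddr) { return buffer.data() + paddr; }
};

struct ToyPageTable {
    std::vector<std::uint8_t*> pointers;      // host_ptr_of_page - page_base_vaddr, or nullptr
    std::vector<PageType> attributes;
    std::vector<std::uint64_t> backing_addr;  // paddr_of_page - page_base_vaddr, or 0
};

std::uint8_t* GetPointer(ToyPageTable& pt, ToyDeviceMemory& dev, std::uint64_t vaddr) {
    const std::size_t page = vaddr >> PAGE_BITS;
    if (std::uint8_t* base = pt.pointers[page]) {
        return base + vaddr;                              // ordinary mapped page
    }
    if (pt.attributes[page] == PageType::RasterizerCachedMemory) {
        const std::uint64_t paddr = pt.backing_addr[page];
        return paddr ? dev.GetPointer(paddr) + vaddr : nullptr;
    }
    return nullptr;                                       // unmapped
}
```

Because both arrays store page-base-biased values, the hot path is a single shift, load, and add, with no per-access VMA search.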
| @@ -213,7 +163,7 @@ struct Memory::Impl { | |||
| 213 | 163 | ||
| 214 | void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, | 164 | void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, |
| 215 | const std::size_t size) { | 165 | const std::size_t size) { |
| 216 | const auto& page_table = process.VMManager().page_table; | 166 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 217 | 167 | ||
| 218 | std::size_t remaining_size = size; | 168 | std::size_t remaining_size = size; |
| 219 | std::size_t page_index = src_addr >> PAGE_BITS; | 169 | std::size_t page_index = src_addr >> PAGE_BITS; |
| @@ -241,7 +191,7 @@ struct Memory::Impl { | |||
| 241 | break; | 191 | break; |
| 242 | } | 192 | } |
| 243 | case Common::PageType::RasterizerCachedMemory: { | 193 | case Common::PageType::RasterizerCachedMemory: { |
| 244 | const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 194 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 245 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 195 | system.GPU().FlushRegion(current_vaddr, copy_amount); |
| 246 | std::memcpy(dest_buffer, host_ptr, copy_amount); | 196 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 247 | break; | 197 | break; |
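All of the block operations (ReadBlock, WriteBlock, ZeroBlock, CopyBlock and their Unsafe variants) share the same page-walking skeleton; only the per-page action changes. Here is a small helper sketch of that skeleton using the same arithmetic as the loops above; the visitor callback is an illustrative abstraction, not something the real code factors out.

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;
constexpr std::uint64_t PAGE_MASK = PAGE_SIZE - 1;

// Calls visit(current_vaddr, copy_amount) once per page-sized chunk of [addr, addr + size),
// so the caller can dispatch on page_table.attributes[page_index] for each chunk
// (memcpy for Memory, GPU flush/invalidate first for RasterizerCachedMemory, warn on Unmapped).
template <typename Visitor>
void ForEachPageChunk(std::uint64_t addr, std::size_t size, Visitor&& visit) {
    std::size_t remaining_size = size;
    std::size_t page_index = addr >> PAGE_BITS;
    std::size_t page_offset = addr & PAGE_MASK;

    while (remaining_size > 0) {
        const std::size_t copy_amount =
            std::min<std::size_t>(PAGE_SIZE - page_offset, remaining_size);
        const std::uint64_t current_vaddr = (page_index << PAGE_BITS) + page_offset;

        visit(current_vaddr, copy_amount);

        ++page_index;
        page_offset = 0;
        remaining_size -= copy_amount;
    }
}
```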
| @@ -259,7 +209,7 @@ struct Memory::Impl { | |||
| 259 | 209 | ||
| 260 | void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, | 210 | void ReadBlockUnsafe(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, |
| 261 | const std::size_t size) { | 211 | const std::size_t size) { |
| 262 | const auto& page_table = process.VMManager().page_table; | 212 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 263 | 213 | ||
| 264 | std::size_t remaining_size = size; | 214 | std::size_t remaining_size = size; |
| 265 | std::size_t page_index = src_addr >> PAGE_BITS; | 215 | std::size_t page_index = src_addr >> PAGE_BITS; |
| @@ -287,7 +237,7 @@ struct Memory::Impl { | |||
| 287 | break; | 237 | break; |
| 288 | } | 238 | } |
| 289 | case Common::PageType::RasterizerCachedMemory: { | 239 | case Common::PageType::RasterizerCachedMemory: { |
| 290 | const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 240 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 291 | std::memcpy(dest_buffer, host_ptr, copy_amount); | 241 | std::memcpy(dest_buffer, host_ptr, copy_amount); |
| 292 | break; | 242 | break; |
| 293 | } | 243 | } |
| @@ -312,7 +262,7 @@ struct Memory::Impl { | |||
| 312 | 262 | ||
| 313 | void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, | 263 | void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, |
| 314 | const std::size_t size) { | 264 | const std::size_t size) { |
| 315 | const auto& page_table = process.VMManager().page_table; | 265 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 316 | std::size_t remaining_size = size; | 266 | std::size_t remaining_size = size; |
| 317 | std::size_t page_index = dest_addr >> PAGE_BITS; | 267 | std::size_t page_index = dest_addr >> PAGE_BITS; |
| 318 | std::size_t page_offset = dest_addr & PAGE_MASK; | 268 | std::size_t page_offset = dest_addr & PAGE_MASK; |
| @@ -338,7 +288,7 @@ struct Memory::Impl { | |||
| 338 | break; | 288 | break; |
| 339 | } | 289 | } |
| 340 | case Common::PageType::RasterizerCachedMemory: { | 290 | case Common::PageType::RasterizerCachedMemory: { |
| 341 | u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 291 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 342 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 292 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 343 | std::memcpy(host_ptr, src_buffer, copy_amount); | 293 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 344 | break; | 294 | break; |
| @@ -356,7 +306,7 @@ struct Memory::Impl { | |||
| 356 | 306 | ||
| 357 | void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr, | 307 | void WriteBlockUnsafe(const Kernel::Process& process, const VAddr dest_addr, |
| 358 | const void* src_buffer, const std::size_t size) { | 308 | const void* src_buffer, const std::size_t size) { |
| 359 | const auto& page_table = process.VMManager().page_table; | 309 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 360 | std::size_t remaining_size = size; | 310 | std::size_t remaining_size = size; |
| 361 | std::size_t page_index = dest_addr >> PAGE_BITS; | 311 | std::size_t page_index = dest_addr >> PAGE_BITS; |
| 362 | std::size_t page_offset = dest_addr & PAGE_MASK; | 312 | std::size_t page_offset = dest_addr & PAGE_MASK; |
| @@ -382,7 +332,7 @@ struct Memory::Impl { | |||
| 382 | break; | 332 | break; |
| 383 | } | 333 | } |
| 384 | case Common::PageType::RasterizerCachedMemory: { | 334 | case Common::PageType::RasterizerCachedMemory: { |
| 385 | u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 335 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 386 | std::memcpy(host_ptr, src_buffer, copy_amount); | 336 | std::memcpy(host_ptr, src_buffer, copy_amount); |
| 387 | break; | 337 | break; |
| 388 | } | 338 | } |
| @@ -406,7 +356,7 @@ struct Memory::Impl { | |||
| 406 | } | 356 | } |
| 407 | 357 | ||
| 408 | void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { | 358 | void ZeroBlock(const Kernel::Process& process, const VAddr dest_addr, const std::size_t size) { |
| 409 | const auto& page_table = process.VMManager().page_table; | 359 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 410 | std::size_t remaining_size = size; | 360 | std::size_t remaining_size = size; |
| 411 | std::size_t page_index = dest_addr >> PAGE_BITS; | 361 | std::size_t page_index = dest_addr >> PAGE_BITS; |
| 412 | std::size_t page_offset = dest_addr & PAGE_MASK; | 362 | std::size_t page_offset = dest_addr & PAGE_MASK; |
| @@ -432,7 +382,7 @@ struct Memory::Impl { | |||
| 432 | break; | 382 | break; |
| 433 | } | 383 | } |
| 434 | case Common::PageType::RasterizerCachedMemory: { | 384 | case Common::PageType::RasterizerCachedMemory: { |
| 435 | u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 385 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 436 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); | 386 | system.GPU().InvalidateRegion(current_vaddr, copy_amount); |
| 437 | std::memset(host_ptr, 0, copy_amount); | 387 | std::memset(host_ptr, 0, copy_amount); |
| 438 | break; | 388 | break; |
| @@ -453,7 +403,7 @@ struct Memory::Impl { | |||
| 453 | 403 | ||
| 454 | void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, | 404 | void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, |
| 455 | const std::size_t size) { | 405 | const std::size_t size) { |
| 456 | const auto& page_table = process.VMManager().page_table; | 406 | const auto& page_table = process.PageTable().PageTableImpl(); |
| 457 | std::size_t remaining_size = size; | 407 | std::size_t remaining_size = size; |
| 458 | std::size_t page_index = src_addr >> PAGE_BITS; | 408 | std::size_t page_index = src_addr >> PAGE_BITS; |
| 459 | std::size_t page_offset = src_addr & PAGE_MASK; | 409 | std::size_t page_offset = src_addr & PAGE_MASK; |
| @@ -479,7 +429,7 @@ struct Memory::Impl { | |||
| 479 | break; | 429 | break; |
| 480 | } | 430 | } |
| 481 | case Common::PageType::RasterizerCachedMemory: { | 431 | case Common::PageType::RasterizerCachedMemory: { |
| 482 | const u8* const host_ptr = GetPointerFromVMA(process, current_vaddr); | 432 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(current_vaddr)}; |
| 483 | system.GPU().FlushRegion(current_vaddr, copy_amount); | 433 | system.GPU().FlushRegion(current_vaddr, copy_amount); |
| 484 | WriteBlock(process, dest_addr, host_ptr, copy_amount); | 434 | WriteBlock(process, dest_addr, host_ptr, copy_amount); |
| 485 | break; | 435 | break; |
| @@ -512,7 +462,7 @@ struct Memory::Impl { | |||
| 512 | 462 | ||
| 513 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; | 463 | u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1; |
| 514 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { | 464 | for (unsigned i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) { |
| 515 | Common::PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; | 465 | Common::PageType& page_type{current_page_table->attributes[vaddr >> PAGE_BITS]}; |
| 516 | 466 | ||
| 517 | if (cached) { | 467 | if (cached) { |
| 518 | // Switch page type to cached if now cached | 468 | // Switch page type to cached if now cached |
| @@ -544,7 +494,7 @@ struct Memory::Impl { | |||
| 544 | // that this area is already unmarked as cached. | 494 | // that this area is already unmarked as cached. |
| 545 | break; | 495 | break; |
| 546 | case Common::PageType::RasterizerCachedMemory: { | 496 | case Common::PageType::RasterizerCachedMemory: { |
| 547 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); | 497 | u8* pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)}; |
| 548 | if (pointer == nullptr) { | 498 | if (pointer == nullptr) { |
| 549 | // It's possible that this function has been called while updating the | 499 | // It's possible that this function has been called while updating the |
| 550 | // pagetable after unmapping a VMA. In that case the underlying VMA will no | 500 | // pagetable after unmapping a VMA. In that case the underlying VMA will no |
| @@ -573,9 +523,9 @@ struct Memory::Impl { | |||
| 573 | * @param memory The memory to map. | 523 | * @param memory The memory to map. |
| 574 | * @param type The page type to map the memory as. | 524 | * @param type The page type to map the memory as. |
| 575 | */ | 525 | */ |
| 576 | void MapPages(Common::PageTable& page_table, VAddr base, u64 size, u8* memory, | 526 | void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target, |
| 577 | Common::PageType type) { | 527 | Common::PageType type) { |
| 578 | LOG_DEBUG(HW_Memory, "Mapping {} onto {:016X}-{:016X}", fmt::ptr(memory), base * PAGE_SIZE, | 528 | LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE, |
| 579 | (base + size) * PAGE_SIZE); | 529 | (base + size) * PAGE_SIZE); |
| 580 | 530 | ||
| 581 | // During boot, current_page_table might not be set yet, in which case we need not flush | 531 | // During boot, current_page_table might not be set yet, in which case we need not flush |
| @@ -593,19 +543,26 @@ struct Memory::Impl { | |||
| 593 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", | 543 | ASSERT_MSG(end <= page_table.pointers.size(), "out of range mapping at {:016X}", |
| 594 | base + page_table.pointers.size()); | 544 | base + page_table.pointers.size()); |
| 595 | 545 | ||
| 596 | std::fill(page_table.attributes.begin() + base, page_table.attributes.begin() + end, type); | 546 | if (!target) { |
| 547 | while (base != end) { | ||
| 548 | page_table.pointers[base] = nullptr; | ||
| 549 | page_table.attributes[base] = type; | ||
| 550 | page_table.backing_addr[base] = 0; | ||
| 597 | 551 | ||
| 598 | if (memory == nullptr) { | 552 | base += 1; |
| 599 | std::fill(page_table.pointers.begin() + base, page_table.pointers.begin() + end, | 553 | } |
| 600 | memory); | ||
| 601 | } else { | 554 | } else { |
| 602 | while (base != end) { | 555 | while (base != end) { |
| 603 | page_table.pointers[base] = memory - (base << PAGE_BITS); | 556 | page_table.pointers[base] = |
| 557 | system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS); | ||
| 558 | page_table.attributes[base] = type; | ||
| 559 | page_table.backing_addr[base] = target - (base << PAGE_BITS); | ||
| 560 | |||
| 604 | ASSERT_MSG(page_table.pointers[base], | 561 | ASSERT_MSG(page_table.pointers[base], |
| 605 | "memory mapping base yield a nullptr within the table"); | 562 | "memory mapping base yield a nullptr within the table"); |
| 606 | 563 | ||
| 607 | base += 1; | 564 | base += 1; |
| 608 | memory += PAGE_SIZE; | 565 | target += PAGE_SIZE; |
| 609 | } | 566 | } |
| 610 | } | 567 | } |
| 611 | } | 568 | } |
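MapPages is where the new model comes together: each mapped slot gets a host pointer and a backing physical address, both pre-biased by the page's virtual base, and a zero target doubles as the unmap path that clears all three arrays. A self-contained round-trip sketch under simplified stand-in types follows; `ToyDeviceMemory` approximates `system.DeviceMemory()` as a single flat allocation, which is an assumption for illustration only.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr std::uint64_t PAGE_BITS = 12;
constexpr std::uint64_t PAGE_SIZE = 1ULL << PAGE_BITS;

enum class PageType { Unmapped, Memory };

struct ToyDeviceMemory {
    std::vector<std::uint8_t> buffer = std::vector<std::uint8_t>(16 * PAGE_SIZE);
    std::uint8_t* GetPointer(std::uint64_t paddr) { return buffer.data() + paddr; }
};

struct ToyPageTable {
    std::vector<std::uint8_t*> pointers = std::vector<std::uint8_t*>(64);
    std::vector<PageType> attributes = std::vector<PageType>(64, PageType::Unmapped);
    std::vector<std::uint64_t> backing_addr = std::vector<std::uint64_t>(64);
};

void MapPages(ToyPageTable& pt, ToyDeviceMemory& dev, std::uint64_t base, std::uint64_t size,
              std::uint64_t target, PageType type) {
    const std::uint64_t end = base + size;
    if (target == 0) {
        // Unmap path: the caller passes PageType::Unmapped here.
        for (; base != end; ++base) {
            pt.pointers[base] = nullptr;
            pt.attributes[base] = type;
            pt.backing_addr[base] = 0;
        }
        return;
    }
    for (; base != end; ++base, target += PAGE_SIZE) {
        // Store values biased by the page's virtual base so lookups just add vaddr.
        pt.pointers[base] = dev.GetPointer(target) - (base << PAGE_BITS);
        pt.attributes[base] = type;
        pt.backing_addr[base] = target - (base << PAGE_BITS);
    }
}

int main() {
    ToyPageTable pt;
    ToyDeviceMemory dev;
    // Map 4 virtual pages starting at virtual page 8 onto physical pages starting at 0x4000.
    MapPages(pt, dev, 8, 4, 0x4000, PageType::Memory);
    const std::uint64_t vaddr = (8ULL << PAGE_BITS) + 0x42;
    std::uint8_t* host = pt.pointers[vaddr >> PAGE_BITS] + vaddr; // the biased-add lookup
    assert(host == dev.GetPointer(0x4000 + 0x42));
    std::puts("vaddr resolved to the expected host byte");
}
```

The pre-biased stores are the same trick the real code uses (`... - (base << PAGE_BITS)`) so that a later `pointers[vaddr >> PAGE_BITS] + vaddr` needs no extra subtraction.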
| @@ -640,7 +597,7 @@ struct Memory::Impl { | |||
| 640 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 597 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 641 | break; | 598 | break; |
| 642 | case Common::PageType::RasterizerCachedMemory: { | 599 | case Common::PageType::RasterizerCachedMemory: { |
| 643 | const u8* const host_ptr = GetPointerFromVMA(vaddr); | 600 | const u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; |
| 644 | system.GPU().FlushRegion(vaddr, sizeof(T)); | 601 | system.GPU().FlushRegion(vaddr, sizeof(T)); |
| 645 | T value; | 602 | T value; |
| 646 | std::memcpy(&value, host_ptr, sizeof(T)); | 603 | std::memcpy(&value, host_ptr, sizeof(T)); |
| @@ -682,7 +639,7 @@ struct Memory::Impl { | |||
| 682 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); | 639 | ASSERT_MSG(false, "Mapped memory page without a pointer @ {:016X}", vaddr); |
| 683 | break; | 640 | break; |
| 684 | case Common::PageType::RasterizerCachedMemory: { | 641 | case Common::PageType::RasterizerCachedMemory: { |
| 685 | u8* const host_ptr{GetPointerFromVMA(vaddr)}; | 642 | u8* const host_ptr{GetPointerFromRasterizerCachedMemory(vaddr)}; |
| 686 | system.GPU().InvalidateRegion(vaddr, sizeof(T)); | 643 | system.GPU().InvalidateRegion(vaddr, sizeof(T)); |
| 687 | std::memcpy(host_ptr, &data, sizeof(T)); | 644 | std::memcpy(host_ptr, &data, sizeof(T)); |
| 688 | break; | 645 | break; |
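The templated Read/Write helpers keep their shape; the only change in these hunks is where the host pointer for rasterizer-cached pages comes from. The sketch below shows the dispatch once a page's type and host pointer are known; `FakeGpu` is a placeholder for `system.GPU()` (an assumption for illustration), since the point is the flush-before-read / invalidate-before-write ordering.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

enum class PageType { Unmapped, Memory, RasterizerCachedMemory };

struct FakeGpu {
    void FlushRegion(std::uint64_t, std::size_t) {}      // stands in for GPU().FlushRegion
    void InvalidateRegion(std::uint64_t, std::size_t) {} // stands in for GPU().InvalidateRegion
};

template <typename T>
T Read(PageType type, const std::uint8_t* host_ptr, std::uint64_t vaddr, FakeGpu& gpu) {
    T value{};
    switch (type) {
    case PageType::Memory:
        std::memcpy(&value, host_ptr, sizeof(T));
        break;
    case PageType::RasterizerCachedMemory:
        gpu.FlushRegion(vaddr, sizeof(T)); // let the GPU write back before the CPU reads
        std::memcpy(&value, host_ptr, sizeof(T));
        break;
    default:
        break; // unmapped: the real code logs an error and returns 0
    }
    return value;
}

template <typename T>
void Write(PageType type, std::uint8_t* host_ptr, std::uint64_t vaddr, FakeGpu& gpu, const T& data) {
    switch (type) {
    case PageType::Memory:
        std::memcpy(host_ptr, &data, sizeof(T));
        break;
    case PageType::RasterizerCachedMemory:
        gpu.InvalidateRegion(vaddr, sizeof(T)); // drop the GPU's stale copy before the CPU writes
        std::memcpy(host_ptr, &data, sizeof(T));
        break;
    default:
        break; // unmapped: the real code logs an error and discards the write
    }
}
```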
| @@ -703,12 +660,7 @@ void Memory::SetCurrentPageTable(Kernel::Process& process) { | |||
| 703 | impl->SetCurrentPageTable(process); | 660 | impl->SetCurrentPageTable(process); |
| 704 | } | 661 | } |
| 705 | 662 | ||
| 706 | void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, | 663 | void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) { |
| 707 | Kernel::PhysicalMemory& memory, VAddr offset) { | ||
| 708 | impl->MapMemoryRegion(page_table, base, size, memory, offset); | ||
| 709 | } | ||
| 710 | |||
| 711 | void Memory::MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, u8* target) { | ||
| 712 | impl->MapMemoryRegion(page_table, base, size, target); | 664 | impl->MapMemoryRegion(page_table, base, size, target); |
| 713 | } | 665 | } |
| 714 | 666 | ||