| author    | 2018-01-27 15:16:39 +0000 |
|-----------|---------------------------|
| committer | 2018-01-27 15:16:39 +0000 |
| commit    | 738f91a57da7c129d1ee85b7abbf6858f8669ee3 (patch) |
| tree      | 3ef164d8e79c0aea6ab72dc9b8fa78877a82338a /src |
| parent    | externals: Update dynarmic (diff) |
| download  | yuzu-738f91a57da7c129d1ee85b7abbf6858f8669ee3.tar.gz, yuzu-738f91a57da7c129d1ee85b7abbf6858f8669ee3.tar.xz, yuzu-738f91a57da7c129d1ee85b7abbf6858f8669ee3.zip |
memory: Replace all memory hooking with Special regions
Diffstat (limited to 'src')
| mode       | file                                                | lines changed |
|------------|-----------------------------------------------------|---------------|
| -rw-r--r-- | src/core/CMakeLists.txt                             | 2   |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.cpp                  | 6   |
| -rw-r--r-- | src/core/hle/kernel/vm_manager.h                    | 6   |
| -rw-r--r-- | src/core/memory.cpp                                 | 480 |
| -rw-r--r-- | src/core/memory.h                                   | 72  |
| -rw-r--r-- | src/core/memory_hook.h                              | 46  |
| -rw-r--r-- | src/core/memory_setup.h                             | 8   |
| -rw-r--r-- | src/core/mmio.h                                     | 38  |
| -rw-r--r-- | src/tests/core/arm/arm_test_common.cpp              | 32  |
| -rw-r--r-- | src/tests/core/arm/arm_test_common.h                | 22  |
| -rw-r--r-- | src/video_core/renderer_opengl/renderer_opengl.cpp  | 2   |
11 files changed, 273 insertions, 441 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 7153c4f3f..14bc99ba6 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
| @@ -163,8 +163,8 @@ add_library(core STATIC | |||
| 163 | loader/nso.h | 163 | loader/nso.h |
| 164 | memory.cpp | 164 | memory.cpp |
| 165 | memory.h | 165 | memory.h |
| 166 | memory_hook.h | ||
| 166 | memory_setup.h | 167 | memory_setup.h |
| 167 | mmio.h | ||
| 168 | perf_stats.cpp | 168 | perf_stats.cpp |
| 169 | perf_stats.h | 169 | perf_stats.h |
| 170 | settings.cpp | 170 | settings.cpp |
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 93662a45e..6da77eb58 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
| @@ -10,8 +10,8 @@ | |||
| 10 | #include "core/hle/kernel/errors.h" | 10 | #include "core/hle/kernel/errors.h" |
| 11 | #include "core/hle/kernel/vm_manager.h" | 11 | #include "core/hle/kernel/vm_manager.h" |
| 12 | #include "core/memory.h" | 12 | #include "core/memory.h" |
| 13 | #include "core/memory_hook.h" | ||
| 13 | #include "core/memory_setup.h" | 14 | #include "core/memory_setup.h" |
| 14 | #include "core/mmio.h" | ||
| 15 | 15 | ||
| 16 | namespace Kernel { | 16 | namespace Kernel { |
| 17 | 17 | ||
| @@ -60,8 +60,8 @@ void VMManager::Reset() { | |||
| 60 | vma_map.emplace(initial_vma.base, initial_vma); | 60 | vma_map.emplace(initial_vma.base, initial_vma); |
| 61 | 61 | ||
| 62 | page_table.pointers.fill(nullptr); | 62 | page_table.pointers.fill(nullptr); |
| 63 | page_table.special_regions.clear(); | ||
| 63 | page_table.attributes.fill(Memory::PageType::Unmapped); | 64 | page_table.attributes.fill(Memory::PageType::Unmapped); |
| 64 | page_table.cached_res_count.fill(0); | ||
| 65 | 65 | ||
| 66 | UpdatePageTableForVMA(initial_vma); | 66 | UpdatePageTableForVMA(initial_vma); |
| 67 | } | 67 | } |
| @@ -121,7 +121,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me | |||
| 121 | 121 | ||
| 122 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, | 122 | ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size, |
| 123 | MemoryState state, | 123 | MemoryState state, |
| 124 | Memory::MMIORegionPointer mmio_handler) { | 124 | Memory::MemoryHookPointer mmio_handler) { |
| 125 | // This is the appropriately sized VMA that will turn into our allocation. | 125 | // This is the appropriately sized VMA that will turn into our allocation. |
| 126 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); | 126 | CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size)); |
| 127 | VirtualMemoryArea& final_vma = vma_handle->second; | 127 | VirtualMemoryArea& final_vma = vma_handle->second; |
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index b17385c7c..8de704a60 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
| @@ -10,7 +10,7 @@ | |||
| 10 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 11 | #include "core/hle/result.h" | 11 | #include "core/hle/result.h" |
| 12 | #include "core/memory.h" | 12 | #include "core/memory.h" |
| 13 | #include "core/mmio.h" | 13 | #include "core/memory_hook.h" |
| 14 | 14 | ||
| 15 | namespace Kernel { | 15 | namespace Kernel { |
| 16 | 16 | ||
| @@ -81,7 +81,7 @@ struct VirtualMemoryArea { | |||
| 81 | // Settings for type = MMIO | 81 | // Settings for type = MMIO |
| 82 | /// Physical address of the register area this VMA maps to. | 82 | /// Physical address of the register area this VMA maps to. |
| 83 | PAddr paddr = 0; | 83 | PAddr paddr = 0; |
| 84 | Memory::MMIORegionPointer mmio_handler = nullptr; | 84 | Memory::MemoryHookPointer mmio_handler = nullptr; |
| 85 | 85 | ||
| 86 | /// Tests if this area can be merged to the right with `next`. | 86 | /// Tests if this area can be merged to the right with `next`. |
| 87 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; | 87 | bool CanBeMergedWith(const VirtualMemoryArea& next) const; |
| @@ -160,7 +160,7 @@ public: | |||
| 160 | * @param mmio_handler The handler that will implement read and write for this MMIO region. | 160 | * @param mmio_handler The handler that will implement read and write for this MMIO region. |
| 161 | */ | 161 | */ |
| 162 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, | 162 | ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state, |
| 163 | Memory::MMIORegionPointer mmio_handler); | 163 | Memory::MemoryHookPointer mmio_handler); |
| 164 | 164 | ||
| 165 | /// Unmaps a range of addresses, splitting VMAs as necessary. | 165 | /// Unmaps a range of addresses, splitting VMAs as necessary. |
| 166 | ResultCode UnmapRange(VAddr target, u64 size); | 166 | ResultCode UnmapRange(VAddr target, u64 size); |
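With MapMMIO now taking a Memory::MemoryHookPointer, callers pass the same hook type used everywhere else. Below is a hedged sketch of what a caller might look like; the function name, addresses, and the state/hook arguments are placeholders, not code from this commit.

```cpp
// Hypothetical caller of the updated VMManager::MapMMIO signature.
// Addresses, size and the state/device_hook arguments are placeholders.
#include "common/assert.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/memory_hook.h"

Kernel::VMManager::VMAHandle MapHypotheticalDeviceRegisters(
    Kernel::VMManager& vm_manager, Kernel::MemoryState state,
    Memory::MemoryHookPointer device_hook) {
    const VAddr device_vaddr = 0x1F000000; // hypothetical, page-aligned
    const PAddr device_paddr = 0x10400000; // hypothetical, page-aligned
    const u64 device_size = 0x1000;        // one page

    // The hook (rather than the removed MMIORegionPointer) now services
    // reads and writes to the mapped region.
    auto result = vm_manager.MapMMIO(device_vaddr, device_paddr, device_size,
                                     state, device_hook);
    ASSERT(result.Succeeded());
    return *result;
}
```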
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index a3d2d4951..f658271a5 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
| @@ -2,8 +2,10 @@ | |||
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <algorithm> | ||
| 5 | #include <array> | 6 | #include <array> |
| 6 | #include <cstring> | 7 | #include <cstring> |
| 8 | #include <boost/optional.hpp> | ||
| 7 | #include "common/assert.h" | 9 | #include "common/assert.h" |
| 8 | #include "common/common_types.h" | 10 | #include "common/common_types.h" |
| 9 | #include "common/logging/log.h" | 11 | #include "common/logging/log.h" |
| @@ -12,7 +14,6 @@ | |||
| 12 | #include "core/core.h" | 14 | #include "core/core.h" |
| 13 | #include "core/hle/kernel/memory.h" | 15 | #include "core/hle/kernel/memory.h" |
| 14 | #include "core/hle/kernel/process.h" | 16 | #include "core/hle/kernel/process.h" |
| 15 | #include "core/hle/lock.h" | ||
| 16 | #include "core/memory.h" | 17 | #include "core/memory.h" |
| 17 | #include "core/memory_setup.h" | 18 | #include "core/memory_setup.h" |
| 18 | #include "video_core/renderer_base.h" | 19 | #include "video_core/renderer_base.h" |
| @@ -40,16 +41,12 @@ static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, Pa | |||
| 40 | LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE, | 41 | LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE, |
| 41 | (base + size) * PAGE_SIZE); | 42 | (base + size) * PAGE_SIZE); |
| 42 | 43 | ||
| 43 | RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE, | ||
| 44 | FlushMode::FlushAndInvalidate); | ||
| 45 | |||
| 46 | VAddr end = base + size; | 44 | VAddr end = base + size; |
| 47 | while (base != end) { | 45 | while (base != end) { |
| 48 | ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base); | 46 | ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base); |
| 49 | 47 | ||
| 50 | page_table.attributes[base] = type; | 48 | page_table.attributes[base] = type; |
| 51 | page_table.pointers[base] = memory; | 49 | page_table.pointers[base] = memory; |
| 52 | page_table.cached_res_count[base] = 0; | ||
| 53 | 50 | ||
| 54 | base += 1; | 51 | base += 1; |
| 55 | if (memory != nullptr) | 52 | if (memory != nullptr) |
| @@ -63,157 +60,110 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) { | |||
| 63 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); | 60 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); |
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MMIORegionPointer mmio_handler) { | 63 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler) { |
| 67 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); | 64 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); |
| 68 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); | 65 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); |
| 69 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); | 66 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); |
| 70 | 67 | ||
| 71 | page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler}); | 68 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 69 | SpecialRegion region{SpecialRegion::Type::IODevice, mmio_handler}; | ||
| 70 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | ||
| 72 | } | 71 | } |
| 73 | 72 | ||
| 74 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size) { | 73 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size) { |
| 75 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); | 74 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); |
| 76 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); | 75 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); |
| 77 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped); | 76 | MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped); |
| 78 | } | ||
| 79 | |||
| 80 | /** | ||
| 81 | * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) | ||
| 82 | * using a VMA from the current process | ||
| 83 | */ | ||
| 84 | static u8* GetPointerFromVMA(const Kernel::Process& process, VAddr vaddr) { | ||
| 85 | u8* direct_pointer = nullptr; | ||
| 86 | 77 | ||
| 87 | auto& vm_manager = process.vm_manager; | 78 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 88 | 79 | page_table.special_regions.erase(interval); | |
| 89 | auto it = vm_manager.FindVMA(vaddr); | 80 | } |
| 90 | ASSERT(it != vm_manager.vma_map.end()); | ||
| 91 | |||
| 92 | auto& vma = it->second; | ||
| 93 | switch (vma.type) { | ||
| 94 | case Kernel::VMAType::AllocatedMemoryBlock: | ||
| 95 | direct_pointer = vma.backing_block->data() + vma.offset; | ||
| 96 | break; | ||
| 97 | case Kernel::VMAType::BackingMemory: | ||
| 98 | direct_pointer = vma.backing_memory; | ||
| 99 | break; | ||
| 100 | case Kernel::VMAType::Free: | ||
| 101 | return nullptr; | ||
| 102 | default: | ||
| 103 | UNREACHABLE(); | ||
| 104 | } | ||
| 105 | 81 | ||
| 106 | return direct_pointer + (vaddr - vma.base); | 82 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { |
| 83 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); | ||
| 84 | SpecialRegion region{SpecialRegion::Type::DebugHook, hook}; | ||
| 85 | page_table.special_regions.add(std::make_pair(interval, std::set<SpecialRegion>{region})); | ||
| 107 | } | 86 | } |
| 108 | 87 | ||
| 109 | /** | 88 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook) { |
| 110 | * Gets a pointer to the exact memory at the virtual address (i.e. not page aligned) | 89 | auto interval = boost::icl::discrete_interval<VAddr>::closed(base, base + size - 1); |
| 111 | * using a VMA from the current process. | 90 | SpecialRegion region{SpecialRegion::Type::DebugHook, hook}; |
| 112 | */ | 91 | page_table.special_regions.subtract(std::make_pair(interval, std::set<SpecialRegion>{region})); |
| 113 | static u8* GetPointerFromVMA(VAddr vaddr) { | ||
| 114 | return GetPointerFromVMA(*Kernel::g_current_process, vaddr); | ||
| 115 | } | 92 | } |
| 116 | 93 | ||
| 117 | /** | 94 | /** |
| 118 | * This function should only be called for virtual addreses with attribute `PageType::Special`. | 95 | * This function should only be called for virtual addreses with attribute `PageType::Special`. |
| 119 | */ | 96 | */ |
| 120 | static MMIORegionPointer GetMMIOHandler(const PageTable& page_table, VAddr vaddr) { | 97 | static std::set<MemoryHookPointer> GetSpecialHandlers(const PageTable& page_table, VAddr vaddr, |
| 121 | for (const auto& region : page_table.special_regions) { | 98 | u64 size) { |
| 122 | if (vaddr >= region.base && vaddr < (region.base + region.size)) { | 99 | std::set<MemoryHookPointer> result; |
| 123 | return region.handler; | 100 | auto interval = boost::icl::discrete_interval<VAddr>::closed(vaddr, vaddr + size - 1); |
| 101 | auto interval_list = page_table.special_regions.equal_range(interval); | ||
| 102 | for (auto it = interval_list.first; it != interval_list.second; ++it) { | ||
| 103 | for (const auto& region : it->second) { | ||
| 104 | result.insert(region.handler); | ||
| 124 | } | 105 | } |
| 125 | } | 106 | } |
| 126 | ASSERT_MSG(false, "Mapped IO page without a handler @ %08X", vaddr); | 107 | return result; |
| 127 | return nullptr; // Should never happen | ||
| 128 | } | 108 | } |
| 129 | 109 | ||
| 130 | static MMIORegionPointer GetMMIOHandler(VAddr vaddr) { | 110 | static std::set<MemoryHookPointer> GetSpecialHandlers(VAddr vaddr, u64 size) { |
| 131 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; | 111 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 132 | return GetMMIOHandler(page_table, vaddr); | 112 | return GetSpecialHandlers(page_table, vaddr, size); |
| 133 | } | 113 | } |
| 134 | 114 | ||
| 135 | template <typename T> | 115 | template <typename T> |
| 136 | T ReadMMIO(MMIORegionPointer mmio_handler, VAddr addr); | 116 | boost::optional<T> ReadSpecial(VAddr addr); |
| 137 | 117 | ||
| 138 | template <typename T> | 118 | template <typename T> |
| 139 | T Read(const VAddr vaddr) { | 119 | T Read(const VAddr vaddr) { |
| 140 | const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; | 120 | const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 141 | if (page_pointer) { | ||
| 142 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 143 | T value; | ||
| 144 | std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); | ||
| 145 | return value; | ||
| 146 | } | ||
| 147 | |||
| 148 | // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state | ||
| 149 | std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); | ||
| 150 | |||
| 151 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 152 | switch (type) { | 121 | switch (type) { |
| 153 | case PageType::Unmapped: | 122 | case PageType::Unmapped: |
| 154 | LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%llx", sizeof(T) * 8, vaddr); | 123 | LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%016llX", sizeof(T) * 8, vaddr); |
| 155 | return 0; | 124 | return 0; |
| 156 | case PageType::Memory: | 125 | case PageType::Special: { |
| 157 | ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); | 126 | if (auto result = ReadSpecial<T>(vaddr)) |
| 158 | break; | 127 | return *result; |
| 159 | case PageType::RasterizerCachedMemory: { | 128 | [[fallthrough]]; |
| 160 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush); | 129 | } |
| 130 | case PageType::Memory: { | ||
| 131 | const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; | ||
| 132 | ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %08X", vaddr); | ||
| 161 | 133 | ||
| 162 | T value; | 134 | T value; |
| 163 | std::memcpy(&value, GetPointerFromVMA(vaddr), sizeof(T)); | 135 | std::memcpy(&value, &page_pointer[vaddr & PAGE_MASK], sizeof(T)); |
| 164 | return value; | 136 | return value; |
| 165 | } | 137 | } |
| 166 | case PageType::Special: | ||
| 167 | return ReadMMIO<T>(GetMMIOHandler(vaddr), vaddr); | ||
| 168 | case PageType::RasterizerCachedSpecial: { | ||
| 169 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Flush); | ||
| 170 | return ReadMMIO<T>(GetMMIOHandler(vaddr), vaddr); | ||
| 171 | } | ||
| 172 | default: | ||
| 173 | UNREACHABLE(); | ||
| 174 | } | 138 | } |
| 139 | UNREACHABLE(); | ||
| 140 | return 0; | ||
| 175 | } | 141 | } |
| 176 | 142 | ||
| 177 | template <typename T> | 143 | template <typename T> |
| 178 | void WriteMMIO(MMIORegionPointer mmio_handler, VAddr addr, const T data); | 144 | bool WriteSpecial(VAddr addr, const T data); |
| 179 | 145 | ||
| 180 | template <typename T> | 146 | template <typename T> |
| 181 | void Write(const VAddr vaddr, const T data) { | 147 | void Write(const VAddr vaddr, const T data) { |
| 182 | u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; | 148 | const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 183 | if (page_pointer) { | ||
| 184 | // NOTE: Avoid adding any extra logic to this fast-path block | ||
| 185 | std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); | ||
| 186 | return; | ||
| 187 | } | ||
| 188 | |||
| 189 | // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state | ||
| 190 | std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); | ||
| 191 | |||
| 192 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 193 | switch (type) { | 149 | switch (type) { |
| 194 | case PageType::Unmapped: | 150 | case PageType::Unmapped: |
| 195 | LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, | 151 | LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, |
| 196 | vaddr); | 152 | vaddr); |
| 197 | return; | 153 | return; |
| 198 | case PageType::Memory: | 154 | case PageType::Special: { |
| 199 | ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); | 155 | if (WriteSpecial<T>(vaddr, data)) |
| 200 | break; | 156 | return; |
| 201 | case PageType::RasterizerCachedMemory: { | 157 | [[fallthrough]]; |
| 202 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::FlushAndInvalidate); | ||
| 203 | std::memcpy(GetPointerFromVMA(vaddr), &data, sizeof(T)); | ||
| 204 | break; | ||
| 205 | } | 158 | } |
| 206 | case PageType::Special: | 159 | case PageType::Memory: { |
| 207 | WriteMMIO<T>(GetMMIOHandler(vaddr), vaddr, data); | 160 | u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
| 208 | break; | 161 | ASSERT_MSG(page_pointer, "Mapped memory page without a pointer @ %08X", vaddr); |
| 209 | case PageType::RasterizerCachedSpecial: { | 162 | std::memcpy(&page_pointer[vaddr & PAGE_MASK], &data, sizeof(T)); |
| 210 | RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::FlushAndInvalidate); | 163 | return; |
| 211 | WriteMMIO<T>(GetMMIOHandler(vaddr), vaddr, data); | ||
| 212 | break; | ||
| 213 | } | 164 | } |
| 214 | default: | ||
| 215 | UNREACHABLE(); | ||
| 216 | } | 165 | } |
| 166 | UNREACHABLE(); | ||
| 217 | } | 167 | } |
| 218 | 168 | ||
| 219 | bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { | 169 | bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { |
| @@ -222,21 +172,20 @@ bool IsValidVirtualAddress(const Kernel::Process& process, const VAddr vaddr) { | |||
| 222 | if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES) | 172 | if ((vaddr >> PAGE_BITS) >= PAGE_TABLE_NUM_ENTRIES) |
| 223 | return false; | 173 | return false; |
| 224 | 174 | ||
| 225 | const u8* page_pointer = page_table.pointers[vaddr >> PAGE_BITS]; | 175 | const PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 226 | if (page_pointer) | 176 | switch (type) { |
| 227 | return true; | 177 | case PageType::Unmapped: |
| 228 | |||
| 229 | if (page_table.attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) | ||
| 230 | return true; | ||
| 231 | |||
| 232 | if (page_table.attributes[vaddr >> PAGE_BITS] != PageType::Special) | ||
| 233 | return false; | 178 | return false; |
| 234 | 179 | case PageType::Memory: | |
| 235 | MMIORegionPointer mmio_region = GetMMIOHandler(page_table, vaddr); | 180 | return true; |
| 236 | if (mmio_region) { | 181 | case PageType::Special: { |
| 237 | return mmio_region->IsValidAddress(vaddr); | 182 | for (auto handler : GetSpecialHandlers(page_table, vaddr, 1)) |
| 183 | if (auto result = handler->IsValidAddress(vaddr)) | ||
| 184 | return *result; | ||
| 185 | return current_page_table->pointers[vaddr >> PAGE_BITS] != nullptr; | ||
| 238 | } | 186 | } |
| 239 | 187 | } | |
| 188 | UNREACHABLE(); | ||
| 240 | return false; | 189 | return false; |
| 241 | } | 190 | } |
| 242 | 191 | ||
| @@ -254,10 +203,6 @@ u8* GetPointer(const VAddr vaddr) { | |||
| 254 | return page_pointer + (vaddr & PAGE_MASK); | 203 | return page_pointer + (vaddr & PAGE_MASK); |
| 255 | } | 204 | } |
| 256 | 205 | ||
| 257 | if (current_page_table->attributes[vaddr >> PAGE_BITS] == PageType::RasterizerCachedMemory) { | ||
| 258 | return GetPointerFromVMA(vaddr); | ||
| 259 | } | ||
| 260 | |||
| 261 | LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr); | 206 | LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr); |
| 262 | return nullptr; | 207 | return nullptr; |
| 263 | } | 208 | } |
| @@ -335,97 +280,6 @@ u8* GetPhysicalPointer(PAddr address) { | |||
| 335 | return target_pointer; | 280 | return target_pointer; |
| 336 | } | 281 | } |
| 337 | 282 | ||
| 338 | void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) { | ||
| 339 | if (start == 0) { | ||
| 340 | return; | ||
| 341 | } | ||
| 342 | |||
| 343 | u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1; | ||
| 344 | PAddr paddr = start; | ||
| 345 | |||
| 346 | for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) { | ||
| 347 | boost::optional<VAddr> maybe_vaddr = PhysicalToVirtualAddress(paddr); | ||
| 348 | // While the physical <-> virtual mapping is 1:1 for the regions supported by the cache, | ||
| 349 | // some games (like Pokemon Super Mystery Dungeon) will try to use textures that go beyond | ||
| 350 | // the end address of VRAM, causing the Virtual->Physical translation to fail when flushing | ||
| 351 | // parts of the texture. | ||
| 352 | if (!maybe_vaddr) { | ||
| 353 | LOG_ERROR(HW_Memory, | ||
| 354 | "Trying to flush a cached region to an invalid physical address %08X", paddr); | ||
| 355 | continue; | ||
| 356 | } | ||
| 357 | VAddr vaddr = *maybe_vaddr; | ||
| 358 | |||
| 359 | u8& res_count = current_page_table->cached_res_count[vaddr >> PAGE_BITS]; | ||
| 360 | ASSERT_MSG(count_delta <= UINT8_MAX - res_count, | ||
| 361 | "Rasterizer resource cache counter overflow!"); | ||
| 362 | ASSERT_MSG(count_delta >= -res_count, "Rasterizer resource cache counter underflow!"); | ||
| 363 | |||
| 364 | // Switch page type to cached if now cached | ||
| 365 | if (res_count == 0) { | ||
| 366 | PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 367 | switch (page_type) { | ||
| 368 | case PageType::Unmapped: | ||
| 369 | // It is not necessary for a process to have this region mapped into its address | ||
| 370 | // space, for example, a system module need not have a VRAM mapping. | ||
| 371 | break; | ||
| 372 | case PageType::Memory: | ||
| 373 | page_type = PageType::RasterizerCachedMemory; | ||
| 374 | current_page_table->pointers[vaddr >> PAGE_BITS] = nullptr; | ||
| 375 | break; | ||
| 376 | case PageType::Special: | ||
| 377 | page_type = PageType::RasterizerCachedSpecial; | ||
| 378 | break; | ||
| 379 | default: | ||
| 380 | UNREACHABLE(); | ||
| 381 | } | ||
| 382 | } | ||
| 383 | |||
| 384 | res_count += count_delta; | ||
| 385 | |||
| 386 | // Switch page type to uncached if now uncached | ||
| 387 | if (res_count == 0) { | ||
| 388 | PageType& page_type = current_page_table->attributes[vaddr >> PAGE_BITS]; | ||
| 389 | switch (page_type) { | ||
| 390 | case PageType::Unmapped: | ||
| 391 | // It is not necessary for a process to have this region mapped into its address | ||
| 392 | // space, for example, a system module need not have a VRAM mapping. | ||
| 393 | break; | ||
| 394 | case PageType::RasterizerCachedMemory: { | ||
| 395 | u8* pointer = GetPointerFromVMA(vaddr & ~PAGE_MASK); | ||
| 396 | if (pointer == nullptr) { | ||
| 397 | // It's possible that this function has called been while updating the pagetable | ||
| 398 | // after unmapping a VMA. In that case the underlying VMA will no longer exist, | ||
| 399 | // and we should just leave the pagetable entry blank. | ||
| 400 | page_type = PageType::Unmapped; | ||
| 401 | } else { | ||
| 402 | page_type = PageType::Memory; | ||
| 403 | current_page_table->pointers[vaddr >> PAGE_BITS] = pointer; | ||
| 404 | } | ||
| 405 | break; | ||
| 406 | } | ||
| 407 | case PageType::RasterizerCachedSpecial: | ||
| 408 | page_type = PageType::Special; | ||
| 409 | break; | ||
| 410 | default: | ||
| 411 | UNREACHABLE(); | ||
| 412 | } | ||
| 413 | } | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | void RasterizerFlushRegion(PAddr start, u64 size) {} | ||
| 418 | |||
| 419 | void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) { | ||
| 420 | // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be | ||
| 421 | // null here | ||
| 422 | } | ||
| 423 | |||
| 424 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) { | ||
| 425 | // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be | ||
| 426 | // null here | ||
| 427 | } | ||
| 428 | |||
| 429 | u8 Read8(const VAddr addr) { | 283 | u8 Read8(const VAddr addr) { |
| 430 | return Read<u8>(addr); | 284 | return Read<u8>(addr); |
| 431 | } | 285 | } |
| @@ -442,6 +296,17 @@ u64 Read64(const VAddr addr) { | |||
| 442 | return Read<u64_le>(addr); | 296 | return Read<u64_le>(addr); |
| 443 | } | 297 | } |
| 444 | 298 | ||
| 299 | static bool ReadSpecialBlock(const Kernel::Process& process, const VAddr src_addr, | ||
| 300 | void* dest_buffer, const size_t size) { | ||
| 301 | auto& page_table = process.vm_manager.page_table; | ||
| 302 | for (const auto& handler : GetSpecialHandlers(page_table, src_addr, size)) { | ||
| 303 | if (handler->ReadBlock(src_addr, dest_buffer, size)) { | ||
| 304 | return true; | ||
| 305 | } | ||
| 306 | } | ||
| 307 | return false; | ||
| 308 | } | ||
| 309 | |||
| 445 | void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, | 310 | void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_buffer, |
| 446 | const size_t size) { | 311 | const size_t size) { |
| 447 | auto& page_table = process.vm_manager.page_table; | 312 | auto& page_table = process.vm_manager.page_table; |
| @@ -455,11 +320,15 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_ | |||
| 455 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 320 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 456 | 321 | ||
| 457 | switch (page_table.attributes[page_index]) { | 322 | switch (page_table.attributes[page_index]) { |
| 458 | case PageType::Unmapped: { | 323 | case PageType::Unmapped: |
| 459 | LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0xllx, size = %zu)", | 324 | LOG_ERROR(HW_Memory, "unmapped ReadBlock @ 0x%08X (start address = 0xllx, size = %zu)", |
| 460 | current_vaddr, src_addr, size); | 325 | current_vaddr, src_addr, size); |
| 461 | std::memset(dest_buffer, 0, copy_amount); | 326 | std::memset(dest_buffer, 0, copy_amount); |
| 462 | break; | 327 | break; |
| 328 | case PageType::Special: { | ||
| 329 | if (ReadSpecialBlock(process, current_vaddr, dest_buffer, copy_amount)) | ||
| 330 | break; | ||
| 331 | [[fallthrough]]; | ||
| 463 | } | 332 | } |
| 464 | case PageType::Memory: { | 333 | case PageType::Memory: { |
| 465 | DEBUG_ASSERT(page_table.pointers[page_index]); | 334 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| @@ -468,26 +337,6 @@ void ReadBlock(const Kernel::Process& process, const VAddr src_addr, void* dest_ | |||
| 468 | std::memcpy(dest_buffer, src_ptr, copy_amount); | 337 | std::memcpy(dest_buffer, src_ptr, copy_amount); |
| 469 | break; | 338 | break; |
| 470 | } | 339 | } |
| 471 | case PageType::Special: { | ||
| 472 | MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr); | ||
| 473 | DEBUG_ASSERT(handler); | ||
| 474 | handler->ReadBlock(current_vaddr, dest_buffer, copy_amount); | ||
| 475 | break; | ||
| 476 | } | ||
| 477 | case PageType::RasterizerCachedMemory: { | ||
| 478 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 479 | FlushMode::Flush); | ||
| 480 | std::memcpy(dest_buffer, GetPointerFromVMA(process, current_vaddr), copy_amount); | ||
| 481 | break; | ||
| 482 | } | ||
| 483 | case PageType::RasterizerCachedSpecial: { | ||
| 484 | MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr); | ||
| 485 | DEBUG_ASSERT(handler); | ||
| 486 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 487 | FlushMode::Flush); | ||
| 488 | handler->ReadBlock(current_vaddr, dest_buffer, copy_amount); | ||
| 489 | break; | ||
| 490 | } | ||
| 491 | default: | 340 | default: |
| 492 | UNREACHABLE(); | 341 | UNREACHABLE(); |
| 493 | } | 342 | } |
| @@ -519,6 +368,17 @@ void Write64(const VAddr addr, const u64 data) { | |||
| 519 | Write<u64_le>(addr, data); | 368 | Write<u64_le>(addr, data); |
| 520 | } | 369 | } |
| 521 | 370 | ||
| 371 | static bool WriteSpecialBlock(const Kernel::Process& process, const VAddr dest_addr, | ||
| 372 | const void* src_buffer, const size_t size) { | ||
| 373 | auto& page_table = process.vm_manager.page_table; | ||
| 374 | for (const auto& handler : GetSpecialHandlers(page_table, dest_addr, size)) { | ||
| 375 | if (handler->WriteBlock(dest_addr, src_buffer, size)) { | ||
| 376 | return true; | ||
| 377 | } | ||
| 378 | } | ||
| 379 | return false; | ||
| 380 | } | ||
| 381 | |||
| 522 | void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, | 382 | void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const void* src_buffer, |
| 523 | const size_t size) { | 383 | const size_t size) { |
| 524 | auto& page_table = process.vm_manager.page_table; | 384 | auto& page_table = process.vm_manager.page_table; |
| @@ -531,12 +391,15 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi | |||
| 531 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 391 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 532 | 392 | ||
| 533 | switch (page_table.attributes[page_index]) { | 393 | switch (page_table.attributes[page_index]) { |
| 534 | case PageType::Unmapped: { | 394 | case PageType::Unmapped: |
| 535 | LOG_ERROR(HW_Memory, | 395 | LOG_ERROR(HW_Memory, |
| 536 | "unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)", | 396 | "unmapped WriteBlock @ 0x%08X (start address = 0x%08X, size = %zu)", |
| 537 | current_vaddr, dest_addr, size); | 397 | current_vaddr, dest_addr, size); |
| 538 | break; | 398 | break; |
| 539 | } | 399 | case PageType::Special: |
| 400 | if (WriteSpecialBlock(process, current_vaddr, src_buffer, copy_amount)) | ||
| 401 | break; | ||
| 402 | [[fallthrough]]; | ||
| 540 | case PageType::Memory: { | 403 | case PageType::Memory: { |
| 541 | DEBUG_ASSERT(page_table.pointers[page_index]); | 404 | DEBUG_ASSERT(page_table.pointers[page_index]); |
| 542 | 405 | ||
| @@ -544,26 +407,6 @@ void WriteBlock(const Kernel::Process& process, const VAddr dest_addr, const voi | |||
| 544 | std::memcpy(dest_ptr, src_buffer, copy_amount); | 407 | std::memcpy(dest_ptr, src_buffer, copy_amount); |
| 545 | break; | 408 | break; |
| 546 | } | 409 | } |
| 547 | case PageType::Special: { | ||
| 548 | MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr); | ||
| 549 | DEBUG_ASSERT(handler); | ||
| 550 | handler->WriteBlock(current_vaddr, src_buffer, copy_amount); | ||
| 551 | break; | ||
| 552 | } | ||
| 553 | case PageType::RasterizerCachedMemory: { | ||
| 554 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 555 | FlushMode::FlushAndInvalidate); | ||
| 556 | std::memcpy(GetPointerFromVMA(process, current_vaddr), src_buffer, copy_amount); | ||
| 557 | break; | ||
| 558 | } | ||
| 559 | case PageType::RasterizerCachedSpecial: { | ||
| 560 | MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr); | ||
| 561 | DEBUG_ASSERT(handler); | ||
| 562 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 563 | FlushMode::FlushAndInvalidate); | ||
| 564 | handler->WriteBlock(current_vaddr, src_buffer, copy_amount); | ||
| 565 | break; | ||
| 566 | } | ||
| 567 | default: | 410 | default: |
| 568 | UNREACHABLE(); | 411 | UNREACHABLE(); |
| 569 | } | 412 | } |
| @@ -580,6 +423,8 @@ void WriteBlock(const VAddr dest_addr, const void* src_buffer, const size_t size | |||
| 580 | } | 423 | } |
| 581 | 424 | ||
| 582 | void ZeroBlock(const VAddr dest_addr, const size_t size) { | 425 | void ZeroBlock(const VAddr dest_addr, const size_t size) { |
| 426 | const auto& process = *Kernel::g_current_process; | ||
| 427 | |||
| 583 | size_t remaining_size = size; | 428 | size_t remaining_size = size; |
| 584 | size_t page_index = dest_addr >> PAGE_BITS; | 429 | size_t page_index = dest_addr >> PAGE_BITS; |
| 585 | size_t page_offset = dest_addr & PAGE_MASK; | 430 | size_t page_offset = dest_addr & PAGE_MASK; |
| @@ -591,11 +436,14 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) { | |||
| 591 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 436 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 592 | 437 | ||
| 593 | switch (current_page_table->attributes[page_index]) { | 438 | switch (current_page_table->attributes[page_index]) { |
| 594 | case PageType::Unmapped: { | 439 | case PageType::Unmapped: |
| 595 | LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)", | 440 | LOG_ERROR(HW_Memory, "unmapped ZeroBlock @ 0x%08X (start address = 0x%08X, size = %zu)", |
| 596 | current_vaddr, dest_addr, size); | 441 | current_vaddr, dest_addr, size); |
| 597 | break; | 442 | break; |
| 598 | } | 443 | case PageType::Special: |
| 444 | if (WriteSpecialBlock(process, current_vaddr, zeros.data(), copy_amount)) | ||
| 445 | break; | ||
| 446 | [[fallthrough]]; | ||
| 599 | case PageType::Memory: { | 447 | case PageType::Memory: { |
| 600 | DEBUG_ASSERT(current_page_table->pointers[page_index]); | 448 | DEBUG_ASSERT(current_page_table->pointers[page_index]); |
| 601 | 449 | ||
| @@ -603,25 +451,6 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) { | |||
| 603 | std::memset(dest_ptr, 0, copy_amount); | 451 | std::memset(dest_ptr, 0, copy_amount); |
| 604 | break; | 452 | break; |
| 605 | } | 453 | } |
| 606 | case PageType::Special: { | ||
| 607 | DEBUG_ASSERT(GetMMIOHandler(current_vaddr)); | ||
| 608 | |||
| 609 | GetMMIOHandler(current_vaddr)->WriteBlock(current_vaddr, zeros.data(), copy_amount); | ||
| 610 | break; | ||
| 611 | } | ||
| 612 | case PageType::RasterizerCachedMemory: { | ||
| 613 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 614 | FlushMode::FlushAndInvalidate); | ||
| 615 | std::memset(GetPointerFromVMA(current_vaddr), 0, copy_amount); | ||
| 616 | break; | ||
| 617 | } | ||
| 618 | case PageType::RasterizerCachedSpecial: { | ||
| 619 | DEBUG_ASSERT(GetMMIOHandler(current_vaddr)); | ||
| 620 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 621 | FlushMode::FlushAndInvalidate); | ||
| 622 | GetMMIOHandler(current_vaddr)->WriteBlock(current_vaddr, zeros.data(), copy_amount); | ||
| 623 | break; | ||
| 624 | } | ||
| 625 | default: | 454 | default: |
| 626 | UNREACHABLE(); | 455 | UNREACHABLE(); |
| 627 | } | 456 | } |
| @@ -633,6 +462,8 @@ void ZeroBlock(const VAddr dest_addr, const size_t size) { | |||
| 633 | } | 462 | } |
| 634 | 463 | ||
| 635 | void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) { | 464 | void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) { |
| 465 | const auto& process = *Kernel::g_current_process; | ||
| 466 | |||
| 636 | size_t remaining_size = size; | 467 | size_t remaining_size = size; |
| 637 | size_t page_index = src_addr >> PAGE_BITS; | 468 | size_t page_index = src_addr >> PAGE_BITS; |
| 638 | size_t page_offset = src_addr & PAGE_MASK; | 469 | size_t page_offset = src_addr & PAGE_MASK; |
| @@ -642,11 +473,18 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) { | |||
| 642 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); | 473 | const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset); |
| 643 | 474 | ||
| 644 | switch (current_page_table->attributes[page_index]) { | 475 | switch (current_page_table->attributes[page_index]) { |
| 645 | case PageType::Unmapped: { | 476 | case PageType::Unmapped: |
| 646 | LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)", | 477 | LOG_ERROR(HW_Memory, "unmapped CopyBlock @ 0x%08X (start address = 0x%08X, size = %zu)", |
| 647 | current_vaddr, src_addr, size); | 478 | current_vaddr, src_addr, size); |
| 648 | ZeroBlock(dest_addr, copy_amount); | 479 | ZeroBlock(dest_addr, copy_amount); |
| 649 | break; | 480 | break; |
| 481 | case PageType::Special: { | ||
| 482 | std::vector<u8> buffer(copy_amount); | ||
| 483 | if (ReadSpecialBlock(process, current_vaddr, buffer.data(), buffer.size())) { | ||
| 484 | WriteBlock(dest_addr, buffer.data(), buffer.size()); | ||
| 485 | break; | ||
| 486 | } | ||
| 487 | [[fallthrough]]; | ||
| 650 | } | 488 | } |
| 651 | case PageType::Memory: { | 489 | case PageType::Memory: { |
| 652 | DEBUG_ASSERT(current_page_table->pointers[page_index]); | 490 | DEBUG_ASSERT(current_page_table->pointers[page_index]); |
| @@ -654,30 +492,6 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) { | |||
| 654 | WriteBlock(dest_addr, src_ptr, copy_amount); | 492 | WriteBlock(dest_addr, src_ptr, copy_amount); |
| 655 | break; | 493 | break; |
| 656 | } | 494 | } |
| 657 | case PageType::Special: { | ||
| 658 | DEBUG_ASSERT(GetMMIOHandler(current_vaddr)); | ||
| 659 | |||
| 660 | std::vector<u8> buffer(copy_amount); | ||
| 661 | GetMMIOHandler(current_vaddr)->ReadBlock(current_vaddr, buffer.data(), buffer.size()); | ||
| 662 | WriteBlock(dest_addr, buffer.data(), buffer.size()); | ||
| 663 | break; | ||
| 664 | } | ||
| 665 | case PageType::RasterizerCachedMemory: { | ||
| 666 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 667 | FlushMode::Flush); | ||
| 668 | WriteBlock(dest_addr, GetPointerFromVMA(current_vaddr), copy_amount); | ||
| 669 | break; | ||
| 670 | } | ||
| 671 | case PageType::RasterizerCachedSpecial: { | ||
| 672 | DEBUG_ASSERT(GetMMIOHandler(current_vaddr)); | ||
| 673 | RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount), | ||
| 674 | FlushMode::Flush); | ||
| 675 | |||
| 676 | std::vector<u8> buffer(copy_amount); | ||
| 677 | GetMMIOHandler(current_vaddr)->ReadBlock(current_vaddr, buffer.data(), buffer.size()); | ||
| 678 | WriteBlock(dest_addr, buffer.data(), buffer.size()); | ||
| 679 | break; | ||
| 680 | } | ||
| 681 | default: | 495 | default: |
| 682 | UNREACHABLE(); | 496 | UNREACHABLE(); |
| 683 | } | 497 | } |
| @@ -691,43 +505,75 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const size_t size) { | |||
| 691 | } | 505 | } |
| 692 | 506 | ||
| 693 | template <> | 507 | template <> |
| 694 | u8 ReadMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr) { | 508 | boost::optional<u8> ReadSpecial<u8>(VAddr addr) { |
| 695 | return mmio_handler->Read8(addr); | 509 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 510 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8))) | ||
| 511 | if (auto result = handler->Read8(addr)) | ||
| 512 | return *result; | ||
| 513 | return {}; | ||
| 696 | } | 514 | } |
| 697 | 515 | ||
| 698 | template <> | 516 | template <> |
| 699 | u16 ReadMMIO<u16>(MMIORegionPointer mmio_handler, VAddr addr) { | 517 | boost::optional<u16> ReadSpecial<u16>(VAddr addr) { |
| 700 | return mmio_handler->Read16(addr); | 518 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 519 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16))) | ||
| 520 | if (auto result = handler->Read16(addr)) | ||
| 521 | return *result; | ||
| 522 | return {}; | ||
| 701 | } | 523 | } |
| 702 | 524 | ||
| 703 | template <> | 525 | template <> |
| 704 | u32 ReadMMIO<u32>(MMIORegionPointer mmio_handler, VAddr addr) { | 526 | boost::optional<u32> ReadSpecial<u32>(VAddr addr) { |
| 705 | return mmio_handler->Read32(addr); | 527 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 528 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32))) | ||
| 529 | if (auto result = handler->Read32(addr)) | ||
| 530 | return *result; | ||
| 531 | return {}; | ||
| 706 | } | 532 | } |
| 707 | 533 | ||
| 708 | template <> | 534 | template <> |
| 709 | u64 ReadMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr) { | 535 | boost::optional<u64> ReadSpecial<u64>(VAddr addr) { |
| 710 | return mmio_handler->Read64(addr); | 536 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 537 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64))) | ||
| 538 | if (auto result = handler->Read64(addr)) | ||
| 539 | return *result; | ||
| 540 | return {}; | ||
| 711 | } | 541 | } |
| 712 | 542 | ||
| 713 | template <> | 543 | template <> |
| 714 | void WriteMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr, const u8 data) { | 544 | bool WriteSpecial<u8>(VAddr addr, const u8 data) { |
| 715 | mmio_handler->Write8(addr, data); | 545 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 546 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u8))) | ||
| 547 | if (handler->Write8(addr, data)) | ||
| 548 | return true; | ||
| 549 | return false; | ||
| 716 | } | 550 | } |
| 717 | 551 | ||
| 718 | template <> | 552 | template <> |
| 719 | void WriteMMIO<u16>(MMIORegionPointer mmio_handler, VAddr addr, const u16 data) { | 553 | bool WriteSpecial<u16>(VAddr addr, const u16 data) { |
| 720 | mmio_handler->Write16(addr, data); | 554 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 555 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u16))) | ||
| 556 | if (handler->Write16(addr, data)) | ||
| 557 | return true; | ||
| 558 | return false; | ||
| 721 | } | 559 | } |
| 722 | 560 | ||
| 723 | template <> | 561 | template <> |
| 724 | void WriteMMIO<u32>(MMIORegionPointer mmio_handler, VAddr addr, const u32 data) { | 562 | bool WriteSpecial<u32>(VAddr addr, const u32 data) { |
| 725 | mmio_handler->Write32(addr, data); | 563 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 564 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u32))) | ||
| 565 | if (handler->Write32(addr, data)) | ||
| 566 | return true; | ||
| 567 | return false; | ||
| 726 | } | 568 | } |
| 727 | 569 | ||
| 728 | template <> | 570 | template <> |
| 729 | void WriteMMIO<u64>(MMIORegionPointer mmio_handler, VAddr addr, const u64 data) { | 571 | bool WriteSpecial<u64>(VAddr addr, const u64 data) { |
| 730 | mmio_handler->Write64(addr, data); | 572 | const PageTable& page_table = Kernel::g_current_process->vm_manager.page_table; |
| 573 | for (const auto& handler : GetSpecialHandlers(page_table, addr, sizeof(u64))) | ||
| 574 | if (handler->Write64(addr, data)) | ||
| 575 | return true; | ||
| 576 | return false; | ||
| 731 | } | 577 | } |
| 732 | 578 | ||
| 733 | boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) { | 579 | boost::optional<PAddr> TryVirtualToPhysicalAddress(const VAddr addr) { |
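The rewritten Read/Write templates above make the dispatch uniform: a Special page offers the access to each hook covering the address, and a hook that returns boost::none (or false) passes the access through to backing memory. A standalone sketch of that fall-through pattern, using plain std::optional and std::function instead of the yuzu types:

```cpp
// Standalone illustration of the hook fall-through used by Read/Write above:
// each hook may claim the access; an empty optional means "not handled" and
// the access falls through to regular backing memory. Not yuzu code.
#include <cstdint>
#include <functional>
#include <iostream>
#include <optional>
#include <vector>

using ReadHook = std::function<std::optional<std::uint8_t>(std::uint64_t)>;

std::uint8_t Read8(std::uint64_t vaddr, const std::vector<ReadHook>& hooks,
                   const std::uint8_t* backing) {
    for (const auto& hook : hooks) {
        if (auto value = hook(vaddr)) {
            return *value; // a hook serviced the read
        }
    }
    return backing[vaddr]; // no hook claimed it: read the backing memory
}

int main() {
    std::uint8_t ram[4] = {0x11, 0x22, 0x33, 0x44};
    std::vector<ReadHook> hooks{
        // Hypothetical debug hook that only intercepts address 1.
        [](std::uint64_t addr) -> std::optional<std::uint8_t> {
            if (addr == 1)
                return std::uint8_t{0xFF};
            return std::nullopt;
        },
    };
    std::cout << std::hex << +Read8(0, hooks, ram) << ' ' // 11 (fell through)
              << +Read8(1, hooks, ram) << '\n';           // ff (hooked)
}
```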
diff --git a/src/core/memory.h b/src/core/memory.h
index 7e554f394..b2158ca46 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
| @@ -8,10 +8,12 @@ | |||
| 8 | #include <cstddef> | 8 | #include <cstddef> |
| 9 | #include <map> | 9 | #include <map> |
| 10 | #include <string> | 10 | #include <string> |
| 11 | #include <tuple> | ||
| 11 | #include <vector> | 12 | #include <vector> |
| 13 | #include <boost/icl/interval_map.hpp> | ||
| 12 | #include <boost/optional.hpp> | 14 | #include <boost/optional.hpp> |
| 13 | #include "common/common_types.h" | 15 | #include "common/common_types.h" |
| 14 | #include "core/mmio.h" | 16 | #include "core/memory_hook.h" |
| 15 | 17 | ||
| 16 | namespace Kernel { | 18 | namespace Kernel { |
| 17 | class Process; | 19 | class Process; |
| @@ -28,32 +30,35 @@ const u64 PAGE_SIZE = 1 << PAGE_BITS; | |||
| 28 | const u64 PAGE_MASK = PAGE_SIZE - 1; | 30 | const u64 PAGE_MASK = PAGE_SIZE - 1; |
| 29 | const size_t PAGE_TABLE_NUM_ENTRIES = 1ULL << (36 - PAGE_BITS); | 31 | const size_t PAGE_TABLE_NUM_ENTRIES = 1ULL << (36 - PAGE_BITS); |
| 30 | 32 | ||
| 31 | enum class PageType { | 33 | enum class PageType : u8 { |
| 32 | /// Page is unmapped and should cause an access error. | 34 | /// Page is unmapped and should cause an access error. |
| 33 | Unmapped, | 35 | Unmapped, |
| 34 | /// Page is mapped to regular memory. This is the only type you can get pointers to. | 36 | /// Page is mapped to regular memory. This is the only type you can get pointers to. |
| 35 | Memory, | 37 | Memory, |
| 36 | /// Page is mapped to regular memory, but also needs to check for rasterizer cache flushing and | 38 | /// Page is mapped to a memory hook, which intercepts read and write requests. |
| 37 | /// invalidation | ||
| 38 | RasterizerCachedMemory, | ||
| 39 | /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions. | ||
| 40 | Special, | 39 | Special, |
| 41 | /// Page is mapped to a I/O region, but also needs to check for rasterizer cache flushing and | ||
| 42 | /// invalidation | ||
| 43 | RasterizerCachedSpecial, | ||
| 44 | }; | 40 | }; |
| 45 | 41 | ||
| 46 | struct SpecialRegion { | 42 | struct SpecialRegion { |
| 47 | VAddr base; | 43 | enum class Type { |
| 48 | u64 size; | 44 | DebugHook, |
| 49 | MMIORegionPointer handler; | 45 | IODevice, |
| 46 | } type; | ||
| 47 | |||
| 48 | MemoryHookPointer handler; | ||
| 49 | |||
| 50 | bool operator<(const SpecialRegion& other) const { | ||
| 51 | return std::tie(type, handler) < std::tie(other.type, other.handler); | ||
| 52 | } | ||
| 53 | |||
| 54 | bool operator==(const SpecialRegion& other) const { | ||
| 55 | return std::tie(type, handler) == std::tie(other.type, other.handler); | ||
| 56 | } | ||
| 50 | }; | 57 | }; |
| 51 | 58 | ||
| 52 | /** | 59 | /** |
| 53 | * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely | 60 | * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely |
| 54 | * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and | 61 | * mimics the way a real CPU page table works. |
| 55 | * fetching requirements when accessing. In the usual case of an access to regular memory, it only | ||
| 56 | * requires an indexed fetch and a check for NULL. | ||
| 57 | */ | 62 | */ |
| 58 | struct PageTable { | 63 | struct PageTable { |
| 59 | /** | 64 | /** |
| @@ -66,19 +71,13 @@ struct PageTable { | |||
| 66 | * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of | 71 | * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of |
| 67 | * type `Special`. | 72 | * type `Special`. |
| 68 | */ | 73 | */ |
| 69 | std::vector<SpecialRegion> special_regions; | 74 | boost::icl::interval_map<VAddr, std::set<SpecialRegion>> special_regions; |
| 70 | 75 | ||
| 71 | /** | 76 | /** |
| 72 | * Array of fine grained page attributes. If it is set to any value other than `Memory`, then | 77 | * Array of fine grained page attributes. If it is set to any value other than `Memory`, then |
| 73 | * the corresponding entry in `pointers` MUST be set to null. | 78 | * the corresponding entry in `pointers` MUST be set to null. |
| 74 | */ | 79 | */ |
| 75 | std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes; | 80 | std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes; |
| 76 | |||
| 77 | /** | ||
| 78 | * Indicates the number of externally cached resources touching a page that should be | ||
| 79 | * flushed before the memory is accessed | ||
| 80 | */ | ||
| 81 | std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count; | ||
| 82 | }; | 81 | }; |
| 83 | 82 | ||
| 84 | /// Physical memory regions as seen from the ARM11 | 83 | /// Physical memory regions as seen from the ARM11 |
| @@ -243,33 +242,4 @@ boost::optional<VAddr> PhysicalToVirtualAddress(PAddr addr); | |||
| 243 | */ | 242 | */ |
| 244 | u8* GetPhysicalPointer(PAddr address); | 243 | u8* GetPhysicalPointer(PAddr address); |
| 245 | 244 | ||
| 246 | /** | ||
| 247 | * Adds the supplied value to the rasterizer resource cache counter of each | ||
| 248 | * page touching the region. | ||
| 249 | */ | ||
| 250 | void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta); | ||
| 251 | |||
| 252 | /** | ||
| 253 | * Flushes any externally cached rasterizer resources touching the given region. | ||
| 254 | */ | ||
| 255 | void RasterizerFlushRegion(PAddr start, u64 size); | ||
| 256 | |||
| 257 | /** | ||
| 258 | * Flushes and invalidates any externally cached rasterizer resources touching the given region. | ||
| 259 | */ | ||
| 260 | void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size); | ||
| 261 | |||
| 262 | enum class FlushMode { | ||
| 263 | /// Write back modified surfaces to RAM | ||
| 264 | Flush, | ||
| 265 | /// Write back modified surfaces to RAM, and also remove them from the cache | ||
| 266 | FlushAndInvalidate, | ||
| 267 | }; | ||
| 268 | |||
| 269 | /** | ||
| 270 | * Flushes and invalidates any externally cached rasterizer resources touching the given virtual | ||
| 271 | * address region. | ||
| 272 | */ | ||
| 273 | void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode); | ||
| 274 | |||
| 275 | } // namespace Memory | 245 | } // namespace Memory |
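special_regions is now a boost::icl::interval_map keyed on virtual address with a std::set<SpecialRegion> codomain, so overlapping registrations split the intervals and union the sets; that is what lets GetSpecialHandlers collect every hook covering a range. A small standalone sketch of that aggregation behaviour, with strings standing in for MemoryHookPointer:

```cpp
// Standalone sketch of interval_map aggregation: overlapping additions split
// the key space and merge the value sets, so a range query sees every hook
// that covers an address. Strings stand in for MemoryHookPointer.
#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <boost/icl/interval_map.hpp>

int main() {
    using Hooks = std::set<std::string>;
    boost::icl::interval_map<int, Hooks> regions;

    auto io = boost::icl::discrete_interval<int>::closed(0x1000, 0x1FFF);
    auto dbg = boost::icl::discrete_interval<int>::closed(0x1800, 0x27FF);
    regions.add(std::make_pair(io, Hooks{"io_device"}));
    regions.add(std::make_pair(dbg, Hooks{"debug_hook"}));

    // Query a one-byte range at 0x1900, which both registrations overlap.
    auto query = boost::icl::discrete_interval<int>::closed(0x1900, 0x1900);
    auto range = regions.equal_range(query);
    for (auto it = range.first; it != range.second; ++it) {
        for (const auto& name : it->second) {
            std::cout << name << '\n'; // prints debug_hook then io_device
        }
    }
}
```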
diff --git a/src/core/memory_hook.h b/src/core/memory_hook.h
new file mode 100644
index 000000000..feebd850a
--- /dev/null
+++ b/src/core/memory_hook.h
| @@ -0,0 +1,46 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include <boost/optional.hpp> | ||
| 9 | #include "common/common_types.h" | ||
| 10 | |||
| 11 | namespace Memory { | ||
| 12 | |||
| 13 | /** | ||
| 14 | * Memory hooks have two purposes: | ||
| 15 | * 1. To allow reads and writes to a region of memory to be intercepted. This is used to implement | ||
| 16 | * texture forwarding and memory breakpoints for debugging. | ||
| 17 | * 2. To allow for the implementation of MMIO devices. | ||
| 18 | * | ||
| 19 | * A hook may be mapped to multiple regions of memory. | ||
| 20 | * | ||
| 21 | * If a boost::none or false is returned from a function, the read/write request is passed through | ||
| 22 | * to the underlying memory region. | ||
| 23 | */ | ||
| 24 | class MemoryHook { | ||
| 25 | public: | ||
| 26 | virtual ~MemoryHook() = default; | ||
| 27 | |||
| 28 | virtual boost::optional<bool> IsValidAddress(VAddr addr) = 0; | ||
| 29 | |||
| 30 | virtual boost::optional<u8> Read8(VAddr addr) = 0; | ||
| 31 | virtual boost::optional<u16> Read16(VAddr addr) = 0; | ||
| 32 | virtual boost::optional<u32> Read32(VAddr addr) = 0; | ||
| 33 | virtual boost::optional<u64> Read64(VAddr addr) = 0; | ||
| 34 | |||
| 35 | virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) = 0; | ||
| 36 | |||
| 37 | virtual bool Write8(VAddr addr, u8 data) = 0; | ||
| 38 | virtual bool Write16(VAddr addr, u16 data) = 0; | ||
| 39 | virtual bool Write32(VAddr addr, u32 data) = 0; | ||
| 40 | virtual bool Write64(VAddr addr, u64 data) = 0; | ||
| 41 | |||
| 42 | virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) = 0; | ||
| 43 | }; | ||
| 44 | |||
| 45 | using MemoryHookPointer = std::shared_ptr<MemoryHook>; | ||
| 46 | } // namespace Memory | ||
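The new header above defines the whole interface; as its comment notes, returning boost::none or false declines an access and lets it pass through. A minimal sketch of a debug-style hook built on it, with illustrative names that are not part of the commit:

```cpp
// Hypothetical MemoryHook that records 32-bit writes but declines every
// access, so reads and writes still reach the underlying memory.
#include <utility>
#include <vector>
#include "core/memory_hook.h"

class WriteLoggerHook final : public Memory::MemoryHook {
public:
    boost::optional<bool> IsValidAddress(VAddr addr) override {
        return {}; // no opinion; defer to the page table
    }

    boost::optional<u8> Read8(VAddr addr) override { return {}; }
    boost::optional<u16> Read16(VAddr addr) override { return {}; }
    boost::optional<u32> Read32(VAddr addr) override { return {}; }
    boost::optional<u64> Read64(VAddr addr) override { return {}; }
    bool ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) override {
        return false; // not handled; fall through
    }

    bool Write8(VAddr addr, u8 data) override { return false; }
    bool Write16(VAddr addr, u16 data) override { return false; }
    bool Write32(VAddr addr, u32 data) override {
        writes.emplace_back(addr, data); // observe, but do not consume
        return false;
    }
    bool Write64(VAddr addr, u64 data) override { return false; }
    bool WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) override {
        return false;
    }

    std::vector<std::pair<VAddr, u32>> writes;
};
```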
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 6f82a131e..9a1a4f4be 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
| @@ -5,7 +5,7 @@ | |||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include "common/common_types.h" | 7 | #include "common/common_types.h" |
| 8 | #include "core/mmio.h" | 8 | #include "core/memory_hook.h" |
| 9 | 9 | ||
| 10 | namespace Memory { | 10 | namespace Memory { |
| 11 | 11 | ||
| @@ -26,7 +26,11 @@ void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target); | |||
| 26 | * @param size The amount of bytes to map. Must be page-aligned. | 26 | * @param size The amount of bytes to map. Must be page-aligned. |
| 27 | * @param mmio_handler The handler that backs the mapping. | 27 | * @param mmio_handler The handler that backs the mapping. |
| 28 | */ | 28 | */ |
| 29 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MMIORegionPointer mmio_handler); | 29 | void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer mmio_handler); |
| 30 | 30 | ||
| 31 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size); | 31 | void UnmapRegion(PageTable& page_table, VAddr base, u64 size); |
| 32 | |||
| 33 | void AddDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | ||
| 34 | void RemoveDebugHook(PageTable& page_table, VAddr base, u64 size, MemoryHookPointer hook); | ||
| 35 | |||
| 32 | } // namespace Memory | 36 | } // namespace Memory |
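AddDebugHook and RemoveDebugHook only add and subtract entries in the page table's special_regions map. A plausible register/unregister pattern, with a hypothetical range and hook, might look like this:

```cpp
// Hedged sketch of registering and later removing a debug hook over a
// page-aligned window; the range and hook object are hypothetical.
#include "core/memory.h"
#include "core/memory_setup.h"

void WatchRegion(Memory::PageTable& page_table, Memory::MemoryHookPointer hook) {
    const VAddr base = 0x08000000; // hypothetical page-aligned base
    const u64 size = 0x2000;       // two pages

    Memory::AddDebugHook(page_table, base, size, hook);
    // ... run the code under observation ...
    Memory::RemoveDebugHook(page_table, base, size, hook);
}
```

Removal subtracts the same {type, handler} pair that was added, so the identical hook pointer has to be passed to RemoveDebugHook.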
diff --git a/src/core/mmio.h b/src/core/mmio.h
deleted file mode 100644
index 5e3cc01af..000000000
--- a/src/core/mmio.h
+++ /dev/null
| @@ -1,38 +0,0 @@ | |||
| 1 | // Copyright 2016 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <memory> | ||
| 8 | #include "common/common_types.h" | ||
| 9 | |||
| 10 | namespace Memory { | ||
| 11 | |||
| 12 | /** | ||
| 13 | * Represents a device with memory mapped IO. | ||
| 14 | * A device may be mapped to multiple regions of memory. | ||
| 15 | */ | ||
| 16 | class MMIORegion { | ||
| 17 | public: | ||
| 18 | virtual ~MMIORegion() = default; | ||
| 19 | |||
| 20 | virtual bool IsValidAddress(VAddr addr) = 0; | ||
| 21 | |||
| 22 | virtual u8 Read8(VAddr addr) = 0; | ||
| 23 | virtual u16 Read16(VAddr addr) = 0; | ||
| 24 | virtual u32 Read32(VAddr addr) = 0; | ||
| 25 | virtual u64 Read64(VAddr addr) = 0; | ||
| 26 | |||
| 27 | virtual bool ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) = 0; | ||
| 28 | |||
| 29 | virtual void Write8(VAddr addr, u8 data) = 0; | ||
| 30 | virtual void Write16(VAddr addr, u16 data) = 0; | ||
| 31 | virtual void Write32(VAddr addr, u32 data) = 0; | ||
| 32 | virtual void Write64(VAddr addr, u64 data) = 0; | ||
| 33 | |||
| 34 | virtual bool WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) = 0; | ||
| 35 | }; | ||
| 36 | |||
| 37 | using MMIORegionPointer = std::shared_ptr<MMIORegion>; | ||
| 38 | }; // namespace Memory | ||
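For comparison with the deleted interface: an MMIORegion always produced a value and its narrow writes returned void, while a MemoryHook wraps reads in boost::optional and reports writes with bool. A device that always handles its region could migrate by wrapping its results and reporting writes as handled; in the sketch below, MyDevice, ReadRegister, and WriteRegister are hypothetical placeholders, not from this commit.

// Hypothetical migration sketch for an always-handled MMIO-style device.
boost::optional<u32> MyDevice::Read32(VAddr addr) {
    return ReadRegister(addr); // wrapping the u32 yields an engaged optional
}

bool MyDevice::Write32(VAddr addr, u32 data) {
    WriteRegister(addr, data);
    return true; // the device fully handled the access
}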
diff --git a/src/tests/core/arm/arm_test_common.cpp b/src/tests/core/arm/arm_test_common.cpp index 2339bdfb8..88bbbc95c 100644 --- a/src/tests/core/arm/arm_test_common.cpp +++ b/src/tests/core/arm/arm_test_common.cpp | |||
| @@ -19,8 +19,8 @@ TestEnvironment::TestEnvironment(bool mutable_memory_) | |||
| 19 | page_table = &Kernel::g_current_process->vm_manager.page_table; | 19 | page_table = &Kernel::g_current_process->vm_manager.page_table; |
| 20 | 20 | ||
| 21 | page_table->pointers.fill(nullptr); | 21 | page_table->pointers.fill(nullptr); |
| 22 | page_table->special_regions.clear(); | ||
| 22 | page_table->attributes.fill(Memory::PageType::Unmapped); | 23 | page_table->attributes.fill(Memory::PageType::Unmapped); |
| 23 | page_table->cached_res_count.fill(0); | ||
| 24 | 24 | ||
| 25 | Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); | 25 | Memory::MapIoRegion(*page_table, 0x00000000, 0x80000000, test_memory); |
| 26 | Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); | 26 | Memory::MapIoRegion(*page_table, 0x80000000, 0x80000000, test_memory); |
| @@ -62,11 +62,11 @@ void TestEnvironment::ClearWriteRecords() { | |||
| 62 | 62 | ||
| 63 | TestEnvironment::TestMemory::~TestMemory() {} | 63 | TestEnvironment::TestMemory::~TestMemory() {} |
| 64 | 64 | ||
| 65 | bool TestEnvironment::TestMemory::IsValidAddress(VAddr addr) { | 65 | boost::optional<bool> TestEnvironment::TestMemory::IsValidAddress(VAddr addr) { |
| 66 | return true; | 66 | return true; |
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | u8 TestEnvironment::TestMemory::Read8(VAddr addr) { | 69 | boost::optional<u8> TestEnvironment::TestMemory::Read8(VAddr addr) { |
| 70 | auto iter = data.find(addr); | 70 | auto iter = data.find(addr); |
| 71 | if (iter == data.end()) { | 71 | if (iter == data.end()) { |
| 72 | return addr; // Some arbitrary data | 72 | return addr; // Some arbitrary data |
| @@ -74,16 +74,16 @@ u8 TestEnvironment::TestMemory::Read8(VAddr addr) { | |||
| 74 | return iter->second; | 74 | return iter->second; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | u16 TestEnvironment::TestMemory::Read16(VAddr addr) { | 77 | boost::optional<u16> TestEnvironment::TestMemory::Read16(VAddr addr) { |
| 78 | return Read8(addr) | static_cast<u16>(Read8(addr + 1)) << 8; | 78 | return *Read8(addr) | static_cast<u16>(*Read8(addr + 1)) << 8; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | u32 TestEnvironment::TestMemory::Read32(VAddr addr) { | 81 | boost::optional<u32> TestEnvironment::TestMemory::Read32(VAddr addr) { |
| 82 | return Read16(addr) | static_cast<u32>(Read16(addr + 2)) << 16; | 82 | return *Read16(addr) | static_cast<u32>(*Read16(addr + 2)) << 16; |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | u64 TestEnvironment::TestMemory::Read64(VAddr addr) { | 85 | boost::optional<u64> TestEnvironment::TestMemory::Read64(VAddr addr) { |
| 86 | return Read32(addr) | static_cast<u64>(Read32(addr + 4)) << 32; | 86 | return *Read32(addr) | static_cast<u64>(*Read32(addr + 4)) << 32; |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | bool TestEnvironment::TestMemory::ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) { | 89 | bool TestEnvironment::TestMemory::ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) { |
| @@ -91,34 +91,38 @@ bool TestEnvironment::TestMemory::ReadBlock(VAddr src_addr, void* dest_buffer, s | |||
| 91 | u8* data = static_cast<u8*>(dest_buffer); | 91 | u8* data = static_cast<u8*>(dest_buffer); |
| 92 | 92 | ||
| 93 | for (size_t i = 0; i < size; i++, addr++, data++) { | 93 | for (size_t i = 0; i < size; i++, addr++, data++) { |
| 94 | *data = Read8(addr); | 94 | *data = *Read8(addr); |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | return true; | 97 | return true; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | void TestEnvironment::TestMemory::Write8(VAddr addr, u8 data) { | 100 | bool TestEnvironment::TestMemory::Write8(VAddr addr, u8 data) { |
| 101 | env->write_records.emplace_back(8, addr, data); | 101 | env->write_records.emplace_back(8, addr, data); |
| 102 | if (env->mutable_memory) | 102 | if (env->mutable_memory) |
| 103 | env->SetMemory8(addr, data); | 103 | env->SetMemory8(addr, data); |
| 104 | return true; | ||
| 104 | } | 105 | } |
| 105 | 106 | ||
| 106 | void TestEnvironment::TestMemory::Write16(VAddr addr, u16 data) { | 107 | bool TestEnvironment::TestMemory::Write16(VAddr addr, u16 data) { |
| 107 | env->write_records.emplace_back(16, addr, data); | 108 | env->write_records.emplace_back(16, addr, data); |
| 108 | if (env->mutable_memory) | 109 | if (env->mutable_memory) |
| 109 | env->SetMemory16(addr, data); | 110 | env->SetMemory16(addr, data); |
| 111 | return true; | ||
| 110 | } | 112 | } |
| 111 | 113 | ||
| 112 | void TestEnvironment::TestMemory::Write32(VAddr addr, u32 data) { | 114 | bool TestEnvironment::TestMemory::Write32(VAddr addr, u32 data) { |
| 113 | env->write_records.emplace_back(32, addr, data); | 115 | env->write_records.emplace_back(32, addr, data); |
| 114 | if (env->mutable_memory) | 116 | if (env->mutable_memory) |
| 115 | env->SetMemory32(addr, data); | 117 | env->SetMemory32(addr, data); |
| 118 | return true; | ||
| 116 | } | 119 | } |
| 117 | 120 | ||
| 118 | void TestEnvironment::TestMemory::Write64(VAddr addr, u64 data) { | 121 | bool TestEnvironment::TestMemory::Write64(VAddr addr, u64 data) { |
| 119 | env->write_records.emplace_back(64, addr, data); | 122 | env->write_records.emplace_back(64, addr, data); |
| 120 | if (env->mutable_memory) | 123 | if (env->mutable_memory) |
| 121 | env->SetMemory64(addr, data); | 124 | env->SetMemory64(addr, data); |
| 125 | return true; | ||
| 122 | } | 126 | } |
| 123 | 127 | ||
| 124 | bool TestEnvironment::TestMemory::WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) { | 128 | bool TestEnvironment::TestMemory::WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) { |
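In the test memory above, Read8 always produces a value, so the wider reads can safely dereference the optionals unconditionally. A hook whose byte reads can genuinely fail would instead propagate boost::none; a self-contained sketch (not from this commit):

// Hypothetical helper: compose a 16-bit read from two 8-bit reads while
// propagating failure instead of dereferencing unconditionally.
boost::optional<u16> ComposeRead16(Memory::MemoryHook& hook, VAddr addr) {
    const boost::optional<u8> lo = hook.Read8(addr);
    const boost::optional<u8> hi = hook.Read8(addr + 1);
    if (!lo || !hi)
        return boost::none; // either byte was unhandled, so the pair is too
    return static_cast<u16>(*lo | (static_cast<u16>(*hi) << 8));
}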
diff --git a/src/tests/core/arm/arm_test_common.h b/src/tests/core/arm/arm_test_common.h index 592c28594..b66922d61 100644 --- a/src/tests/core/arm/arm_test_common.h +++ b/src/tests/core/arm/arm_test_common.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | #include <vector> | 7 | #include <vector> |
| 8 | 8 | ||
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "core/mmio.h" | 10 | #include "core/memory_hook.h" |
| 11 | 11 | ||
| 12 | namespace ArmTests { | 12 | namespace ArmTests { |
| 13 | 13 | ||
| @@ -51,25 +51,25 @@ public: | |||
| 51 | 51 | ||
| 52 | private: | 52 | private: |
| 53 | friend struct TestMemory; | 53 | friend struct TestMemory; |
| 54 | struct TestMemory final : Memory::MMIORegion { | 54 | struct TestMemory final : Memory::MemoryHook { |
| 55 | explicit TestMemory(TestEnvironment* env_) : env(env_) {} | 55 | explicit TestMemory(TestEnvironment* env_) : env(env_) {} |
| 56 | TestEnvironment* env; | 56 | TestEnvironment* env; |
| 57 | 57 | ||
| 58 | ~TestMemory() override; | 58 | ~TestMemory() override; |
| 59 | 59 | ||
| 60 | bool IsValidAddress(VAddr addr) override; | 60 | boost::optional<bool> IsValidAddress(VAddr addr) override; |
| 61 | 61 | ||
| 62 | u8 Read8(VAddr addr) override; | 62 | boost::optional<u8> Read8(VAddr addr) override; |
| 63 | u16 Read16(VAddr addr) override; | 63 | boost::optional<u16> Read16(VAddr addr) override; |
| 64 | u32 Read32(VAddr addr) override; | 64 | boost::optional<u32> Read32(VAddr addr) override; |
| 65 | u64 Read64(VAddr addr) override; | 65 | boost::optional<u64> Read64(VAddr addr) override; |
| 66 | 66 | ||
| 67 | bool ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) override; | 67 | bool ReadBlock(VAddr src_addr, void* dest_buffer, size_t size) override; |
| 68 | 68 | ||
| 69 | void Write8(VAddr addr, u8 data) override; | 69 | bool Write8(VAddr addr, u8 data) override; |
| 70 | void Write16(VAddr addr, u16 data) override; | 70 | bool Write16(VAddr addr, u16 data) override; |
| 71 | void Write32(VAddr addr, u32 data) override; | 71 | bool Write32(VAddr addr, u32 data) override; |
| 72 | void Write64(VAddr addr, u64 data) override; | 72 | bool Write64(VAddr addr, u64 data) override; |
| 73 | 73 | ||
| 74 | bool WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) override; | 74 | bool WriteBlock(VAddr dest_addr, const void* src_buffer, size_t size) override; |
| 75 | 75 | ||
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 50396b5c1..8c23128ae 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -266,7 +266,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const FramebufferInfo& framebuffer_info, | |||
| 266 | screen_info.display_texture = screen_info.texture.resource.handle; | 266 | screen_info.display_texture = screen_info.texture.resource.handle; |
| 267 | screen_info.display_texcoords = MathUtil::Rectangle<float>(0.f, 0.f, 1.f, 1.f); | 267 | screen_info.display_texcoords = MathUtil::Rectangle<float>(0.f, 0.f, 1.f, 1.f); |
| 268 | 268 | ||
| 269 | Memory::RasterizerFlushRegion(framebuffer_info.address, size_in_bytes); | 269 | // Memory::RasterizerFlushRegion(framebuffer_info.address, size_in_bytes); |
| 270 | 270 | ||
| 271 | state.texture_units[0].texture_2d = screen_info.texture.resource.handle; | 271 | state.texture_units[0].texture_2d = screen_info.texture.resource.handle; |
| 272 | state.Apply(); | 272 | state.Apply(); |