author      2017-09-01 23:10:03 -0400
committer   2017-09-30 14:28:54 -0400
commit      f01472a5ffd03b535e8a66bb00d9a7548a0f61bf (patch)
tree        d11874933de837f7ce57ccb259f1f869db70bdb7 /src
parent      arm: Use 64-bit addressing in a bunch of places. (diff)
core: Various changes to support 64-bit addressing.
Diffstat (limited to 'src')
 -rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 22
 -rw-r--r--  src/core/hle/kernel/vm_manager.h   | 20
 -rw-r--r--  src/core/memory.cpp                | 44
 -rw-r--r--  src/core/memory.h                  | 16
 -rw-r--r--  src/core/memory_setup.h            |  6
 5 files changed, 54 insertions(+), 54 deletions(-)
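The reason nearly every `size` parameter in this diff widens from `u32` to `u64`: with 64-bit guest addressing, region sizes and end addresses can exceed 32 bits, and arithmetic such as `offset + size` or `base + size` silently truncates if carried in `u32`. A minimal sketch of the failure mode this guards against (the values are hypothetical, not from the commit):

```cpp
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;
using VAddr = u64; // 64-bit virtual addresses, as in this commit

int main() {
    // A 6 GiB mapping, representable only with 64-bit sizes.
    const u64 size = 6ULL * 1024 * 1024 * 1024; // 0x180000000

    // Before this commit, sizes were carried as u32: the value silently
    // truncates (6 GiB mod 2^32 = 2 GiB), so end addresses and range
    // checks like `offset + size <= block->size()` compute wrong results.
    const u32 truncated = static_cast<u32>(size); // 0x80000000

    const VAddr base = 0x10000000;
    const VAddr wrong_end = base + truncated; // 4 GiB short of the real end
    const VAddr right_end = base + size;
    return wrong_end == right_end; // returns 0: the two disagree
}
```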
```diff
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index cef1f7fa8..f70c32501 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -56,7 +56,7 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);
 
-    UpdatePageTableForVMA(initial_vma);
+    //UpdatePageTableForVMA(initial_vma);
 }
 
 VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
@@ -69,7 +69,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                           std::shared_ptr<std::vector<u8>> block,
-                                                          size_t offset, u32 size,
+                                                          size_t offset, u64 size,
                                                           MemoryState state) {
     ASSERT(block != nullptr);
     ASSERT(offset + size <= block->size());
@@ -89,7 +89,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
-ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size,
                                                             MemoryState state) {
     ASSERT(memory != nullptr);
 
@@ -107,7 +107,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
-ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
+ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
                                                    MemoryState state,
                                                    Memory::MMIORegionPointer mmio_handler) {
     // This is the appropriately sized VMA that will turn into our allocation.
@@ -141,7 +141,7 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
     return MergeAdjacent(vma_handle);
 }
 
-ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
+ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     VAddr target_end = target + size;
 
@@ -166,7 +166,7 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
     return MergeAdjacent(iter);
 }
 
-ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
+ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     VAddr target_end = target + size;
 
@@ -209,7 +209,7 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
     return vma_map.erase(iter, iter); // Erases an empty range of elements
 }
 
-ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
+ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
     ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", base);
 
@@ -225,8 +225,8 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    u32 start_in_vma = base - vma.base;
-    u32 end_in_vma = start_in_vma + size;
+    u64 start_in_vma = base - vma.base;
+    u64 end_in_vma = start_in_vma + size;
 
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -245,7 +245,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
     return MakeResult<VMAIter>(vma_handle);
 }
 
-ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
+ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
     ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", target);
 
@@ -274,7 +274,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
     return MakeResult<VMAIter>(begin_vma);
 }
 
-VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
+VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
     VirtualMemoryArea& old_vma = vma_handle->second;
     VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
 
```
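`CarveVMA` and `CarveVMARange` keep their page-alignment preconditions, and the check `(size & Memory::PAGE_MASK) == 0` only sees the full value because `size` itself is now `u64`; a `u32` size would already have dropped its high bits before the check ran. A standalone sketch of the invariant, assuming the 4 KiB page constants defined in `memory.h` later in this diff:

```cpp
#include <cstdint>

using u64 = std::uint64_t;

constexpr int PAGE_BITS = 12;
constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS; // 0x1000
constexpr u64 PAGE_MASK = PAGE_SIZE - 1;       // 0xFFF

// Mirrors the alignment precondition in CarveVMA/CarveVMARange.
constexpr bool IsPageAligned(u64 value) {
    return (value & PAGE_MASK) == 0;
}

int main() {
    static_assert(IsPageAligned(0x8000000000), "64-bit bases stay checkable");
    static_assert(!IsPageAligned(0x8000000001), "stray low bits still detected");
    return 0;
}
```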
```diff
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 38e0d74d0..aa2265ce6 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -63,7 +63,7 @@ struct VirtualMemoryArea {
     /// Virtual base address of the region.
     VAddr base = 0;
     /// Size of the region.
-    u32 size = 0;
+    u64 size = 0;
 
     VMAType type = VMAType::Free;
     VMAPermission permissions = VMAPermission::None;
@@ -109,7 +109,7 @@ public:
     * used.
     * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
     */
-    static const u32 MAX_ADDRESS = 0x40000000;
+    static const VAddr MAX_ADDRESS = 0x8000000000;
 
    /**
     * A map covering the entirety of the managed address space, keyed by the `base` field of each
@@ -142,7 +142,7 @@ public:
     * @param state MemoryState tag to attach to the VMA.
     */
    ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
-                                        size_t offset, u32 size, MemoryState state);
+                                        size_t offset, u64 size, MemoryState state);
 
    /**
     * Maps an unmanaged host memory pointer at a given address.
@@ -152,7 +152,7 @@ public:
     * @param size Size of the mapping.
     * @param state MemoryState tag to attach to the VMA.
     */
-    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
+    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
 
    /**
     * Maps a memory-mapped IO region at a given address.
@@ -163,17 +163,17 @@ public:
     * @param state MemoryState tag to attach to the VMA.
     * @param mmio_handler The handler that will implement read and write for this MMIO region.
     */
-    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state,
+    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
                                 Memory::MMIORegionPointer mmio_handler);
 
    /// Unmaps a range of addresses, splitting VMAs as necessary.
-    ResultCode UnmapRange(VAddr target, u32 size);
+    ResultCode UnmapRange(VAddr target, u64 size);
 
    /// Changes the permissions of the given VMA.
    VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);
 
    /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
-    ResultCode ReprotectRange(VAddr target, u32 size, VMAPermission new_perms);
+    ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
 
    /**
     * Scans all VMAs and updates the page table range of any that use the given vector as backing
@@ -197,19 +197,19 @@ private:
     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
     * the appropriate error checking.
     */
-    ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);
+    ResultVal<VMAIter> CarveVMA(VAddr base, u64 size);
 
    /**
     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
     * end of the range.
     */
-    ResultVal<VMAIter> CarveVMARange(VAddr base, u32 size);
+    ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size);
 
    /**
     * Splits a VMA in two, at the specified offset.
     * @returns the right side of the split, with the original iterator becoming the left side.
     */
-    VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
+    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
 
    /**
     * Checks for and merges the specified VMA with adjacent ones if possible.
```
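The headline change in this header is `MAX_ADDRESS`: 0x40000000 (1 GiB, the New 3DS limit described in the doc comment, which this commit leaves stale) becomes 0x8000000000, i.e. 2^39 bytes = 512 GiB, consistent with a 39-bit virtual address space. That value cannot live in a `u32`, hence the type change to `VAddr`. A compile-time check of the arithmetic:

```cpp
#include <cstdint>

using u64 = std::uint64_t;
using VAddr = u64;

static const VAddr MAX_ADDRESS = 0x8000000000;

// 0x8000000000 = 2^39 bytes = 512 GiB: it does not fit in 32 bits, so
// keeping `static const u32 MAX_ADDRESS` would have truncated it to 0.
static_assert(MAX_ADDRESS == (u64(1) << 39), "2^39 bytes");
static_assert((MAX_ADDRESS >> 30) == 512, "512 GiB");
static_assert(MAX_ADDRESS > UINT32_MAX, "needs more than 32 bits");

int main() {
    return 0;
}
```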
```diff
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 65649d9d7..ed453d0c1 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -34,7 +34,7 @@ enum class PageType {
 
 struct SpecialRegion {
     VAddr base;
-    u32 size;
+    u64 size;
     MMIORegionPointer handler;
 };
 
@@ -49,7 +49,7 @@ struct PageTable {
     * Array of memory pointers backing each page. An entry can only be non-null if the
     * corresponding entry in the `attributes` array is of type `Memory`.
     */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+    std::map<u64, u8*> pointers;
 
    /**
     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
@@ -61,13 +61,13 @@ struct PageTable {
     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
     * the corresponding entry in `pointers` MUST be set to null.
     */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+    std::map<u64, PageType> attributes;
 
    /**
     * Indicates the number of externally cached resources touching a page that should be
     * flushed before the memory is accessed
     */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+    std::map<u64, u8> cached_res_count;
 };
 
 /// Singular page table used for the singleton process
@@ -75,18 +75,18 @@ static PageTable main_page_table;
 /// Currently active page table
 static PageTable* current_page_table = &main_page_table;
 
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
-    return &current_page_table->pointers;
-}
+//std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
+//    return &current_page_table->pointers;
+//}
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(u64 base, u64 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
 
-    u32 end = base + size;
+    u64 end = base + size;
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
@@ -101,18 +101,18 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
 }
 
 void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
+    //main_page_table.pointers.fill(nullptr);
+    //main_page_table.attributes.fill(PageType::Unmapped);
+    //main_page_table.cached_res_count.fill(0);
 }
 
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(VAddr base, u64 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(VAddr base, u64 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
@@ -120,7 +120,7 @@ void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(VAddr base, u64 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
@@ -222,7 +222,7 @@ void Write(const VAddr vaddr, const T data) {
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
+        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u64)data,
                   vaddr);
         return;
     case PageType::Memory:
@@ -304,12 +304,12 @@ u8* GetPhysicalPointer(PAddr address) {
     return vaddr ? GetPointer(*vaddr) : nullptr;
 }
 
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) {
     if (start == 0) {
         return;
     }
 
-    u32 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
     PAddr paddr = start;
 
     for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
@@ -368,13 +368,13 @@ void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
     }
 }
 
-void RasterizerFlushRegion(PAddr start, u32 size) {
+void RasterizerFlushRegion(PAddr start, u64 size) {
     if (VideoCore::g_renderer != nullptr) {
         VideoCore::g_renderer->Rasterizer()->FlushRegion(start, size);
     }
 }
 
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -382,7 +382,7 @@ void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
     }
 }
 
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -398,7 +398,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
         VAddr overlap_end = std::min(end, region_end);
 
         PAddr physical_start = TryVirtualToPhysicalAddress(overlap_start).value();
-        u32 overlap_size = overlap_end - overlap_start;
+        u64 overlap_size = overlap_end - overlap_start;
 
         auto* rasterizer = VideoCore::g_renderer->Rasterizer();
         switch (mode) {
```
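The switch from `std::array` to `std::map` for `pointers`, `attributes`, and `cached_res_count` is what makes the wider index space tractable: a flat array with one entry per 4 KiB page of a full 64-bit space would need 2^52 entries, while a map only stores pages that are actually mapped, at the cost of an O(log n) lookup on the hot path. A minimal sketch of the lookup pattern, with hypothetical names (`SparsePageTable`, `TranslateAddress` are illustrations, not the commit's code):

```cpp
#include <cstdint>
#include <map>

using u8 = std::uint8_t;
using u64 = std::uint64_t;
using VAddr = u64;

constexpr int PAGE_BITS = 12;
constexpr u64 PAGE_MASK = (u64(1) << PAGE_BITS) - 1;

enum class PageType { Unmapped, Memory, Special };

// Sparse page table in the style this commit adopts: only mapped pages
// occupy an entry, instead of a 2^52-element flat array.
struct SparsePageTable {
    std::map<u64, u8*> pointers;
    std::map<u64, PageType> attributes;
};

// Hypothetical helper: resolve a guest virtual address to host memory.
u8* TranslateAddress(const SparsePageTable& table, VAddr vaddr) {
    const u64 page_index = vaddr >> PAGE_BITS;
    const auto it = table.pointers.find(page_index);
    if (it == table.pointers.end() || it->second == nullptr)
        return nullptr; // unmapped or MMIO-backed page
    return it->second + (vaddr & PAGE_MASK); // host pointer + page offset
}
```

One design consequence worth noting: the `Write` path above still indexes `attributes` with `operator[]`, which on a `std::map` default-constructs an entry for every missing key, so unmapped accesses quietly grow the table; a `find`-based lookup like the sketch avoids that.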
```diff
diff --git a/src/core/memory.h b/src/core/memory.h
index c8c56babd..e8d796d24 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -16,10 +16,10 @@ namespace Memory {
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-const u32 PAGE_SIZE = 0x1000;
-const u32 PAGE_MASK = PAGE_SIZE - 1;
 const int PAGE_BITS = 12;
-const size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
+const u64 PAGE_SIZE = 1 << PAGE_BITS;
+const u64 PAGE_MASK = PAGE_SIZE - 1;
+const size_t PAGE_TABLE_NUM_ENTRIES = 1ULL << (64 - PAGE_BITS);
 
 /// Physical memory regions as seen from the ARM11
 enum : PAddr {
@@ -178,17 +178,17 @@ u8* GetPhysicalPointer(PAddr address);
  * Adds the supplied value to the rasterizer resource cache counter of each
  * page touching the region.
  */
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta);
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta);
 
 /**
  * Flushes any externally cached rasterizer resources touching the given region.
  */
-void RasterizerFlushRegion(PAddr start, u32 size);
+void RasterizerFlushRegion(PAddr start, u64 size);
 
 /**
  * Flushes and invalidates any externally cached rasterizer resources touching the given region.
  */
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size);
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size);
 
 enum class FlushMode {
     /// Write back modified surfaces to RAM
@@ -201,12 +201,12 @@ enum class FlushMode {
 * Flushes and invalidates any externally cached rasterizer resources touching the given virtual
 * address region.
 */
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode);
 
 /**
 * Dynarmic has an optimization to memory accesses when the pointer to the page exists that
 * can be used by setting up the current page table as a callback. This function is used to
 * retrieve the current page table for that purpose.
 */
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
+//std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
 }
```
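With `PAGE_BITS = 12` the rewritten constants keep the same 4 KiB pages, but `PAGE_TABLE_NUM_ENTRIES` jumps from 2^20 to 2^52, which is why `memory.cpp` above can no longer declare arrays of that size. A compile-time check of the arithmetic (assuming a 64-bit `size_t`, since the `1ULL << 52` initializer would not fit otherwise):

```cpp
#include <cstddef>
#include <cstdint>

using u64 = std::uint64_t;

const int PAGE_BITS = 12;
const u64 PAGE_SIZE = 1 << PAGE_BITS;
const u64 PAGE_MASK = PAGE_SIZE - 1;
const std::size_t PAGE_TABLE_NUM_ENTRIES = 1ULL << (64 - PAGE_BITS);

static_assert(PAGE_SIZE == 0x1000, "same 4 KiB pages as before");
static_assert(PAGE_MASK == 0xFFF, "low 12 bits select the byte within a page");
static_assert(PAGE_TABLE_NUM_ENTRIES == (1ULL << 52),
              "one entry per page of a full 64-bit space");
// 2^52 pointers x 8 bytes = 2^55 bytes = 32 PiB: a flat std::array is
// impossible, hence the std::map-based page table in memory.cpp.
```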
```diff
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 3fdf3a87d..fc3fda466 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -18,7 +18,7 @@ void InitMemoryMap();
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(VAddr base, u32 size, u8* target);
+void MapMemoryRegion(VAddr base, u64 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
@@ -26,7 +26,7 @@ void MapMemoryRegion(VAddr base, u32 size, u8* target);
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler);
+void MapIoRegion(VAddr base, u64 size, MMIORegionPointer mmio_handler);
 
-void UnmapRegion(VAddr base, u32 size);
+void UnmapRegion(VAddr base, u64 size);
 }
```
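As a usage sketch, a caller of the widened `MapMemoryRegion` can now pass a base above the 4 GiB boundary; the values below are hypothetical and the function body is a stub standing in for the real page-table walk in `MapPages`:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

using u8 = std::uint8_t;
using u64 = std::uint64_t;
using VAddr = u64;

// Stub with the post-commit signature from memory_setup.h; the real
// implementation updates the page table via MapPages.
void MapMemoryRegion(VAddr base, u64 size, u8* target) {
    std::printf("map %016llx (+0x%llx) -> %p\n",
                static_cast<unsigned long long>(base),
                static_cast<unsigned long long>(size),
                static_cast<void*>(target));
}

int main() {
    // Hypothetical: an 8 MiB page-aligned region above the old u32
    // ceiling; both arguments would have truncated before this commit.
    std::vector<u8> backing(8 * 1024 * 1024);
    const VAddr base = 0x100000000; // 4 GiB: unrepresentable as a u32 VAddr
    MapMemoryRegion(base, backing.size(), backing.data());
}
```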