| author | 2014-12-29 19:47:41 -0800 |
|---|---|
| committer | 2014-12-29 19:47:41 -0800 |
| commit | 8ba9ac0f74abb0408a26207a76a0c1808bad8de0 (patch) |
| tree | f1c7c3393fa726435b5b90bf335567c93e528ef1 /src/core/mem_map_funcs.cpp |
| parent | Add comment regarding __WIN32__ in SkyEye code (diff) |
| parent | Merge pull request #367 from bunnei/usat_ssat (diff) |
| download | yuzu-8ba9ac0f74abb0408a26207a76a0c1808bad8de0.tar.gz yuzu-8ba9ac0f74abb0408a26207a76a0c1808bad8de0.tar.xz yuzu-8ba9ac0f74abb0408a26207a76a0c1808bad8de0.zip |
Fix merge conflicts
Diffstat (limited to 'src/core/mem_map_funcs.cpp')
-rw-r--r--  src/core/mem_map_funcs.cpp | 92
1 file changed, 41 insertions, 51 deletions
diff --git a/src/core/mem_map_funcs.cpp b/src/core/mem_map_funcs.cpp
index 443d5ad7e..fdf382ed6 100644
--- a/src/core/mem_map_funcs.cpp
+++ b/src/core/mem_map_funcs.cpp
@@ -1,5 +1,5 @@
 // Copyright 2014 Citra Emulator Project
-// Licensed under GPLv2
+// Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

 #include <map>
@@ -13,7 +13,7 @@
 namespace Memory {

 static std::map<u32, MemoryBlock> heap_map;
-static std::map<u32, MemoryBlock> heap_gsp_map;
+static std::map<u32, MemoryBlock> heap_linear_map;
 static std::map<u32, MemoryBlock> shared_map;

 /// Convert a physical address to virtual address
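The hunk above renames the GSP heap bookkeeping to `heap_linear_map`; all three maps key each `MemoryBlock` by its virtual address, so `std::map`'s ordering can cheaply answer "which block contains this address". A minimal sketch of that lookup (the struct fields and the `FindBlock` helper are illustrative, not part of this file):

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

using u32 = std::uint32_t;

// Illustrative stand-in for MemoryBlock (the real type lives in mem_map.h);
// only the fields this sketch needs.
struct MemoryBlock {
    u32 vaddr;
    u32 size;
};

// Keyed by virtual address, as in the maps above, so entries stay sorted.
static std::map<u32, MemoryBlock> heap_linear_map;

// Hypothetical helper: find the block containing vaddr, or nullptr.
const MemoryBlock* FindBlock(u32 vaddr) {
    auto it = heap_linear_map.upper_bound(vaddr); // first block starting after vaddr
    if (it == heap_linear_map.begin())
        return nullptr;
    --it; // candidate: last block starting at or before vaddr
    const MemoryBlock& block = it->second;
    return (vaddr < block.vaddr + block.size) ? &block : nullptr;
}

int main() {
    heap_linear_map[0x14000000] = {0x14000000, 0x1000};
    heap_linear_map[0x14001000] = {0x14001000, 0x2000};

    if (const MemoryBlock* hit = FindBlock(0x14001800))
        std::printf("0x14001800 falls inside the block at 0x%08X\n", hit->vaddr);
}
```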
@@ -28,7 +28,7 @@ VAddr PhysicalToVirtualAddress(const PAddr addr) {
         return addr - FCRAM_PADDR + FCRAM_VADDR;
     }

-    ERROR_LOG(MEMMAP, "Unknown physical address @ 0x%08x", addr);
+    LOG_ERROR(HW_Memory, "Unknown physical address @ 0x%08x", addr);
     return addr;
 }

@@ -44,7 +44,7 @@ PAddr VirtualToPhysicalAddress(const VAddr addr) {
         return addr - FCRAM_VADDR + FCRAM_PADDR;
     }

-    ERROR_LOG(MEMMAP, "Unknown virtual address @ 0x%08x", addr);
+    LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%08x", addr);
     return addr;
 }

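Both helpers above translate between the physical and virtual views of FCRAM with a fixed offset: subtract one region's base, add the other's, and fall through (after logging) for anything unrecognized. A compact sketch of the same arithmetic; the constant values are placeholders chosen only to make the example self-contained, not the emulator's real memory map:

```cpp
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;
using VAddr = u32;
using PAddr = u32;

// Placeholder bases/size for illustration only; the real values live in mem_map.h.
constexpr PAddr FCRAM_PADDR = 0x20000000;
constexpr VAddr FCRAM_VADDR = 0x08000000;
constexpr u32   FCRAM_SIZE  = 0x08000000; // 128 MiB

VAddr PhysicalToVirtual(PAddr addr) {
    if (addr >= FCRAM_PADDR && addr < FCRAM_PADDR + FCRAM_SIZE)
        return addr - FCRAM_PADDR + FCRAM_VADDR; // same offset, different base
    return addr; // unknown region: fall through, as the real code does after logging
}

PAddr VirtualToPhysical(VAddr addr) {
    if (addr >= FCRAM_VADDR && addr < FCRAM_VADDR + FCRAM_SIZE)
        return addr - FCRAM_VADDR + FCRAM_PADDR;
    return addr;
}

int main() {
    VAddr v = FCRAM_VADDR + 0x1234;
    std::printf("0x%08X -> 0x%08X -> 0x%08X\n",
                v, VirtualToPhysical(v), PhysicalToVirtual(VirtualToPhysical(v)));
}
```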
@@ -56,32 +56,27 @@ inline void Read(T &var, const VAddr vaddr) {

     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
-        var = *((const T*)&g_kernel_mem[vaddr & KERNEL_MEMORY_MASK]);
-
-    // Hardware I/O register reads
-    // 0x10XXXXXX- is physical address space, 0x1EXXXXXX is virtual address space
-    } else if ((vaddr >= HARDWARE_IO_VADDR) && (vaddr < HARDWARE_IO_VADDR_END)) {
-        HW::Read<T>(var, vaddr);
+        var = *((const T*)&g_kernel_mem[vaddr - KERNEL_MEMORY_VADDR]);

     // ExeFS:/.code is loaded here
     } else if ((vaddr >= EXEFS_CODE_VADDR) && (vaddr < EXEFS_CODE_VADDR_END)) {
-        var = *((const T*)&g_exefs_code[vaddr & EXEFS_CODE_MASK]);
+        var = *((const T*)&g_exefs_code[vaddr - EXEFS_CODE_VADDR]);

-    // FCRAM - GSP heap
-    } else if ((vaddr >= HEAP_GSP_VADDR) && (vaddr < HEAP_GSP_VADDR_END)) {
-        var = *((const T*)&g_heap_gsp[vaddr & HEAP_GSP_MASK]);
+    // FCRAM - linear heap
+    } else if ((vaddr >= HEAP_LINEAR_VADDR) && (vaddr < HEAP_LINEAR_VADDR_END)) {
+        var = *((const T*)&g_heap_linear[vaddr - HEAP_LINEAR_VADDR]);

     // FCRAM - application heap
     } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) {
-        var = *((const T*)&g_heap[vaddr & HEAP_MASK]);
+        var = *((const T*)&g_heap[vaddr - HEAP_VADDR]);

     // Shared memory
     } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) {
-        var = *((const T*)&g_shared_mem[vaddr & SHARED_MEMORY_MASK]);
+        var = *((const T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR]);

     // System memory
     } else if ((vaddr >= SYSTEM_MEMORY_VADDR) && (vaddr < SYSTEM_MEMORY_VADDR_END)) {
-        var = *((const T*)&g_system_mem[vaddr & SYSTEM_MEMORY_MASK]);
+        var = *((const T*)&g_system_mem[vaddr - SYSTEM_MEMORY_VADDR]);

     // Config memory
     } else if ((vaddr >= CONFIG_MEMORY_VADDR) && (vaddr < CONFIG_MEMORY_VADDR_END)) {
@@ -89,10 +84,10 @@ inline void Read(T &var, const VAddr vaddr) {

     // VRAM
     } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
-        var = *((const T*)&g_vram[vaddr & VRAM_MASK]);
+        var = *((const T*)&g_vram[vaddr - VRAM_VADDR]);

     } else {
-        ERROR_LOG(MEMMAP, "unknown Read%d @ 0x%08X", sizeof(var) * 8, vaddr);
+        LOG_ERROR(HW_Memory, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, vaddr);
     }
 }

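The substantive change across `Read<T>` is the indexing scheme: each region now computes its offset as `vaddr - REGION_VADDR` instead of `vaddr & REGION_MASK` (and the hardware I/O branch is removed from this dispatch). Masking only equals the base-relative offset when the base is aligned to the power-of-two span the mask covers; subtraction works for any base and size. A small sketch with made-up constants showing where the two diverge:

```cpp
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

int main() {
    // Illustrative region whose base is NOT aligned to the mask's 8 MiB span.
    constexpr u32 REGION_VADDR = 0x14200000;
    constexpr u32 REGION_MASK  = 0x007FFFFF;

    u32 vaddr = REGION_VADDR + 0x00180000; // an address inside the region

    u32 by_offset = vaddr - REGION_VADDR;  // always the distance from the base
    u32 by_mask   = vaddr & REGION_MASK;   // only correct for mask-aligned bases

    // Prints offset 0x00180000 vs mask 0x00380000: the masked index would read
    // the wrong slot of the backing buffer.
    std::printf("offset: 0x%08X  mask: 0x%08X\n", by_offset, by_mask);
}
```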
@@ -101,36 +96,31 @@ inline void Write(const VAddr vaddr, const T data) {

     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
-        *(T*)&g_kernel_mem[vaddr & KERNEL_MEMORY_MASK] = data;
-
-    // Hardware I/O register writes
-    // 0x10XXXXXX- is physical address space, 0x1EXXXXXX is virtual address space
-    } else if ((vaddr >= HARDWARE_IO_VADDR) && (vaddr < HARDWARE_IO_VADDR_END)) {
-        HW::Write<T>(vaddr, data);
+        *(T*)&g_kernel_mem[vaddr - KERNEL_MEMORY_VADDR] = data;

     // ExeFS:/.code is loaded here
     } else if ((vaddr >= EXEFS_CODE_VADDR) && (vaddr < EXEFS_CODE_VADDR_END)) {
-        *(T*)&g_exefs_code[vaddr & EXEFS_CODE_MASK] = data;
+        *(T*)&g_exefs_code[vaddr - EXEFS_CODE_VADDR] = data;

-    // FCRAM - GSP heap
-    } else if ((vaddr >= HEAP_GSP_VADDR) && (vaddr < HEAP_GSP_VADDR_END)) {
-        *(T*)&g_heap_gsp[vaddr & HEAP_GSP_MASK] = data;
+    // FCRAM - linear heap
+    } else if ((vaddr >= HEAP_LINEAR_VADDR) && (vaddr < HEAP_LINEAR_VADDR_END)) {
+        *(T*)&g_heap_linear[vaddr - HEAP_LINEAR_VADDR] = data;

     // FCRAM - application heap
     } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) {
-        *(T*)&g_heap[vaddr & HEAP_MASK] = data;
+        *(T*)&g_heap[vaddr - HEAP_VADDR] = data;

     // Shared memory
     } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) {
-        *(T*)&g_shared_mem[vaddr & SHARED_MEMORY_MASK] = data;
+        *(T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR] = data;

     // System memory
     } else if ((vaddr >= SYSTEM_MEMORY_VADDR) && (vaddr < SYSTEM_MEMORY_VADDR_END)) {
-        *(T*)&g_system_mem[vaddr & SYSTEM_MEMORY_MASK] = data;
+        *(T*)&g_system_mem[vaddr - SYSTEM_MEMORY_VADDR] = data;

     // VRAM
     } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
-        *(T*)&g_vram[vaddr & VRAM_MASK] = data;
+        *(T*)&g_vram[vaddr - VRAM_VADDR] = data;

     //} else if ((vaddr & 0xFFF00000) == 0x1FF00000) {
     //    _assert_msg_(MEMMAP, false, "umimplemented write to DSP memory");
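Besides the macro rename, the error log in the next hunk changes `%d` to `%lu` and casts `data` to `u32`: `sizeof(...) * 8` has type `size_t`, which `%d` never matches, and the cast pins the vararg type down for every instantiation of `T`. `%lu` is only exact where `size_t` happens to be `unsigned long`, so a fully portable variant would cast the width (or use `%zu`). A small illustrative sketch, not the project's logging macro:

```cpp
#include <cstdint>
#include <cstdio>

template <typename T>
void LogUnknownWrite(std::uint32_t vaddr, T data) {
    // Cast the width to a fixed type so the format specifier is portable,
    // and cast data to u32 so the vararg type is the same for any T.
    std::printf("unknown Write%u 0x%08X @ 0x%08X\n",
                static_cast<unsigned>(sizeof(data) * 8),
                static_cast<std::uint32_t>(data), vaddr);
}

int main() {
    LogUnknownWrite<std::uint16_t>(0xDEADBEEF, 0x1234); // prints "unknown Write16 ..."
}
```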
@@ -141,41 +131,41 @@ inline void Write(const VAddr vaddr, const T data) {

     // Error out...
     } else {
-        ERROR_LOG(MEMMAP, "unknown Write%d 0x%08X @ 0x%08X", sizeof(data) * 8, data, vaddr);
+        LOG_ERROR(HW_Memory, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, vaddr);
     }
 }

 u8 *GetPointer(const VAddr vaddr) {
     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
-        return g_kernel_mem + (vaddr & KERNEL_MEMORY_MASK);
+        return g_kernel_mem + (vaddr - KERNEL_MEMORY_VADDR);

     // ExeFS:/.code is loaded here
     } else if ((vaddr >= EXEFS_CODE_VADDR) && (vaddr < EXEFS_CODE_VADDR_END)) {
-        return g_exefs_code + (vaddr & EXEFS_CODE_MASK);
+        return g_exefs_code + (vaddr - EXEFS_CODE_VADDR);

-    // FCRAM - GSP heap
-    } else if ((vaddr >= HEAP_GSP_VADDR) && (vaddr < HEAP_GSP_VADDR_END)) {
-        return g_heap_gsp + (vaddr & HEAP_GSP_MASK);
+    // FCRAM - linear heap
+    } else if ((vaddr >= HEAP_LINEAR_VADDR) && (vaddr < HEAP_LINEAR_VADDR_END)) {
+        return g_heap_linear + (vaddr - HEAP_LINEAR_VADDR);

     // FCRAM - application heap
     } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) {
-        return g_heap + (vaddr & HEAP_MASK);
+        return g_heap + (vaddr - HEAP_VADDR);

     // Shared memory
     } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) {
-        return g_shared_mem + (vaddr & SHARED_MEMORY_MASK);
+        return g_shared_mem + (vaddr - SHARED_MEMORY_VADDR);

     // System memory
     } else if ((vaddr >= SYSTEM_MEMORY_VADDR) && (vaddr < SYSTEM_MEMORY_VADDR_END)) {
-        return g_system_mem + (vaddr & SYSTEM_MEMORY_MASK);
+        return g_system_mem + (vaddr - SYSTEM_MEMORY_VADDR);

     // VRAM
     } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
-        return g_vram + (vaddr & VRAM_MASK);
+        return g_vram + (vaddr - VRAM_VADDR);

     } else {
-        ERROR_LOG(MEMMAP, "unknown GetPointer @ 0x%08x", vaddr);
+        LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
         return 0;
     }
 }
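`GetPointer` translates a guest virtual address into a raw host pointer into the region's backing buffer, using the same base-relative offsets as `Read`/`Write`. A self-contained sketch of the pattern with two stand-in regions; the sizes and base values below are invented for the example, only the shape matches the code above:

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

using u8 = std::uint8_t;
using u32 = std::uint32_t;
using VAddr = u32;

// Illustrative backing stores and bases; the real ones are defined by mem_map.
constexpr VAddr HEAP_VADDR = 0x08000000;
constexpr u32   HEAP_SIZE  = 0x1000;
constexpr VAddr VRAM_VADDR = 0x1F000000;
constexpr u32   VRAM_SIZE  = 0x1000;

static u8 g_heap[HEAP_SIZE];
static u8 g_vram[VRAM_SIZE];

// Same shape as the GetPointer in the diff: base-relative offset into the
// region's host buffer, null for unmapped addresses.
u8* GetPointer(VAddr vaddr) {
    if (vaddr >= HEAP_VADDR && vaddr < HEAP_VADDR + HEAP_SIZE)
        return g_heap + (vaddr - HEAP_VADDR);
    if (vaddr >= VRAM_VADDR && vaddr < VRAM_VADDR + VRAM_SIZE)
        return g_vram + (vaddr - VRAM_VADDR);
    return nullptr;
}

int main() {
    // A host pointer lets callers do bulk copies without per-byte dispatch.
    const char msg[] = "hello";
    std::memcpy(GetPointer(HEAP_VADDR + 0x10), msg, sizeof(msg));
    std::printf("%s\n", reinterpret_cast<const char*>(GetPointer(HEAP_VADDR + 0x10)));
}
```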
@@ -204,24 +194,24 @@ u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
 }

 /**
- * Maps a block of memory on the GSP heap
+ * Maps a block of memory on the linear heap
  * @param size Size of block in bytes
  * @param operation Memory map operation type
  * @param flags Memory allocation flags
  */
-u32 MapBlock_HeapGSP(u32 size, u32 operation, u32 permissions) {
+u32 MapBlock_HeapLinear(u32 size, u32 operation, u32 permissions) {
     MemoryBlock block;

-    block.base_address = HEAP_GSP_VADDR;
+    block.base_address = HEAP_LINEAR_VADDR;
     block.size = size;
     block.operation = operation;
     block.permissions = permissions;

-    if (heap_gsp_map.size() > 0) {
-        const MemoryBlock last_block = heap_gsp_map.rbegin()->second;
+    if (heap_linear_map.size() > 0) {
+        const MemoryBlock last_block = heap_linear_map.rbegin()->second;
         block.address = last_block.address + last_block.size;
     }
-    heap_gsp_map[block.GetVirtualAddress()] = block;
+    heap_linear_map[block.GetVirtualAddress()] = block;

     return block.GetVirtualAddress();
 }
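`MapBlock_HeapLinear` (renamed from `MapBlock_HeapGSP`) places each new block immediately after the highest existing one, i.e. a simple bump allocator over the linear-heap region, recorded in `heap_linear_map` keyed by virtual address. A self-contained sketch of that pattern; the base constant and the reduced field set are placeholders for illustration, not the project's definitions:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>

using u32 = std::uint32_t;

constexpr u32 HEAP_LINEAR_VADDR = 0x14000000; // placeholder base for the sketch

struct MemoryBlock {
    u32 base_address;
    u32 address; // offset from base_address
    u32 size;
    u32 GetVirtualAddress() const { return base_address + address; }
};

static std::map<u32, MemoryBlock> heap_linear_map;

// Same allocation pattern as MapBlock_HeapLinear: each new block is appended
// directly after the highest existing block.
u32 MapBlockLinear(u32 size) {
    MemoryBlock block{HEAP_LINEAR_VADDR, 0, size};
    if (!heap_linear_map.empty()) {
        const MemoryBlock& last = heap_linear_map.rbegin()->second;
        block.address = last.address + last.size;
    }
    heap_linear_map[block.GetVirtualAddress()] = block;
    return block.GetVirtualAddress();
}

int main() {
    std::printf("first:  0x%08X\n", MapBlockLinear(0x1000)); // 0x14000000
    std::printf("second: 0x%08X\n", MapBlockLinear(0x2000)); // 0x14001000
    std::printf("third:  0x%08X\n", MapBlockLinear(0x0800)); // 0x14003000
}
```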
@@ -239,7 +229,7 @@ u16 Read16(const VAddr addr) {
     // Check for 16-bit unaligned memory reads...
     if (addr & 1) {
        // TODO(bunnei): Implement 16-bit unaligned memory reads
-        ERROR_LOG(MEMMAP, "16-bit unaligned memory reads are not implemented!");
+        LOG_ERROR(HW_Memory, "16-bit unaligned memory reads are not implemented!");
     }

     return (u16)data;
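The TODO above leaves unaligned 16-bit reads unimplemented and only logs them. One common fallback, shown here as a hedged sketch rather than what the project eventually adopted, is to split the access into two byte reads and combine them in the console's little-endian byte order:

```cpp
#include <cstdint>
#include <cstdio>

using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;

// Stand-in byte-granular reader; in the real code this would go through Read8.
static u8 g_mem[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
u8 Read8(u32 addr) { return g_mem[addr]; }

// In the real code the aligned path goes through the templated Read<T>;
// the unaligned case (addr & 1) can fall back to two byte reads like this.
u16 Read16(u32 addr) {
    return static_cast<u16>(Read8(addr) | (Read8(addr + 1) << 8));
}

int main() {
    std::printf("aligned   Read16(2) = 0x%04X\n", Read16(2)); // 0x4433
    std::printf("unaligned Read16(3) = 0x%04X\n", Read16(3)); // 0x5544
}
```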