| author | 2014-08-12 13:55:41 +0200 |
|---|---|
| committer | 2014-08-12 13:55:41 +0200 |
| commit | 36cabe35cc15a6590f5d18be695ae505a946cb06 (patch) |
| tree | 241e6d8b36e6ab9921ef7afb71e7350e52862e2a /src/core/mem_map_funcs.cpp |
| parent | Merge pull request #38 from neobrain/replace_registerset (diff) |
| parent | Pica: Add basic rasterizer. (diff) |
Merge pull request #37 from neobrain/pica
Initial work on Pica rendering.
Diffstat (limited to 'src/core/mem_map_funcs.cpp')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/mem_map_funcs.cpp | 68 |

1 file changed, 36 insertions(+), 32 deletions(-)
diff --git a/src/core/mem_map_funcs.cpp b/src/core/mem_map_funcs.cpp
index 305be8468..5772cca52 100644
--- a/src/core/mem_map_funcs.cpp
+++ b/src/core/mem_map_funcs.cpp
@@ -17,37 +17,44 @@ std::map<u32, MemoryBlock> g_heap_map;
 std::map<u32, MemoryBlock> g_heap_gsp_map;
 std::map<u32, MemoryBlock> g_shared_map;
 
-/// Convert a physical address (or firmware-specific virtual address) to primary virtual address
-u32 _VirtualAddress(const u32 addr) {
-    // Our memory interface read/write functions assume virtual addresses. Put any physical address
-    // to virtual address translations here. This is obviously quite hacky... But we're not doing
-    // any MMU emulation yet or anything
-    if ((addr >= FCRAM_PADDR) && (addr < FCRAM_PADDR_END)) {
-        return VirtualAddressFromPhysical_FCRAM(addr);
-
-    // Virtual address mapping FW0B
-    } else if ((addr >= FCRAM_VADDR_FW0B) && (addr < FCRAM_VADDR_FW0B_END)) {
-        return VirtualAddressFromPhysical_FCRAM(addr);
-
-    // Hardware IO
-    // TODO(bunnei): FixMe
-    // This isn't going to work... The physical address of HARDWARE_IO conflicts with the virtual
-    // address of shared memory.
-    //} else if ((addr >= HARDWARE_IO_PADDR) && (addr < HARDWARE_IO_PADDR_END)) {
-    //    return (addr + 0x0EB00000);
-
+/// Convert a physical address to virtual address
+u32 PhysicalToVirtualAddress(const u32 addr) {
+    // Our memory interface read/write functions assume virtual addresses. Put any physical address
+    // to virtual address translations here. This is quite hacky, but necessary until we implement
+    // proper MMU emulation.
+    // TODO: Screw it, I'll let bunnei figure out how to do this properly.
+    if ((addr >= VRAM_PADDR) && (addr < VRAM_PADDR_END)) {
+        return addr - VRAM_PADDR + VRAM_VADDR;
+    } else if ((addr >= FCRAM_PADDR) && (addr < FCRAM_PADDR_END)) {
+        return addr - FCRAM_PADDR + FCRAM_VADDR;
+    }
+
+    ERROR_LOG(MEMMAP, "Unknown physical address @ 0x%08x", addr);
+    return addr;
+}
+
+/// Convert a virtual address to physical address
+u32 VirtualToPhysicalAddress(const u32 addr) {
+    // Our memory interface read/write functions assume virtual addresses. Put any virtual address
+    // to physical address translations here. This is quite hacky, but necessary until we implement
+    // proper MMU emulation.
+    // TODO: Screw it, I'll let bunnei figure out how to do this properly.
+    if ((addr >= VRAM_VADDR) && (addr < VRAM_VADDR_END)) {
+        return addr - 0x07000000;
+    } else if ((addr >= FCRAM_VADDR) && (addr < FCRAM_VADDR_END)) {
+        return addr - FCRAM_VADDR + FCRAM_PADDR;
     }
+
+    ERROR_LOG(MEMMAP, "Unknown virtual address @ 0x%08x", addr);
     return addr;
 }
 
 template <typename T>
-inline void Read(T &var, const u32 addr) {
+inline void Read(T &var, const u32 vaddr) {
     // TODO: Figure out the fastest order of tests for both read and write (they are probably different).
     // TODO: Make sure this represents the mirrors in a correct way.
     // Could just do a base-relative read, too.... TODO
 
-    const u32 vaddr = _VirtualAddress(addr);
-
     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
         var = *((const T*)&g_kernel_mem[vaddr & KERNEL_MEMORY_MASK]);
@@ -91,9 +98,8 @@ inline void Read(T &var, const u32 addr) {
 }
 
 template <typename T>
-inline void Write(u32 addr, const T data) {
-    u32 vaddr = _VirtualAddress(addr);
-
+inline void Write(u32 vaddr, const T data) {
+
     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
         *(T*)&g_kernel_mem[vaddr & KERNEL_MEMORY_MASK] = data;
@@ -133,16 +139,14 @@ inline void Write(u32 addr, const T data) {
     //    _assert_msg_(MEMMAP, false, "unimplemented write to Configuration Memory");
     //} else if ((vaddr & 0xFFFFF000) == 0x1FF81000) {
     //    _assert_msg_(MEMMAP, false, "unimplemented write to shared page");
 
     // Error out...
     } else {
         ERROR_LOG(MEMMAP, "unknown Write%d 0x%08X @ 0x%08X", sizeof(data) * 8, data, vaddr);
     }
 }
 
-u8 *GetPointer(const u32 addr) {
-    const u32 vaddr = _VirtualAddress(addr);
-
+u8 *GetPointer(const u32 vaddr) {
     // Kernel memory command buffer
     if (vaddr >= KERNEL_MEMORY_VADDR && vaddr < KERNEL_MEMORY_VADDR_END) {
         return g_kernel_mem + (vaddr & KERNEL_MEMORY_MASK);
@@ -185,12 +189,12 @@ u8 *GetPointer(const u32 addr) {
  */
 u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
     MemoryBlock block;
-
+
     block.base_address = HEAP_VADDR;
     block.size = size;
     block.operation = operation;
     block.permissions = permissions;
-
+
     if (g_heap_map.size() > 0) {
         const MemoryBlock last_block = g_heap_map.rbegin()->second;
         block.address = last_block.address + last_block.size;
@@ -208,12 +212,12 @@ u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
  */
 u32 MapBlock_HeapGSP(u32 size, u32 operation, u32 permissions) {
     MemoryBlock block;
-
+
    block.base_address = HEAP_GSP_VADDR;
     block.size = size;
     block.operation = operation;
     block.permissions = permissions;
-
+
     if (g_heap_gsp_map.size() > 0) {
         const MemoryBlock last_block = g_heap_gsp_map.rbegin()->second;
         block.address = last_block.address + last_block.size;
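The sketches below unpack the main changes in the diff above; none of them are the project's actual code. First, the two new translation helpers are nothing more than fixed offsets between address windows. The following minimal, compilable sketch shows that pattern; the constant values are placeholder assumptions standing in for the definitions in src/core/mem_map.h, and FCRAM_SIZE/VRAM_SIZE are names introduced here purely for illustration.

```cpp
// Hedged sketch of the linear range translation introduced in this commit.
// All constants below are assumed values, not the ones defined in mem_map.h.
#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

constexpr u32 FCRAM_PADDR = 0x20000000, FCRAM_SIZE = 0x08000000; // assumed
constexpr u32 FCRAM_VADDR = 0x08000000;                          // assumed
constexpr u32 VRAM_PADDR  = 0x18000000, VRAM_SIZE  = 0x00600000; // assumed
constexpr u32 VRAM_VADDR  = 0x1F000000;                          // assumed

u32 PhysicalToVirtualAddress(u32 addr) {
    if (addr >= VRAM_PADDR && addr < VRAM_PADDR + VRAM_SIZE)
        return addr - VRAM_PADDR + VRAM_VADDR;   // shift into the VRAM virtual window
    if (addr >= FCRAM_PADDR && addr < FCRAM_PADDR + FCRAM_SIZE)
        return addr - FCRAM_PADDR + FCRAM_VADDR; // shift into the FCRAM virtual window
    return addr; // unknown range: pass through, as the commit does after logging
}

u32 VirtualToPhysicalAddress(u32 addr) {
    if (addr >= VRAM_VADDR && addr < VRAM_VADDR + VRAM_SIZE)
        return addr - VRAM_VADDR + VRAM_PADDR;
    if (addr >= FCRAM_VADDR && addr < FCRAM_VADDR + FCRAM_SIZE)
        return addr - FCRAM_VADDR + FCRAM_PADDR;
    return addr;
}

int main() {
    // Round-tripping an address inside a known range is the identity.
    const u32 paddr = VRAM_PADDR + 0x1234;
    const u32 vaddr = PhysicalToVirtualAddress(paddr);
    std::printf("0x%08X -> 0x%08X -> 0x%08X\n",
                static_cast<unsigned>(paddr), static_cast<unsigned>(vaddr),
                static_cast<unsigned>(VirtualToPhysicalAddress(vaddr)));
    return 0;
}
```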
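Second, with the translation split out, Read, Write, and GetPointer now take a virtual address directly and pick a backing buffer by a range check plus a mask. The table-driven version below is only a sketch of that dispatch, not the real implementation: the Region struct, the 4 KiB kernel region, and its base/mask values are hypothetical, and the masking trick assumes each base is aligned to its region size (as the real *_MASK constants imply).

```cpp
// Hedged sketch of the range-check-then-mask dispatch used by Read/Write/GetPointer.
// The real code tests each region explicitly (kernel memory, heap, shared memory, ...).
#include <cstdint>
#include <cstring>
#include <vector>

using u8  = std::uint8_t;
using u32 = std::uint32_t;

struct Region {
    u32 vaddr_base; // start of the region in the emulated virtual address space
    u32 mask;       // size - 1; base is assumed to be aligned to the region size
    u8* backing;    // host buffer emulating this region
};

// Returns a host pointer for an emulated virtual address, or nullptr if unmapped.
u8* GetPointer(std::vector<Region>& regions, u32 vaddr) {
    for (Region& r : regions) {
        if (vaddr >= r.vaddr_base && vaddr <= r.vaddr_base + r.mask)
            return r.backing + (vaddr & r.mask); // same "vaddr & REGION_MASK" trick
    }
    return nullptr; // the real code logs ERROR_LOG(MEMMAP, ...) instead
}

template <typename T>
void Read(std::vector<Region>& regions, T& var, u32 vaddr) {
    if (u8* p = GetPointer(regions, vaddr))
        std::memcpy(&var, p, sizeof(T)); // memcpy avoids the cast's alignment issues
}

int main() {
    std::vector<u8> kernel_mem(0x1000);                 // pretend 4 KiB kernel region
    std::vector<Region> regions = {
        {0xFFFF0000, 0x0FFF, kernel_mem.data()},        // base aligned to the 4 KiB size
    };
    kernel_mem[0x10] = 0x2A;
    u8 value = 0;
    Read(regions, value, 0xFFFF0010);                   // reads back 0x2A
    return value == 0x2A ? 0 : 1;
}
```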
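Finally, the MapBlock_Heap/MapBlock_HeapGSP hunks appear to only strip trailing whitespace, but they show how heap blocks are handed out: each new block is placed immediately after the highest existing one in an ordered std::map. The sketch below mirrors the fields visible in the diff; HEAP_VADDR's value, the GetVirtualAddress() helper, the map key, and the return value are assumptions made for illustration.

```cpp
// Hedged sketch of the "append after the last block" logic shared by
// MapBlock_Heap and MapBlock_HeapGSP. Field names follow the diff; everything
// else (constant values, key choice, return value) is assumed.
#include <cstdint>
#include <map>

using u32 = std::uint32_t;

constexpr u32 HEAP_VADDR = 0x08000000; // placeholder value

struct MemoryBlock {
    u32 base_address = 0; // virtual base of the whole heap region (e.g. HEAP_VADDR)
    u32 address = 0;      // offset of this block within the region
    u32 size = 0;
    u32 operation = 0;
    u32 permissions = 0;
    u32 GetVirtualAddress() const { return base_address + address; } // assumed helper
};

std::map<u32, MemoryBlock> g_heap_map;

// Allocates the next block directly after the last one already in the map.
u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
    MemoryBlock block;
    block.base_address = HEAP_VADDR;
    block.size = size;
    block.operation = operation;
    block.permissions = permissions;

    if (!g_heap_map.empty()) {
        // std::map keeps keys ordered, so rbegin() is the highest block so far.
        const MemoryBlock& last_block = g_heap_map.rbegin()->second;
        block.address = last_block.address + last_block.size;
    }
    g_heap_map[block.GetVirtualAddress()] = block;
    return block.GetVirtualAddress();
}

int main() {
    const u32 first  = MapBlock_Heap(0x1000, 0, 3); // operation/permissions arbitrary
    const u32 second = MapBlock_Heap(0x2000, 0, 3);
    // The second block starts exactly where the first one ends.
    return (second == first + 0x1000) ? 0 : 1;
}
```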