diff options
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 243 |
1 files changed, 123 insertions, 120 deletions
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 8570689dd..5d8069acd 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -1,7 +1,10 @@ | |||
| 1 | // Copyright 2014 Citra Emulator Project | 1 | // Copyright 2015 Citra Emulator Project |
| 2 | // Licensed under GPLv2 or any later version | 2 | // Licensed under GPLv2 or any later version |
| 3 | // Refer to the license.txt file included. | 3 | // Refer to the license.txt file included. |
| 4 | 4 | ||
| 5 | #include <array> | ||
| 6 | |||
| 7 | #include "common/assert.h" | ||
| 5 | #include "common/common_types.h" | 8 | #include "common/common_types.h" |
| 6 | #include "common/logging/log.h" | 9 | #include "common/logging/log.h" |
| 7 | #include "common/swap.h" | 10 | #include "common/swap.h" |
| @@ -14,126 +17,134 @@ | |||
| 14 | 17 | ||
| 15 | namespace Memory { | 18 | namespace Memory { |
| 16 | 19 | ||
| 17 | template <typename T> | 20 | const u32 PAGE_MASK = PAGE_SIZE - 1; |
| 18 | inline void Read(T &var, const VAddr vaddr) { | 21 | const int PAGE_BITS = 12; |
| 19 | // TODO: Figure out the fastest order of tests for both read and write (they are probably different). | 22 | |
| 20 | // TODO: Make sure this represents the mirrors in a correct way. | 23 | enum class PageType { |
| 21 | // Could just do a base-relative read, too.... TODO | 24 | /// Page is unmapped and should cause an access error. |
| 22 | 25 | Unmapped, | |
| 23 | // Kernel memory command buffer | 26 | /// Page is mapped to regular memory. This is the only type you can get pointers to. |
| 24 | if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) { | 27 | Memory, |
| 25 | var = *((const T*)&g_tls_mem[vaddr - TLS_AREA_VADDR]); | 28 | /// Page is mapped to a I/O region. Writing and reading to this page is handled by functions. |
| 26 | 29 | Special, | |
| 27 | // ExeFS:/.code is loaded here | 30 | }; |
| 28 | } else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) { | 31 | |
| 29 | var = *((const T*)&g_exefs_code[vaddr - PROCESS_IMAGE_VADDR]); | 32 | /** |
| 30 | 33 | * A (reasonably) fast way of allowing switchable and remmapable process address spaces. It loosely | |
| 31 | // FCRAM - linear heap | 34 | * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and |
| 32 | } else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) { | 35 | * fetching requirements when accessing. In the usual case of an access to regular memory, it only |
| 33 | var = *((const T*)&g_heap_linear[vaddr - LINEAR_HEAP_VADDR]); | 36 | * requires an indexed fetch and a check for NULL. |
| 34 | 37 | */ | |
| 35 | // FCRAM - application heap | 38 | struct PageTable { |
| 36 | } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) { | 39 | static const size_t NUM_ENTRIES = 1 << (32 - PAGE_BITS); |
| 37 | var = *((const T*)&g_heap[vaddr - HEAP_VADDR]); | 40 | |
| 38 | 41 | /** | |
| 39 | // Shared memory | 42 | * Array of memory pointers backing each page. An entry can only be non-null if the |
| 40 | } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) { | 43 | * corresponding entry in the `attributes` array is of type `Memory`. |
| 41 | var = *((const T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR]); | 44 | */ |
| 42 | 45 | std::array<u8*, NUM_ENTRIES> pointers; | |
| 43 | // Config memory | 46 | |
| 44 | } else if ((vaddr >= CONFIG_MEMORY_VADDR) && (vaddr < CONFIG_MEMORY_VADDR_END)) { | 47 | /** |
| 45 | const u8* raw_memory = (const u8*)&ConfigMem::config_mem; | 48 | * Array of fine grained page attributes. If it is set to any value other than `Memory`, then |
| 46 | var = *((const T*)&raw_memory[vaddr - CONFIG_MEMORY_VADDR]); | 49 | * the corresponding entry in `pointers` MUST be set to null. |
| 47 | 50 | */ | |
| 48 | // Shared page | 51 | std::array<PageType, NUM_ENTRIES> attributes; |
| 49 | } else if ((vaddr >= SHARED_PAGE_VADDR) && (vaddr < SHARED_PAGE_VADDR_END)) { | 52 | }; |
| 50 | const u8* raw_memory = (const u8*)&SharedPage::shared_page; | 53 | |
| 51 | var = *((const T*)&raw_memory[vaddr - SHARED_PAGE_VADDR]); | 54 | /// Singular page table used for the singleton process |
| 52 | 55 | static PageTable main_page_table; | |
| 53 | // DSP memory | 56 | /// Currently active page table |
| 54 | } else if ((vaddr >= DSP_RAM_VADDR) && (vaddr < DSP_RAM_VADDR_END)) { | 57 | static PageTable* current_page_table = &main_page_table; |
| 55 | var = *((const T*)&g_dsp_mem[vaddr - DSP_RAM_VADDR]); | 58 | |
| 56 | 59 | static void MapPages(u32 base, u32 size, u8* memory, PageType type) { | |
| 57 | // VRAM | 60 | LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE, (base + size) * PAGE_SIZE); |
| 58 | } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) { | 61 | |
| 59 | var = *((const T*)&g_vram[vaddr - VRAM_VADDR]); | 62 | u32 end = base + size; |
| 60 | 63 | ||
| 61 | } else { | 64 | while (base != end) { |
| 62 | LOG_ERROR(HW_Memory, "unknown Read%lu @ 0x%08X", sizeof(var) * 8, vaddr); | 65 | ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at %08X", base); |
| 66 | |||
| 67 | if (current_page_table->attributes[base] != PageType::Unmapped) { | ||
| 68 | LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", base * PAGE_SIZE); | ||
| 69 | } | ||
| 70 | current_page_table->attributes[base] = type; | ||
| 71 | current_page_table->pointers[base] = memory; | ||
| 72 | |||
| 73 | base += 1; | ||
| 74 | memory += PAGE_SIZE; | ||
| 63 | } | 75 | } |
| 64 | } | 76 | } |
| 65 | 77 | ||
| 66 | template <typename T> | 78 | void InitMemoryMap() { |
| 67 | inline void Write(const VAddr vaddr, const T data) { | 79 | main_page_table.pointers.fill(nullptr); |
| 68 | 80 | main_page_table.attributes.fill(PageType::Unmapped); | |
| 69 | // Kernel memory command buffer | 81 | } |
| 70 | if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) { | ||
| 71 | *(T*)&g_tls_mem[vaddr - TLS_AREA_VADDR] = data; | ||
| 72 | |||
| 73 | // ExeFS:/.code is loaded here | ||
| 74 | } else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) { | ||
| 75 | *(T*)&g_exefs_code[vaddr - PROCESS_IMAGE_VADDR] = data; | ||
| 76 | |||
| 77 | // FCRAM - linear heap | ||
| 78 | } else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) { | ||
| 79 | *(T*)&g_heap_linear[vaddr - LINEAR_HEAP_VADDR] = data; | ||
| 80 | |||
| 81 | // FCRAM - application heap | ||
| 82 | } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) { | ||
| 83 | *(T*)&g_heap[vaddr - HEAP_VADDR] = data; | ||
| 84 | |||
| 85 | // Shared memory | ||
| 86 | } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) { | ||
| 87 | *(T*)&g_shared_mem[vaddr - SHARED_MEMORY_VADDR] = data; | ||
| 88 | 82 | ||
| 89 | // VRAM | 83 | void MapMemoryRegion(VAddr base, u32 size, u8* target) { |
| 90 | } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) { | 84 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); |
| 91 | *(T*)&g_vram[vaddr - VRAM_VADDR] = data; | 85 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); |
| 86 | MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory); | ||
| 87 | } | ||
| 92 | 88 | ||
| 93 | // DSP memory | 89 | void MapIoRegion(VAddr base, u32 size) { |
| 94 | } else if ((vaddr >= DSP_RAM_VADDR) && (vaddr < DSP_RAM_VADDR_END)) { | 90 | ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size); |
| 95 | *(T*)&g_dsp_mem[vaddr - DSP_RAM_VADDR] = data; | 91 | ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base); |
| 92 | MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special); | ||
| 93 | } | ||
| 96 | 94 | ||
| 97 | //} else if ((vaddr & 0xFFFF0000) == 0x1FF80000) { | 95 | template <typename T> |
| 98 | // ASSERT_MSG(MEMMAP, false, "umimplemented write to Configuration Memory"); | 96 | T Read(const VAddr vaddr) { |
| 99 | //} else if ((vaddr & 0xFFFFF000) == 0x1FF81000) { | 97 | const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
| 100 | // ASSERT_MSG(MEMMAP, false, "umimplemented write to shared page"); | 98 | if (page_pointer) { |
| 99 | return *reinterpret_cast<const T*>(page_pointer + (vaddr & PAGE_MASK)); | ||
| 100 | } | ||
| 101 | 101 | ||
| 102 | // Error out... | 102 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 103 | } else { | 103 | switch (type) { |
| 104 | LOG_ERROR(HW_Memory, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, vaddr); | 104 | case PageType::Unmapped: |
| 105 | LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr); | ||
| 106 | return 0; | ||
| 107 | case PageType::Memory: | ||
| 108 | ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); | ||
| 109 | case PageType::Special: | ||
| 110 | LOG_ERROR(HW_Memory, "I/O reads aren't implemented yet @ %08X", vaddr); | ||
| 111 | return 0; | ||
| 112 | default: | ||
| 113 | UNREACHABLE(); | ||
| 105 | } | 114 | } |
| 106 | } | 115 | } |
| 107 | 116 | ||
| 108 | u8 *GetPointer(const VAddr vaddr) { | 117 | template <typename T> |
| 109 | // Kernel memory command buffer | 118 | void Write(const VAddr vaddr, const T data) { |
| 110 | if (vaddr >= TLS_AREA_VADDR && vaddr < TLS_AREA_VADDR_END) { | 119 | u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
| 111 | return g_tls_mem + (vaddr - TLS_AREA_VADDR); | 120 | if (page_pointer) { |
| 112 | 121 | *reinterpret_cast<T*>(page_pointer + (vaddr & PAGE_MASK)) = data; | |
| 113 | // ExeFS:/.code is loaded here | 122 | return; |
| 114 | } else if ((vaddr >= PROCESS_IMAGE_VADDR) && (vaddr < PROCESS_IMAGE_VADDR_END)) { | 123 | } |
| 115 | return g_exefs_code + (vaddr - PROCESS_IMAGE_VADDR); | ||
| 116 | |||
| 117 | // FCRAM - linear heap | ||
| 118 | } else if ((vaddr >= LINEAR_HEAP_VADDR) && (vaddr < LINEAR_HEAP_VADDR_END)) { | ||
| 119 | return g_heap_linear + (vaddr - LINEAR_HEAP_VADDR); | ||
| 120 | |||
| 121 | // FCRAM - application heap | ||
| 122 | } else if ((vaddr >= HEAP_VADDR) && (vaddr < HEAP_VADDR_END)) { | ||
| 123 | return g_heap + (vaddr - HEAP_VADDR); | ||
| 124 | |||
| 125 | // Shared memory | ||
| 126 | } else if ((vaddr >= SHARED_MEMORY_VADDR) && (vaddr < SHARED_MEMORY_VADDR_END)) { | ||
| 127 | return g_shared_mem + (vaddr - SHARED_MEMORY_VADDR); | ||
| 128 | 124 | ||
| 129 | // VRAM | 125 | PageType type = current_page_table->attributes[vaddr >> PAGE_BITS]; |
| 130 | } else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) { | 126 | switch (type) { |
| 131 | return g_vram + (vaddr - VRAM_VADDR); | 127 | case PageType::Unmapped: |
| 128 | LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32) data, vaddr); | ||
| 129 | return; | ||
| 130 | case PageType::Memory: | ||
| 131 | ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr); | ||
| 132 | case PageType::Special: | ||
| 133 | LOG_ERROR(HW_Memory, "I/O writes aren't implemented yet @ %08X", vaddr); | ||
| 134 | return; | ||
| 135 | default: | ||
| 136 | UNREACHABLE(); | ||
| 137 | } | ||
| 138 | } | ||
| 132 | 139 | ||
| 133 | } else { | 140 | u8* GetPointer(const VAddr vaddr) { |
| 134 | LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr); | 141 | u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS]; |
| 135 | return 0; | 142 | if (page_pointer) { |
| 143 | return page_pointer + (vaddr & PAGE_MASK); | ||
| 136 | } | 144 | } |
| 145 | |||
| 146 | LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr); | ||
| 147 | return nullptr; | ||
| 137 | } | 148 | } |
| 138 | 149 | ||
| 139 | u8* GetPhysicalPointer(PAddr address) { | 150 | u8* GetPhysicalPointer(PAddr address) { |
| @@ -141,27 +152,19 @@ u8* GetPhysicalPointer(PAddr address) { | |||
| 141 | } | 152 | } |
| 142 | 153 | ||
| 143 | u8 Read8(const VAddr addr) { | 154 | u8 Read8(const VAddr addr) { |
| 144 | u8 data = 0; | 155 | return Read<u8>(addr); |
| 145 | Read<u8>(data, addr); | ||
| 146 | return data; | ||
| 147 | } | 156 | } |
| 148 | 157 | ||
| 149 | u16 Read16(const VAddr addr) { | 158 | u16 Read16(const VAddr addr) { |
| 150 | u16_le data = 0; | 159 | return Read<u16_le>(addr); |
| 151 | Read<u16_le>(data, addr); | ||
| 152 | return data; | ||
| 153 | } | 160 | } |
| 154 | 161 | ||
| 155 | u32 Read32(const VAddr addr) { | 162 | u32 Read32(const VAddr addr) { |
| 156 | u32_le data = 0; | 163 | return Read<u32_le>(addr); |
| 157 | Read<u32_le>(data, addr); | ||
| 158 | return data; | ||
| 159 | } | 164 | } |
| 160 | 165 | ||
| 161 | u64 Read64(const VAddr addr) { | 166 | u64 Read64(const VAddr addr) { |
| 162 | u64_le data = 0; | 167 | return Read<u64_le>(addr); |
| 163 | Read<u64_le>(data, addr); | ||
| 164 | return data; | ||
| 165 | } | 168 | } |
| 166 | 169 | ||
| 167 | void Write8(const VAddr addr, const u8 data) { | 170 | void Write8(const VAddr addr, const u8 data) { |