| author | 2015-05-14 20:40:53 -0700 |
|---|---|
| committer | 2015-05-14 20:40:53 -0700 |
| commit | bb689338943791c735c7c6adb186256457e064b4 (patch) |
| tree | a04ba64d18dd163709b1cb4b4212afaca6c091a6 /src/core/memory.cpp |
| parent | Merge pull request #769 from lioncash/cond (diff) |
| parent | Memory: Use a table based lookup scheme to read from memory regions (diff) |
Merge pull request #762 from yuriks/memmap
Memory: Use a table based lookup scheme to read from memory regions
Diffstat (limited to 'src/core/memory.cpp')
| -rw-r--r-- | src/core/memory.cpp | 202 |
|---|---|---|

1 file changed, 202 insertions, 0 deletions
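The change below replaces region-by-region address decoding with a flat page table: each 32-bit virtual address is split into a page index (the upper 20 bits) and a page offset (the lower 12 bits, for 4 KiB pages); the index selects a host pointer from the table and the offset is added to it. A minimal sketch of that decomposition, using constant names and an example address chosen here for illustration only (not part of the commit):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    const int kPageBits = 12;                       // 4 KiB pages, matching PAGE_BITS in the patch
    const std::uint32_t kPageMask = (1u << kPageBits) - 1;

    std::uint32_t vaddr = 0x08010234;               // hypothetical virtual address
    std::uint32_t page_index = vaddr >> kPageBits;  // 0x08010 -> index into pointers[]/attributes[]
    std::uint32_t page_offset = vaddr & kPageMask;  // 0x234   -> byte offset inside that page

    std::printf("index=0x%05X offset=0x%03X\n",
                static_cast<unsigned>(page_index), static_cast<unsigned>(page_offset));
}
```

With NUM_ENTRIES = 1 << 20 such entries, the table trades a fixed block of host memory for a fast path that, as the doc comment in the patch puts it, is just an indexed fetch and a check for NULL.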
```diff
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
new file mode 100644
index 000000000..5d8069acd
--- /dev/null
+++ b/src/core/memory.cpp
@@ -0,0 +1,202 @@
```
```cpp
// Copyright 2015 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <array>

#include "common/assert.h"
#include "common/common_types.h"
#include "common/logging/log.h"
#include "common/swap.h"

#include "core/hle/config_mem.h"
#include "core/hle/shared_page.h"
#include "core/hw/hw.h"
#include "core/mem_map.h"
#include "core/memory.h"

namespace Memory {

const u32 PAGE_MASK = PAGE_SIZE - 1;
const int PAGE_BITS = 12;

enum class PageType {
    /// Page is unmapped and should cause an access error.
    Unmapped,
    /// Page is mapped to regular memory. This is the only type you can get pointers to.
    Memory,
    /// Page is mapped to an I/O region. Reads and writes to this page are handled by functions.
    Special,
};

/**
 * A (reasonably) fast way of allowing switchable and remappable process address spaces. It loosely
 * mimics the way a real CPU page table works, but instead is optimized for minimal decoding and
 * fetching requirements when accessing. In the usual case of an access to regular memory, it only
 * requires an indexed fetch and a check for NULL.
 */
struct PageTable {
    static const size_t NUM_ENTRIES = 1 << (32 - PAGE_BITS);

    /**
     * Array of memory pointers backing each page. An entry can only be non-null if the
     * corresponding entry in the `attributes` array is of type `Memory`.
     */
    std::array<u8*, NUM_ENTRIES> pointers;

    /**
     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
     * the corresponding entry in `pointers` MUST be set to null.
     */
    std::array<PageType, NUM_ENTRIES> attributes;
};

/// Singular page table used for the singleton process
static PageTable main_page_table;
/// Currently active page table
static PageTable* current_page_table = &main_page_table;

static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
    LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE, (base + size) * PAGE_SIZE);

    u32 end = base + size;

    while (base != end) {
        ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at %08X", base);

        if (current_page_table->attributes[base] != PageType::Unmapped) {
            LOG_ERROR(HW_Memory, "overlapping memory ranges at %08X", base * PAGE_SIZE);
        }
        current_page_table->attributes[base] = type;
        current_page_table->pointers[base] = memory;

        base += 1;
        memory += PAGE_SIZE;
    }
}

void InitMemoryMap() {
    main_page_table.pointers.fill(nullptr);
    main_page_table.attributes.fill(PageType::Unmapped);
}

void MapMemoryRegion(VAddr base, u32 size, u8* target) {
    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
}

void MapIoRegion(VAddr base, u32 size) {
    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
    MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
}

template <typename T>
T Read(const VAddr vaddr) {
    const u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
    if (page_pointer) {
        return *reinterpret_cast<const T*>(page_pointer + (vaddr & PAGE_MASK));
    }

    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
    switch (type) {
    case PageType::Unmapped:
        LOG_ERROR(HW_Memory, "unmapped Read%lu @ 0x%08X", sizeof(T) * 8, vaddr);
        return 0;
    case PageType::Memory:
        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
    case PageType::Special:
        LOG_ERROR(HW_Memory, "I/O reads aren't implemented yet @ %08X", vaddr);
        return 0;
    default:
        UNREACHABLE();
    }
}

template <typename T>
void Write(const VAddr vaddr, const T data) {
    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
    if (page_pointer) {
        *reinterpret_cast<T*>(page_pointer + (vaddr & PAGE_MASK)) = data;
        return;
    }

    PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
    switch (type) {
    case PageType::Unmapped:
        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32) data, vaddr);
        return;
    case PageType::Memory:
        ASSERT_MSG(false, "Mapped memory page without a pointer @ %08X", vaddr);
    case PageType::Special:
        LOG_ERROR(HW_Memory, "I/O writes aren't implemented yet @ %08X", vaddr);
        return;
    default:
        UNREACHABLE();
    }
}

u8* GetPointer(const VAddr vaddr) {
    u8* page_pointer = current_page_table->pointers[vaddr >> PAGE_BITS];
    if (page_pointer) {
        return page_pointer + (vaddr & PAGE_MASK);
    }

    LOG_ERROR(HW_Memory, "unknown GetPointer @ 0x%08x", vaddr);
    return nullptr;
}

u8* GetPhysicalPointer(PAddr address) {
    return GetPointer(PhysicalToVirtualAddress(address));
}

u8 Read8(const VAddr addr) {
    return Read<u8>(addr);
}

u16 Read16(const VAddr addr) {
    return Read<u16_le>(addr);
}

u32 Read32(const VAddr addr) {
    return Read<u32_le>(addr);
}

u64 Read64(const VAddr addr) {
    return Read<u64_le>(addr);
}

void Write8(const VAddr addr, const u8 data) {
    Write<u8>(addr, data);
}

void Write16(const VAddr addr, const u16 data) {
    Write<u16_le>(addr, data);
}

void Write32(const VAddr addr, const u32 data) {
    Write<u32_le>(addr, data);
}

void Write64(const VAddr addr, const u64 data) {
    Write<u64_le>(addr, data);
}

void WriteBlock(const VAddr addr, const u8* data, const size_t size) {
    u32 offset = 0;
    while (offset < (size & ~3)) {
        Write32(addr + offset, *(u32*)&data[offset]);
        offset += 4;
    }

    if (size & 2) {
        Write16(addr + offset, *(u16*)&data[offset]);
        offset += 2;
    }

    if (size & 1)
        Write8(addr + offset, data[offset]);
}

} // namespace
```
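For context, here is a minimal usage sketch of the API this file adds, assuming the matching declarations in core/memory.h and the common u8/u32 typedefs; the backing buffer and addresses are hypothetical, chosen only to satisfy the page-alignment asserts. It is illustrative, not part of the patch:

```cpp
#include <vector>

#include "common/common_types.h"
#include "core/memory.h"

// Illustrative only: a hypothetical 128 KiB host buffer stands in for an emulated RAM region.
static std::vector<u8> g_backing(128 * 1024);

void ExampleMemoryMapUsage() {
    Memory::InitMemoryMap();                                  // start with every page Unmapped

    // Map 32 pages of regular memory; accesses to them take the fast pointer path.
    Memory::MapMemoryRegion(0x08000000, static_cast<u32>(g_backing.size()), g_backing.data());

    // Map one page as I/O; reads and writes fall into the PageType::Special branch, which only logs.
    Memory::MapIoRegion(0x1EC00000, 0x1000);

    Memory::Write32(0x08000000, 0xDEADBEEF);                  // stored little-endian into g_backing[0..3]
    u32 value = Memory::Read32(0x08000000);                   // indexed fetch + offset, no region decoding
    (void)value;
}
```

Note that, as the Unmapped cases in the listing show, an access to an unmapped address does not fault the host: it logs an error and the read returns 0 (a write is dropped).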