author    bunnei  2017-09-01 23:10:03 -0400
committer bunnei  2017-09-30 14:28:54 -0400
commit    f01472a5ffd03b535e8a66bb00d9a7548a0f61bf (patch)
tree      d11874933de837f7ce57ccb259f1f869db70bdb7 /src/core/memory.cpp
parent    arm: Use 64-bit addressing in a bunch of places. (diff)
core: Various changes to support 64-bit addressing.
Diffstat (limited to 'src/core/memory.cpp')
 src/core/memory.cpp | 44 ++++++++++++++++++++++----------------------
 1 file changed, 22 insertions(+), 22 deletions(-)
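Note: the thrust of the commit is that addresses, sizes, and the page indices derived from them move from u32 to u64, and the fixed-size page-table arrays become sparse maps keyed by page index. A minimal sketch of the page arithmetic involved, assuming 4 KiB pages (PAGE_BITS == 12), as in the citra code this was forked from:

    #include <cstdint>

    using u64 = std::uint64_t;
    using VAddr = u64; // widened from u32 by this series

    constexpr u64 PAGE_BITS = 12; // assumption: 4 KiB pages
    constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;
    constexpr u64 PAGE_MASK = PAGE_SIZE - 1;

    // Page index of a virtual address; a u32 address can never reach beyond
    // 4 GiB, so 64-bit targets need the wider type throughout.
    constexpr u64 PageIndex(VAddr addr) {
        return addr >> PAGE_BITS;
    }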
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 65649d9d7..ed453d0c1 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -34,7 +34,7 @@ enum class PageType {
 
 struct SpecialRegion {
     VAddr base;
-    u32 size;
+    u64 size;
     MMIORegionPointer handler;
 };
 
@@ -49,7 +49,7 @@ struct PageTable {
      * Array of memory pointers backing each page. An entry can only be non-null if the
      * corresponding entry in the `attributes` array is of type `Memory`.
      */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+    std::map<u64, u8*> pointers;
 
     /**
      * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
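The switch from std::array to std::map is forced by the address-space size: a flat array with one pointer per 4 KiB page of a 64-bit space would need 2^52 entries (roughly 32 PiB of pointers alone), so the table must be sparse. The price is an O(log n) lookup instead of O(1) indexing on every access. A sketch of a lookup against the new layout; GetPointerForPage is a hypothetical helper, not part of this commit:

    // Absent key == unmapped page; find() does not insert, unlike operator[].
    u8* GetPointerForPage(const PageTable& table, u64 page_index) {
        const auto it = table.pointers.find(page_index);
        return it != table.pointers.end() ? it->second : nullptr;
    }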
@@ -61,13 +61,13 @@ struct PageTable {
      * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
      * the corresponding entry in `pointers` MUST be set to null.
      */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+    std::map<u64, PageType> attributes;
 
     /**
      * Indicates the number of externally cached resources touching a page that should be
      * flushed before the memory is accessed
      */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+    std::map<u64, u8> cached_res_count;
 };
 
 /// Singular page table used for the singleton process
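With sparse maps, an absent key has to read as the old array defaults: Unmapped for attributes and 0 for cached_res_count. Hypothetical accessors illustrating the intended semantics (not part of the commit):

    PageType GetAttribute(const PageTable& t, u64 page_index) {
        const auto it = t.attributes.find(page_index);
        return it != t.attributes.end() ? it->second : PageType::Unmapped;
    }

    u8 GetCachedResCount(const PageTable& t, u64 page_index) {
        const auto it = t.cached_res_count.find(page_index);
        return it != t.cached_res_count.end() ? it->second : 0; // 0 == not cached
    }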
@@ -75,18 +75,18 @@ static PageTable main_page_table;
 /// Currently active page table
 static PageTable* current_page_table = &main_page_table;
 
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
-    return &current_page_table->pointers;
-}
+//std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
+//    return &current_page_table->pointers;
+//}
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(u64 base, u64 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
 
-    u32 end = base + size;
+    u64 end = base + size;
     while (base != end) {
         ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
 
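GetCurrentPageTablePointers() is commented out rather than ported because its callers expect a contiguous array they can index directly, and no such array exists once pointers is a map. Note also that base and size are now u64 while the %08X specifiers were left as-is, which truncates the printed values; a format string matching the widened arguments would look like this (a sketch, not what the commit does):

    #include <cinttypes> // PRIX64

    LOG_DEBUG(HW_Memory, "Mapping %p onto %016" PRIX64 "-%016" PRIX64, memory,
              base * PAGE_SIZE, (base + size) * PAGE_SIZE);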
@@ -101,18 +101,18 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
 }
 
 void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
+    //main_page_table.pointers.fill(nullptr);
+    //main_page_table.attributes.fill(PageType::Unmapped);
+    //main_page_table.cached_res_count.fill(0);
 }
 
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(VAddr base, u64 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(VAddr base, u64 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
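InitMemoryMap() loses its body because std::map has no fill(); a default-constructed map is already empty, which now means "everything unmapped". If explicit re-initialization is ever needed (say, on emulator restart), clearing the maps restores that state; a hypothetical helper, not in the commit:

    void ResetPageTable(PageTable& table) {
        table.pointers.clear();
        table.attributes.clear();
        table.cached_res_count.clear();
    }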
@@ -120,7 +120,7 @@ void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(VAddr base, u64 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
@@ -222,7 +222,7 @@ void Write(const VAddr vaddr, const T data) {
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
+        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u64)data,
                   vaddr);
         return;
     case PageType::Memory:
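Two caveats in this hunk: attributes[vaddr >> PAGE_BITS] uses std::map::operator[], which default-inserts a zero-valued PageType (PageType::Unmapped, assuming it is the first enumerator) for any address that was never mapped, so merely probing unmapped memory now grows the map; and passing (u64)data to a %08X specifier is a mismatched vararg. A non-mutating, type-correct variant (sketch):

    #include <cinttypes> // PRIX64

    const auto it = current_page_table->attributes.find(vaddr >> PAGE_BITS);
    const PageType type =
        it != current_page_table->attributes.end() ? it->second : PageType::Unmapped;
    if (type == PageType::Unmapped) {
        LOG_ERROR(HW_Memory, "unmapped Write%zu 0x%016" PRIX64 " @ 0x%016" PRIX64,
                  sizeof(data) * 8, (u64)data, vaddr);
    }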
@@ -304,12 +304,12 @@ u8* GetPhysicalPointer(PAddr address) {
     return vaddr ? GetPointer(*vaddr) : nullptr;
 }
 
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) {
     if (start == 0) {
         return;
     }
 
-    u32 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
     PAddr paddr = start;
 
     for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
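The page-count expression rounds the half-open range [start, start + size) out to page granularity. A worked example, assuming PAGE_BITS == 12: start = 0x1FFF with size = 2 covers bytes 0x1FFF and 0x2000, which straddle pages 1 and 2:

    u64 start = 0x1FFF, size = 2;
    u64 num_pages = ((start + size - 1) >> 12) - (start >> 12) + 1;
    // (0x2000 >> 12) - (0x1FFF >> 12) + 1  ==  2 - 1 + 1  ==  2

One residual nit: the loop still counts with `unsigned i`, which may be 32-bit while num_pages is u64.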
@@ -368,13 +368,13 @@ void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
     }
 }
 
-void RasterizerFlushRegion(PAddr start, u32 size) {
+void RasterizerFlushRegion(PAddr start, u64 size) {
     if (VideoCore::g_renderer != nullptr) {
         VideoCore::g_renderer->Rasterizer()->FlushRegion(start, size);
     }
 }
 
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -382,7 +382,7 @@ void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
     }
 }
 
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -398,7 +398,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
         VAddr overlap_end = std::min(end, region_end);
 
         PAddr physical_start = TryVirtualToPhysicalAddress(overlap_start).value();
-        u32 overlap_size = overlap_end - overlap_start;
+        u64 overlap_size = overlap_end - overlap_start;
 
         auto* rasterizer = VideoCore::g_renderer->Rasterizer();
         switch (mode) {
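overlap_size is the length of the intersection of the flush request with a mapped region; widening it to u64 keeps it consistent with the other size types. In isolation the computation is a standard half-open interval intersection (sketch, reusing the surrounding names; region_start and region_end are assumed to be defined just above this hunk):

    #include <algorithm>

    VAddr overlap_start = std::max(start, region_start);
    VAddr overlap_end = std::min(end, region_end);
    // Only meaningful when the intervals actually intersect:
    u64 overlap_size = overlap_end - overlap_start;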