path: root/src/core/memory.cpp
author     bunnei  2018-01-01 15:40:35 -0500
committer  bunnei  2018-01-01 15:40:35 -0500
commit     93480b10ef443dbc616a9240fe8f7456315c1940 (patch)
tree       ca1f8c7f31835e3c895e72e08745789034c2758b /src/core/memory.cpp
parent     svc: Stub out svcWaitSynchronization. (diff)
download   yuzu-93480b10ef443dbc616a9240fe8f7456315c1940.tar.gz
           yuzu-93480b10ef443dbc616a9240fe8f7456315c1940.tar.xz
           yuzu-93480b10ef443dbc616a9240fe8f7456315c1940.zip
core/video_core: Fix a bunch of u64 -> u32 warnings.
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp | 16
1 file changed, 8 insertions(+), 8 deletions(-)
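For context, the warnings fixed by this commit come from implicitly narrowing 64-bit sizes to the old 32-bit parameters at call sites. Below is a minimal, self-contained sketch of that pattern; the names (MapExample, region_size) are hypothetical and not taken from memory.cpp:

    #include <cstdint>

    using u32 = std::uint32_t;
    using u64 = std::uint64_t;

    // Old-style signature: the region size is narrowed to 32 bits.
    static void MapExample(u32 size) {
        (void)size; // would map `size` bytes
    }

    int main() {
        u64 region_size = 0x100000000; // 4 GiB, does not fit in a u32
        // Implicit u64 -> u32 conversion: compilers with conversion warnings enabled
        // (e.g. -Wconversion) flag this, and region_size silently truncates to 0 here.
        MapExample(region_size);
        return 0;
    }

Widening the size parameters to u64, as the hunks below do for the mapping and rasterizer helpers, removes the truncation and the warning.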
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 9a5661a99..93ffe9938 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -37,7 +37,7 @@ PageTable* GetCurrentPageTable() {
     return current_page_table;
 }
 
-static void MapPages(PageTable& page_table, VAddr base, u32 size, u8* memory, PageType type) {
+static void MapPages(PageTable& page_table, VAddr base, u64 size, u8* memory, PageType type) {
     LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
               (base + size) * PAGE_SIZE);
 
@@ -58,13 +58,13 @@ static void MapPages(PageTable& page_table, VAddr base, u32 size, u8* memory, Pa
     }
 }
 
-void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(PageTable& page_table, VAddr base, u64 size, u8* target) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(PageTable& page_table, VAddr base, u64 size, MMIORegionPointer mmio_handler) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
@@ -72,7 +72,7 @@ void MapIoRegion(PageTable& page_table, VAddr base, u32 size, MMIORegionPointer
     page_table.special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
+void UnmapRegion(PageTable& page_table, VAddr base, u64 size) {
     ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
     ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
@@ -334,7 +334,7 @@ u8* GetPhysicalPointer(PAddr address) {
     return target_pointer;
 }
 
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) {
     if (start == 0) {
         return;
     }
@@ -413,13 +413,13 @@ void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
     }
 }
 
-void RasterizerFlushRegion(PAddr start, u32 size) {
+void RasterizerFlushRegion(PAddr start, u64 size) {
     if (VideoCore::g_renderer != nullptr) {
         VideoCore::g_renderer->Rasterizer()->FlushRegion(start, size);
     }
 }
 
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -427,7 +427,7 @@ void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
     }
 }
 
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {