author    Kyle Kienapfel  2022-08-18 16:28:55 -0700
committer Kyle Kienapfel  2022-08-19 16:08:40 -0700
commit    14e9de6678dab47625826006f001d5e94dfb2716 (patch)
tree      ddb17512c52f678de96abf6dedf33a02b6a497ec
parent    Merge pull request #8685 from FearlessTobi/multiplayer-part2 (diff)
code: dodge PAGE_SIZE #define
Some header files, specifically for OSX and Musl libc, define PAGE_SIZE to be a number.
This is great, except that in yuzu we're using PAGE_SIZE as a variable. Specific example:

    static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;

PAGE_SIZE, PAGE_BITS and PAGE_MASK are all similar variables. Simply deleted the
underscores, and then added a YUZU_ prefix.

Might be worth noting that there are multiple uses in different classes/namespaces.
This list may not be exhaustive:

    Core::Memory    12 bits (4096)
    QueryCacheBase  12 bits
    ShaderCache     14 bits (16384)
    TextureCache    20 bits (1048576, or 1MB)

Fixes #8779
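For context, a minimal sketch of the collision and the rename (not part of the patch; the 4096 macro value is illustrative of what Musl's and Mach's headers provide):

    #include <cstddef>
    #include <cstdint>

    using u64 = std::uint64_t;

    // If a system header has effectively done "#define PAGE_SIZE 4096", the old
    // declaration `static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;` is
    // rewritten by the preprocessor into `static constexpr u64 4096 = ...` and
    // fails to compile. Renaming the constants sidesteps the macro without
    // #undef'ing anything from the system headers:
    constexpr std::size_t YUZU_PAGEBITS = 12;
    constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
    constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;

    static_assert(YUZU_PAGESIZE == 4096 && YUZU_PAGEMASK == 0xFFF);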
-rw-r--r--  src/common/microprofile.h                          |  9
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_32.cpp          |  4
-rw-r--r--  src/core/loader/kip.cpp                            |  2
-rw-r--r--  src/core/loader/nro.cpp                            |  2
-rw-r--r--  src/core/loader/nso.cpp                            |  2
-rw-r--r--  src/core/memory.cpp                                | 81
-rw-r--r--  src/core/memory.h                                  |  6
-rw-r--r--  src/tests/video_core/buffer_base.cpp               |  7
-rw-r--r--  src/video_core/buffer_cache/buffer_base.h          |  2
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h         | 49
-rw-r--r--  src/video_core/memory_manager.cpp                  |  4
-rw-r--r--  src/video_core/query_cache.h                       | 12
-rw-r--r--  src/video_core/rasterizer_accelerated.cpp          | 17
-rw-r--r--  src/video_core/shader_cache.cpp                    | 12
-rw-r--r--  src/video_core/shader_cache.h                      |  4
-rw-r--r--  src/video_core/texture_cache/texture_cache.h       | 12
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h  | 10
17 files changed, 116 insertions(+), 119 deletions(-)
diff --git a/src/common/microprofile.h b/src/common/microprofile.h
index 91d14d5e1..56ef0a2dc 100644
--- a/src/common/microprofile.h
+++ b/src/common/microprofile.h
@@ -22,12 +22,3 @@ typedef void* HANDLE;
 #include <microprofile.h>
 
 #define MP_RGB(r, g, b) ((r) << 16 | (g) << 8 | (b) << 0)
-
-// On OS X, some Mach header included by MicroProfile defines these as macros, conflicting with
-// identifiers we use.
-#ifdef PAGE_SIZE
-#undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#undef PAGE_MASK
-#endif
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 3b8b43994..d1e70f19d 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -190,8 +190,8 @@ std::shared_ptr<Dynarmic::A32::Jit> ARM_Dynarmic_32::MakeJit(Common::PageTable*
     config.callbacks = cb.get();
     config.coprocessors[15] = cp15;
     config.define_unpredictable_behaviour = true;
-    static constexpr std::size_t PAGE_BITS = 12;
-    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - PAGE_BITS);
+    static constexpr std::size_t YUZU_PAGEBITS = 12;
+    static constexpr std::size_t NUM_PAGE_TABLE_ENTRIES = 1 << (32 - YUZU_PAGEBITS);
     if (page_table) {
         config.page_table = reinterpret_cast<std::array<std::uint8_t*, NUM_PAGE_TABLE_ENTRIES>*>(
             page_table->pointers.data());
diff --git a/src/core/loader/kip.cpp b/src/core/loader/kip.cpp
index 9af46a0f7..d8a1bf82a 100644
--- a/src/core/loader/kip.cpp
+++ b/src/core/loader/kip.cpp
@@ -14,7 +14,7 @@ namespace Loader {
 
 namespace {
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace
 
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 1b0bb0876..73d04d7ee 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -125,7 +125,7 @@ FileType AppLoader_NRO::IdentifyType(const FileSys::VirtualFile& nro_file) {
 }
 
 static constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 
 static bool LoadNroImpl(Kernel::KProcess& process, const std::vector<u8>& data) {
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 8dd956fc6..4c3b3c655 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -45,7 +45,7 @@ std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
 }
 
 constexpr u32 PageAlignSize(u32 size) {
-    return static_cast<u32>((size + Core::Memory::PAGE_MASK) & ~Core::Memory::PAGE_MASK);
+    return static_cast<u32>((size + Core::Memory::YUZU_PAGEMASK) & ~Core::Memory::YUZU_PAGEMASK);
 }
 } // Anonymous namespace
 
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 1b44280b5..34ad7cadd 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -36,10 +36,11 @@ struct Memory::Impl {
     }
 
     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+                 Common::PageType::Memory);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
     }
 
     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+                 Common::PageType::Unmapped);
 
         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (!paddr) {
             return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
     }
 
     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};
 
         if (paddr == 0) {
             return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
                    auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> PAGE_BITS;
-        std::size_t page_offset = addr & PAGE_MASK;
+        std::size_t page_index = addr >> YUZU_PAGEBITS;
+        std::size_t page_offset = addr & YUZU_PAGEMASK;
 
         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+            const auto current_vaddr =
+                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);
 
             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 DEBUG_ASSERT(pointer);
-                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -339,10 +342,10 @@ struct Memory::Impl {
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.
 
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::DebugMemory);
                     break;
                 default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     break;
                 }
                 default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.
 
-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                 // that this area is already unmarked as cached.
                 break;
             case Common::PageType::RasterizerCachedMemory: {
-                u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                 if (pointer == nullptr) {
                     // It's possible that this function has been called while updating the
                     // pagetable after unmapping a VMA. In that case the underlying VMA will no
                     // longer exist, and we should just leave the pagetable entry blank.
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::Unmapped);
                 } else {
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                 }
                 break;
             }
@@ -465,8 +468,8 @@ struct Memory::Impl {
     */
     void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
-                  (base + size) * PAGE_SIZE);
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+                  (base + size) * YUZU_PAGESIZE);
 
         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
             for (u64 i = 0; i < size; i++) {
                 const auto page = base + i;
                 if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                 }
             }
         }
@@ -485,7 +488,7 @@ struct Memory::Impl {
 
         if (!target) {
             ASSERT_MSG(type != Common::PageType::Memory,
-                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
+                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
 
             while (base != end) {
                 page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
-                page_table.backing_addr[base] = target - (base << PAGE_BITS);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);
 
                 ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");
 
                 base += 1;
-                target += PAGE_SIZE;
+                target += YUZU_PAGESIZE;
             }
         }
     }
@@ -518,7 +521,7 @@ struct Memory::Impl {
         }
 
         // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
         if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             return &pointer[vaddr];
         }
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
 bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
     const Kernel::KProcess& process = *system.CurrentProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
-    const size_t page = vaddr >> PAGE_BITS;
+    const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
     }
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
 
 bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, PAGE_SIZE);
+    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);
 
-    for (; page < end; page += PAGE_SIZE) {
+    for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }
diff --git a/src/core/memory.h b/src/core/memory.h
index 2a21fbcfd..a11ff8766 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -27,9 +27,9 @@ namespace Core::Memory {
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-constexpr std::size_t PAGE_BITS = 12;
-constexpr u64 PAGE_SIZE = 1ULL << PAGE_BITS;
-constexpr u64 PAGE_MASK = PAGE_SIZE - 1;
+constexpr std::size_t YUZU_PAGEBITS = 12;
+constexpr u64 YUZU_PAGESIZE = 1ULL << YUZU_PAGEBITS;
+constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;
 
 /// Virtual user-space memory regions
 enum : VAddr {
diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp
index a1be8dcf1..71121e42a 100644
--- a/src/tests/video_core/buffer_base.cpp
+++ b/src/tests/video_core/buffer_base.cpp
@@ -22,8 +22,9 @@ constexpr VAddr c = 0x1328914000;
 class RasterizerInterface {
 public:
     void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
-        const u64 page_start{addr >> Core::Memory::PAGE_BITS};
-        const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
+        const u64 page_start{addr >> Core::Memory::YUZU_PAGEBITS};
+        const u64 page_end{(addr + size + Core::Memory::YUZU_PAGESIZE - 1) >>
+                           Core::Memory::YUZU_PAGEBITS};
         for (u64 page = page_start; page < page_end; ++page) {
             int& value = page_table[page];
             value += delta;
@@ -37,7 +38,7 @@ public:
     }
 
     [[nodiscard]] int Count(VAddr addr) const noexcept {
-        const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS);
+        const auto it = page_table.find(addr >> Core::Memory::YUZU_PAGEBITS);
         return it == page_table.end() ? 0 : it->second;
     }
 
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index 3e20608ca..0b2bc67b1 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -36,7 +36,7 @@ struct NullBufferParams {};
 template <class RasterizerInterface>
 class BufferBase {
     static constexpr u64 PAGES_PER_WORD = 64;
-    static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE;
+    static constexpr u64 BYTES_PER_PAGE = Core::Memory::YUZU_PAGESIZE;
     static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
 
     /// Vector tracking modified pages tightly packed with small vector optimization
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index b74ad7900..f015dae56 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -60,8 +60,8 @@ class BufferCache {
 
     // Page size for caching purposes.
     // This is unrelated to the CPU page size and it can be changed as it seems optimal.
-    static constexpr u32 PAGE_BITS = 16;
-    static constexpr u64 PAGE_SIZE = u64{1} << PAGE_BITS;
+    static constexpr u32 YUZU_PAGEBITS = 16;
+    static constexpr u64 YUZU_PAGESIZE = u64{1} << YUZU_PAGEBITS;
 
     static constexpr bool IS_OPENGL = P::IS_OPENGL;
     static constexpr bool HAS_PERSISTENT_UNIFORM_BUFFER_BINDINGS =
@@ -216,8 +216,8 @@ private:
 
     template <typename Func>
     void ForEachBufferInRange(VAddr cpu_addr, u64 size, Func&& func) {
-        const u64 page_end = Common::DivCeil(cpu_addr + size, PAGE_SIZE);
-        for (u64 page = cpu_addr >> PAGE_BITS; page < page_end;) {
+        const u64 page_end = Common::DivCeil(cpu_addr + size, YUZU_PAGESIZE);
+        for (u64 page = cpu_addr >> YUZU_PAGEBITS; page < page_end;) {
             const BufferId buffer_id = page_table[page];
             if (!buffer_id) {
                 ++page;
@@ -227,7 +227,7 @@ private:
             func(buffer_id, buffer);
 
             const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-            page = Common::DivCeil(end_addr, PAGE_SIZE);
+            page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
         }
     }
 
@@ -262,8 +262,8 @@ private:
     }
 
     static bool IsRangeGranular(VAddr cpu_addr, size_t size) {
-        return (cpu_addr & ~Core::Memory::PAGE_MASK) ==
-               ((cpu_addr + size) & ~Core::Memory::PAGE_MASK);
+        return (cpu_addr & ~Core::Memory::YUZU_PAGEMASK) ==
+               ((cpu_addr + size) & ~Core::Memory::YUZU_PAGEMASK);
     }
 
     void RunGarbageCollector();
@@ -439,7 +439,7 @@ private:
     u64 minimum_memory = 0;
     u64 critical_memory = 0;
 
-    std::array<BufferId, ((1ULL << 39) >> PAGE_BITS)> page_table;
+    std::array<BufferId, ((1ULL << 39) >> YUZU_PAGEBITS)> page_table;
 };
 
 template <class P>
@@ -926,8 +926,8 @@ void BufferCache<P>::PopAsyncFlushes() {}
 
 template <class P>
 bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -938,7 +938,7 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -946,8 +946,8 @@ bool BufferCache<P>::IsRegionGpuModified(VAddr addr, size_t size) {
 template <class P>
 bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
     const VAddr end_addr = addr + size;
-    const u64 page_end = Common::DivCeil(end_addr, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(end_addr, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId buffer_id = page_table[page];
         if (!buffer_id) {
             ++page;
@@ -959,15 +959,15 @@ bool BufferCache<P>::IsRegionRegistered(VAddr addr, size_t size) {
         if (buf_start_addr < end_addr && addr < buf_end_addr) {
             return true;
         }
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
 
 template <class P>
 bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page < page_end;) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end;) {
         const BufferId image_id = page_table[page];
         if (!image_id) {
             ++page;
@@ -978,7 +978,7 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
             return true;
         }
         const VAddr end_addr = buffer.CpuAddr() + buffer.SizeBytes();
-        page = Common::DivCeil(end_addr, PAGE_SIZE);
+        page = Common::DivCeil(end_addr, YUZU_PAGESIZE);
     }
     return false;
 }
@@ -1472,7 +1472,7 @@ BufferId BufferCache<P>::FindBuffer(VAddr cpu_addr, u32 size) {
     if (cpu_addr == 0) {
         return NULL_BUFFER_ID;
     }
-    const u64 page = cpu_addr >> PAGE_BITS;
+    const u64 page = cpu_addr >> YUZU_PAGEBITS;
     const BufferId buffer_id = page_table[page];
     if (!buffer_id) {
         return CreateBuffer(cpu_addr, size);
@@ -1493,8 +1493,9 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
     VAddr end = cpu_addr + wanted_size;
     int stream_score = 0;
     bool has_stream_leap = false;
-    for (; cpu_addr >> PAGE_BITS < Common::DivCeil(end, PAGE_SIZE); cpu_addr += PAGE_SIZE) {
-        const BufferId overlap_id = page_table[cpu_addr >> PAGE_BITS];
+    for (; cpu_addr >> YUZU_PAGEBITS < Common::DivCeil(end, YUZU_PAGESIZE);
+         cpu_addr += YUZU_PAGESIZE) {
+        const BufferId overlap_id = page_table[cpu_addr >> YUZU_PAGEBITS];
         if (!overlap_id) {
             continue;
         }
@@ -1520,11 +1521,11 @@ typename BufferCache<P>::OverlapResult BufferCache<P>::ResolveOverlaps(VAddr cpu
             // as a stream buffer. Increase the size to skip constantly recreating buffers.
             has_stream_leap = true;
             if (expands_right) {
-                begin -= PAGE_SIZE * 256;
+                begin -= YUZU_PAGESIZE * 256;
                 cpu_addr = begin;
             }
             if (expands_left) {
-                end += PAGE_SIZE * 256;
+                end += YUZU_PAGESIZE * 256;
             }
         }
     }
@@ -1598,8 +1599,8 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
     }
     const VAddr cpu_addr_begin = buffer.CpuAddr();
     const VAddr cpu_addr_end = cpu_addr_begin + size;
-    const u64 page_begin = cpu_addr_begin / PAGE_SIZE;
-    const u64 page_end = Common::DivCeil(cpu_addr_end, PAGE_SIZE);
+    const u64 page_begin = cpu_addr_begin / YUZU_PAGESIZE;
+    const u64 page_end = Common::DivCeil(cpu_addr_end, YUZU_PAGESIZE);
     for (u64 page = page_begin; page != page_end; ++page) {
         if constexpr (insert) {
             page_table[page] = buffer_id;
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index d373be0ba..bf9eb735d 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -369,8 +369,8 @@ bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
     if (!cpu_addr) {
         return false;
     }
-    const std::size_t page{(*cpu_addr & Core::Memory::PAGE_MASK) + size};
-    return page <= Core::Memory::PAGE_SIZE;
+    const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
+    return page <= Core::Memory::YUZU_PAGESIZE;
 }
 
 bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index fcce87acb..889b606b3 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -214,8 +214,8 @@ private:
             return cache_begin < addr_end && addr_begin < cache_end;
         };
 
-        const u64 page_end = addr_end >> PAGE_BITS;
-        for (u64 page = addr_begin >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = addr_end >> YUZU_PAGEBITS;
+        for (u64 page = addr_begin >> YUZU_PAGEBITS; page <= page_end; ++page) {
             const auto& it = cached_queries.find(page);
             if (it == std::end(cached_queries)) {
                 continue;
@@ -235,14 +235,14 @@ private:
     /// Registers the passed parameters as cached and returns a pointer to the stored cached query.
     CachedQuery* Register(VideoCore::QueryType type, VAddr cpu_addr, u8* host_ptr, bool timestamp) {
         rasterizer.UpdatePagesCachedCount(cpu_addr, CachedQuery::SizeInBytes(timestamp), 1);
-        const u64 page = static_cast<u64>(cpu_addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(cpu_addr) >> YUZU_PAGEBITS;
         return &cached_queries[page].emplace_back(static_cast<QueryCache&>(*this), type, cpu_addr,
                                                   host_ptr);
     }
 
     /// Tries to a get a cached query. Returns nullptr on failure.
     CachedQuery* TryGet(VAddr addr) {
-        const u64 page = static_cast<u64>(addr) >> PAGE_BITS;
+        const u64 page = static_cast<u64>(addr) >> YUZU_PAGEBITS;
         const auto it = cached_queries.find(page);
         if (it == std::end(cached_queries)) {
             return nullptr;
@@ -260,8 +260,8 @@ private:
         uncommitted_flushes->push_back(addr);
     }
 
-    static constexpr std::uintptr_t PAGE_SIZE = 4096;
-    static constexpr unsigned PAGE_BITS = 12;
+    static constexpr std::uintptr_t YUZU_PAGESIZE = 4096;
+    static constexpr unsigned YUZU_PAGEBITS = 12;
 
     VideoCore::RasterizerInterface& rasterizer;
     Tegra::Engines::Maxwell3D& maxwell3d;
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index 87a29e144..4a197d65d 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -24,8 +24,8 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
     u64 cache_bytes = 0;
 
     std::atomic_thread_fence(std::memory_order_acquire);
-    const u64 page_end = Common::DivCeil(addr + size, PAGE_SIZE);
-    for (u64 page = addr >> PAGE_BITS; page != page_end; ++page) {
+    const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
+    for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
         std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
 
         if (delta > 0) {
@@ -44,26 +44,27 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
             if (uncache_bytes == 0) {
                 uncache_begin = page;
             }
-            uncache_bytes += PAGE_SIZE;
+            uncache_bytes += YUZU_PAGESIZE;
         } else if (uncache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+            cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes,
+                                                  false);
             uncache_bytes = 0;
         }
         if (count.load(std::memory_order::relaxed) == 1 && delta > 0) {
             if (cache_bytes == 0) {
                 cache_begin = page;
             }
-            cache_bytes += PAGE_SIZE;
+            cache_bytes += YUZU_PAGESIZE;
         } else if (cache_bytes > 0) {
-            cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+            cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
             cache_bytes = 0;
         }
     }
     if (uncache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(uncache_begin << PAGE_BITS, uncache_bytes, false);
+        cpu_memory.RasterizerMarkRegionCached(uncache_begin << YUZU_PAGEBITS, uncache_bytes, false);
     }
     if (cache_bytes > 0) {
-        cpu_memory.RasterizerMarkRegionCached(cache_begin << PAGE_BITS, cache_bytes, true);
+        cpu_memory.RasterizerMarkRegionCached(cache_begin << YUZU_PAGEBITS, cache_bytes, true);
     }
 }
 
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 4b1101f7c..164e4ee0e 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -123,8 +123,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
     const VAddr addr_end = addr + size;
     Entry* const entry = NewEntry(addr, addr_end, data.get());
 
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         invalidation_cache[page].push_back(entry);
     }
 
@@ -135,8 +135,8 @@ void ShaderCache::Register(std::unique_ptr<ShaderInfo> data, VAddr addr, size_t
 
 void ShaderCache::InvalidatePagesInRegion(VAddr addr, size_t size) {
     const VAddr addr_end = addr + size;
-    const u64 page_end = (addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = addr >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = addr >> YUZU_PAGEBITS; page < page_end; ++page) {
         auto it = invalidation_cache.find(page);
         if (it == invalidation_cache.end()) {
             continue;
@@ -189,8 +189,8 @@ void ShaderCache::InvalidatePageEntries(std::vector<Entry*>& entries, VAddr addr
 }
 
 void ShaderCache::RemoveEntryFromInvalidationCache(const Entry* entry) {
-    const u64 page_end = (entry->addr_end + PAGE_SIZE - 1) >> PAGE_BITS;
-    for (u64 page = entry->addr_start >> PAGE_BITS; page < page_end; ++page) {
+    const u64 page_end = (entry->addr_end + YUZU_PAGESIZE - 1) >> YUZU_PAGEBITS;
+    for (u64 page = entry->addr_start >> YUZU_PAGEBITS; page < page_end; ++page) {
         const auto entries_it = invalidation_cache.find(page);
         ASSERT(entries_it != invalidation_cache.end());
         std::vector<Entry*>& entries = entries_it->second;
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index 1109cfe83..f67cea8c4 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -29,8 +29,8 @@ struct ShaderInfo {
 };
 
 class ShaderCache {
-    static constexpr u64 PAGE_BITS = 14;
-    static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;
+    static constexpr u64 YUZU_PAGEBITS = 14;
+    static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
 
     static constexpr size_t NUM_PROGRAMS = 6;
 
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index cf3ca06a6..1dbe01bc0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -589,7 +589,7 @@ void TextureCache<P>::BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
 template <class P>
 typename P::ImageView* TextureCache<P>::TryFindFramebufferImageView(VAddr cpu_addr) {
     // TODO: Properly implement this
-    const auto it = page_table.find(cpu_addr >> PAGE_BITS);
+    const auto it = page_table.find(cpu_addr >> YUZU_PAGEBITS);
     if (it == page_table.end()) {
         return nullptr;
     }
@@ -1485,14 +1485,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
         std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
             const auto page_it = selected_page_table.find(page);
             if (page_it == selected_page_table.end()) {
-                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+                ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
                 return;
             }
             std::vector<ImageId>& image_ids = page_it->second;
             const auto vector_it = std::ranges::find(image_ids, image_id);
             if (vector_it == image_ids.end()) {
                 ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                           page << PAGE_BITS);
+                           page << YUZU_PAGEBITS);
                 return;
             }
             image_ids.erase(vector_it);
@@ -1504,14 +1504,14 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
         const auto vector_it = std::ranges::find(image_map_ids, map_id);
         if (vector_it == image_map_ids.end()) {
             ASSERT_MSG(false, "Unregistering unregistered image in page=0x{:x}",
-                       page << PAGE_BITS);
+                       page << YUZU_PAGEBITS);
             return;
         }
         image_map_ids.erase(vector_it);
@@ -1532,7 +1532,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     ForEachCPUPage(cpu_addr, size, [this, image_id](u64 page) {
         const auto page_it = page_table.find(page);
         if (page_it == page_table.end()) {
-            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << PAGE_BITS);
+            ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
             return;
         }
         std::vector<ImageMapId>& image_map_ids = page_it->second;
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index e2f8f84c9..7e6c6cef2 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -47,7 +47,7 @@ struct ImageViewInOut {
 template <class P>
 class TextureCache {
     /// Address shift for caching images into a hash table
-    static constexpr u64 PAGE_BITS = 20;
+    static constexpr u64 YUZU_PAGEBITS = 20;
 
     /// Enables debugging features to the texture cache
     static constexpr bool ENABLE_VALIDATION = P::ENABLE_VALIDATION;
@@ -178,8 +178,8 @@ private:
     template <typename Func>
     static void ForEachCPUPage(VAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;
@@ -193,8 +193,8 @@ private:
     template <typename Func>
     static void ForEachGPUPage(GPUVAddr addr, size_t size, Func&& func) {
         static constexpr bool RETURNS_BOOL = std::is_same_v<std::invoke_result<Func, u64>, bool>;
-        const u64 page_end = (addr + size - 1) >> PAGE_BITS;
-        for (u64 page = addr >> PAGE_BITS; page <= page_end; ++page) {
+        const u64 page_end = (addr + size - 1) >> YUZU_PAGEBITS;
+        for (u64 page = addr >> YUZU_PAGEBITS; page <= page_end; ++page) {
             if constexpr (RETURNS_BOOL) {
                 if (func(page)) {
                     break;