path: root/src/core/memory.cpp
author    Kyle Kienapfel 2022-08-18 16:28:55 -0700
committer Kyle Kienapfel 2022-08-19 16:08:40 -0700
commit    14e9de6678dab47625826006f001d5e94dfb2716 (patch)
tree      ddb17512c52f678de96abf6dedf33a02b6a497ec /src/core/memory.cpp
parent    Merge pull request #8685 from FearlessTobi/multiplayer-part2 (diff)
download  yuzu-14e9de6678dab47625826006f001d5e94dfb2716.tar.gz
          yuzu-14e9de6678dab47625826006f001d5e94dfb2716.tar.xz
          yuzu-14e9de6678dab47625826006f001d5e94dfb2716.zip
code: dodge PAGE_SIZE #define
Some header files, specifically on OSX and Musl libc, define PAGE_SIZE to be a number.
That is fine, except yuzu uses PAGE_SIZE as the name of a variable, for example
`static constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;`. PAGE_SIZE, PAGE_BITS and
PAGE_MASK are all similarly named constants, so the underscores were simply deleted
and a YUZU_ prefix added (YUZU_PAGESIZE, YUZU_PAGEBITS, YUZU_PAGEMASK).

It may be worth noting that these constants are defined separately in several
classes/namespaces; this list may not be exhaustive:

    Core::Memory    12 bits (4096)
    QueryCacheBase  12 bits
    ShaderCache     14 bits (16384)
    TextureCache    20 bits (1048576, or 1MB)

Fixes #8779
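For context (not part of the patch): a minimal, self-contained sketch of the clash this
rename dodges. The local #define stands in for whatever the macOS or musl headers provide,
and the Example namespace and u64 alias are assumptions made purely for illustration.

    #include <cstdint>

    // Hypothetical stand-in for the macro a system header may provide.
    #define PAGE_SIZE 4096

    namespace Example {
    using u64 = std::uint64_t;

    constexpr u64 PAGE_BITS = 12;
    // The next line would not compile while the macro is visible: the preprocessor
    // rewrites it to "constexpr u64 4096 = u64(1) << PAGE_BITS;".
    // constexpr u64 PAGE_SIZE = u64(1) << PAGE_BITS;

    // The renamed constants are never touched by the macro:
    constexpr u64 YUZU_PAGEBITS = 12;
    constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; // 4096
    constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;       // 0xFFF
    } // namespace Example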
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp | 81
1 file changed, 42 insertions, 39 deletions
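The substitution below is mechanical and does not change the address arithmetic. As a rough
sketch (the PageIndex/PageOffset helper names are assumptions for illustration, not taken
from the codebase), the constants split a virtual address into a page-table index and an
offset within the page:

    #include <cstdint>

    using u64 = std::uint64_t;
    using VAddr = std::uint64_t;

    constexpr u64 YUZU_PAGEBITS = 12;
    constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS; // 4096
    constexpr u64 YUZU_PAGEMASK = YUZU_PAGESIZE - 1;       // 0xFFF

    // Mirrors the "vaddr >> YUZU_PAGEBITS" / "vaddr & YUZU_PAGEMASK" pattern in the diff.
    constexpr u64 PageIndex(VAddr vaddr) {
        return vaddr >> YUZU_PAGEBITS;
    }
    constexpr u64 PageOffset(VAddr vaddr) {
        return vaddr & YUZU_PAGEMASK;
    }

    static_assert(PageIndex(0x1234) == 1);
    static_assert(PageOffset(0x1234) == 0x234);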
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 1b44280b5..34ad7cadd 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -36,10 +36,11 @@ struct Memory::Impl {
     }

     void MapMemoryRegion(Common::PageTable& page_table, VAddr base, u64 size, PAddr target) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
         ASSERT_MSG(target >= DramMemoryMap::Base, "Out of bounds target: {:016X}", target);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, target, Common::PageType::Memory);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, target,
+                 Common::PageType::Memory);

         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Map(base, target - DramMemoryMap::Base, size);
@@ -47,9 +48,10 @@ struct Memory::Impl {
     }

     void UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
-        ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: {:016X}", size);
-        ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: {:016X}", base);
-        MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, 0, Common::PageType::Unmapped);
+        ASSERT_MSG((size & YUZU_PAGEMASK) == 0, "non-page aligned size: {:016X}", size);
+        ASSERT_MSG((base & YUZU_PAGEMASK) == 0, "non-page aligned base: {:016X}", base);
+        MapPages(page_table, base / YUZU_PAGESIZE, size / YUZU_PAGESIZE, 0,
+                 Common::PageType::Unmapped);

         if (Settings::IsFastmemEnabled()) {
             system.DeviceMemory().buffer.Unmap(base, size);
@@ -57,7 +59,7 @@ struct Memory::Impl {
     }

     [[nodiscard]] u8* GetPointerFromRasterizerCachedMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

         if (!paddr) {
             return {};
@@ -67,7 +69,7 @@ struct Memory::Impl {
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(VAddr vaddr) const {
-        const PAddr paddr{current_page_table->backing_addr[vaddr >> PAGE_BITS]};
+        const PAddr paddr{current_page_table->backing_addr[vaddr >> YUZU_PAGEBITS]};

         if (paddr == 0) {
             return {};
@@ -176,13 +178,14 @@ struct Memory::Impl {
                    auto on_unmapped, auto on_memory, auto on_rasterizer, auto increment) {
         const auto& page_table = process.PageTable().PageTableImpl();
         std::size_t remaining_size = size;
-        std::size_t page_index = addr >> PAGE_BITS;
-        std::size_t page_offset = addr & PAGE_MASK;
+        std::size_t page_index = addr >> YUZU_PAGEBITS;
+        std::size_t page_offset = addr & YUZU_PAGEMASK;

         while (remaining_size) {
             const std::size_t copy_amount =
-                std::min(static_cast<std::size_t>(PAGE_SIZE) - page_offset, remaining_size);
-            const auto current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+                std::min(static_cast<std::size_t>(YUZU_PAGESIZE) - page_offset, remaining_size);
+            const auto current_vaddr =
+                static_cast<VAddr>((page_index << YUZU_PAGEBITS) + page_offset);

             const auto [pointer, type] = page_table.pointers[page_index].PointerType();
             switch (type) {
@@ -192,7 +195,7 @@ struct Memory::Impl {
             }
             case Common::PageType::Memory: {
                 DEBUG_ASSERT(pointer);
-                u8* mem_ptr = pointer + page_offset + (page_index << PAGE_BITS);
+                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -339,10 +342,10 @@ struct Memory::Impl {
         // Iterate over a contiguous CPU address space, marking/unmarking the region.
         // The region is at a granularity of CPU pages.

-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (debug) {
                 // Switch page type to debug if now debug
                 switch (page_type) {
@@ -354,7 +357,7 @@ struct Memory::Impl {
                     // Page is already marked.
                     break;
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::DebugMemory);
                     break;
                 default:
@@ -371,9 +374,9 @@ struct Memory::Impl {
                     // Don't mess with already non-debug or rasterizer memory.
                     break;
                 case Common::PageType::DebugMemory: {
-                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~PAGE_MASK)};
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                        pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                    u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     break;
                 }
                 default:
@@ -398,10 +401,10 @@ struct Memory::Impl {
         // granularity of CPU pages, hence why we iterate on a CPU page basis (note: GPU page size
         // is different). This assumes the specified GPU address region is contiguous as well.

-        const u64 num_pages = ((vaddr + size - 1) >> PAGE_BITS) - (vaddr >> PAGE_BITS) + 1;
-        for (u64 i = 0; i < num_pages; ++i, vaddr += PAGE_SIZE) {
+        const u64 num_pages = ((vaddr + size - 1) >> YUZU_PAGEBITS) - (vaddr >> YUZU_PAGEBITS) + 1;
+        for (u64 i = 0; i < num_pages; ++i, vaddr += YUZU_PAGESIZE) {
             const Common::PageType page_type{
-                current_page_table->pointers[vaddr >> PAGE_BITS].Type()};
+                current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Type()};
             if (cached) {
                 // Switch page type to cached if now cached
                 switch (page_type) {
@@ -411,7 +414,7 @@ struct Memory::Impl {
                     break;
                 case Common::PageType::DebugMemory:
                 case Common::PageType::Memory:
-                    current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                    current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                         nullptr, Common::PageType::RasterizerCachedMemory);
                     break;
                 case Common::PageType::RasterizerCachedMemory:
@@ -434,16 +437,16 @@ struct Memory::Impl {
                     // that this area is already unmarked as cached.
                     break;
                 case Common::PageType::RasterizerCachedMemory: {
-                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~PAGE_MASK)};
+                    u8* const pointer{GetPointerFromRasterizerCachedMemory(vaddr & ~YUZU_PAGEMASK)};
                     if (pointer == nullptr) {
                         // It's possible that this function has been called while updating the
                         // pagetable after unmapping a VMA. In that case the underlying VMA will no
                         // longer exist, and we should just leave the pagetable entry blank.
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
                             nullptr, Common::PageType::Unmapped);
                     } else {
-                        current_page_table->pointers[vaddr >> PAGE_BITS].Store(
-                            pointer - (vaddr & ~PAGE_MASK), Common::PageType::Memory);
+                        current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
+                            pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
                     }
                     break;
                 }
@@ -465,8 +468,8 @@ struct Memory::Impl {
      */
     void MapPages(Common::PageTable& page_table, VAddr base, u64 size, PAddr target,
                   Common::PageType type) {
-        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * PAGE_SIZE,
-                  (base + size) * PAGE_SIZE);
+        LOG_DEBUG(HW_Memory, "Mapping {:016X} onto {:016X}-{:016X}", target, base * YUZU_PAGESIZE,
+                  (base + size) * YUZU_PAGESIZE);

         // During boot, current_page_table might not be set yet, in which case we need not flush
         if (system.IsPoweredOn()) {
@@ -474,7 +477,7 @@ struct Memory::Impl {
             for (u64 i = 0; i < size; i++) {
                 const auto page = base + i;
                 if (page_table.pointers[page].Type() == Common::PageType::RasterizerCachedMemory) {
-                    gpu.FlushAndInvalidateRegion(page << PAGE_BITS, PAGE_SIZE);
+                    gpu.FlushAndInvalidateRegion(page << YUZU_PAGEBITS, YUZU_PAGESIZE);
                 }
             }
         }
@@ -485,7 +488,7 @@ struct Memory::Impl {

         if (!target) {
             ASSERT_MSG(type != Common::PageType::Memory,
-                       "Mapping memory page without a pointer @ {:016x}", base * PAGE_SIZE);
+                       "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

             while (base != end) {
                 page_table.pointers[base].Store(nullptr, type);
@@ -496,14 +499,14 @@ struct Memory::Impl {
         } else {
             while (base != end) {
                 page_table.pointers[base].Store(
-                    system.DeviceMemory().GetPointer(target) - (base << PAGE_BITS), type);
-                page_table.backing_addr[base] = target - (base << PAGE_BITS);
+                    system.DeviceMemory().GetPointer(target) - (base << YUZU_PAGEBITS), type);
+                page_table.backing_addr[base] = target - (base << YUZU_PAGEBITS);

                 ASSERT_MSG(page_table.pointers[base].Pointer(),
                            "memory mapping base yield a nullptr within the table");

                 base += 1;
-                target += PAGE_SIZE;
+                target += YUZU_PAGESIZE;
             }
         }
     }
@@ -518,7 +521,7 @@ struct Memory::Impl {
         }

         // Avoid adding any extra logic to this fast-path block
-        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> PAGE_BITS].Raw();
+        const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
         if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
             return &pointer[vaddr];
         }
@@ -657,7 +660,7 @@ void Memory::UnmapRegion(Common::PageTable& page_table, VAddr base, u64 size) {
 bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {
     const Kernel::KProcess& process = *system.CurrentProcess();
     const auto& page_table = process.PageTable().PageTableImpl();
-    const size_t page = vaddr >> PAGE_BITS;
+    const size_t page = vaddr >> YUZU_PAGEBITS;
     if (page >= page_table.pointers.size()) {
         return false;
     }
@@ -668,9 +671,9 @@ bool Memory::IsValidVirtualAddress(const VAddr vaddr) const {

 bool Memory::IsValidVirtualAddressRange(VAddr base, u64 size) const {
     VAddr end = base + size;
-    VAddr page = Common::AlignDown(base, PAGE_SIZE);
+    VAddr page = Common::AlignDown(base, YUZU_PAGESIZE);

-    for (; page < end; page += PAGE_SIZE) {
+    for (; page < end; page += YUZU_PAGESIZE) {
         if (!IsValidVirtualAddress(page)) {
             return false;
         }