summary | refs | log | tree | commit | diff
path: root/src/core/memory.cpp
diff options
context:
space:
mode:
author: comex — 2023-07-01 15:00:39 -0700
committer: comex — 2023-07-15 12:00:28 -0700
commitd7c532d8894ce806c9af13b8dd3eec975642b348 (patch)
tree1353e6d1776384575019db28987e23237552a9de /src/core/memory.cpp
parentfile_sys/content_archive: Detect compressed NCAs (#11047) (diff)
downloadyuzu-d7c532d8894ce806c9af13b8dd3eec975642b348.tar.gz
yuzu-d7c532d8894ce806c9af13b8dd3eec975642b348.tar.xz
yuzu-d7c532d8894ce806c9af13b8dd3eec975642b348.zip
Fixes and workarounds to make UBSan happier on macOS
There are still some other issues not addressed here, but it's a start.

Workarounds for false-positive reports:

- `RasterizerAccelerated`: Put a gigantic array behind a `unique_ptr`, because UBSan has a [hardcoded limit](https://stackoverflow.com/questions/64531383/c-runtime-error-using-fsanitize-undefined-object-has-a-possibly-invalid-vp) of how big it thinks objects can be, specifically when dealing with offset-to-top values used with multiple inheritance. Hopefully this doesn't have a performance impact.
- `QueryCacheBase::QueryCacheBase`: Avoid an operation that UBSan thinks is UB even though it at least arguably isn't. See the link in the comment for more information.

Fixes for correct reports:

- `PageTable`, `Memory`: Use `uintptr_t` values instead of pointers to avoid UB from pointer overflow (when pointer arithmetic wraps around the address space).
- `KScheduler::Reload`: `thread->GetOwnerProcess()` can be `nullptr`; avoid calling methods on it in this case. (The existing code returns a garbage reference to a field, which is then passed into `LoadWatchpointArray`, and apparently it's never used, so it's harmless in practice but still triggers UBSan.)
- `KAutoObject::Close`: This function calls `this->Destroy()`, which overwrites the beginning of the object with junk (specifically a free list pointer). Then it calls `this->UnregisterWithKernel()`. UBSan complains about a type mismatch because the vtable has been overwritten, and I believe this is indeed UB. `UnregisterWithKernel` also loads `m_kernel` from the 'freed' object, which seems to be technically safe (the overwriting doesn't extend as far as that field), but seems dubious. Switch to a `static` method and load `m_kernel` in advance.
Diffstat (limited to 'src/core/memory.cpp')
-rw-r--r--  src/core/memory.cpp  30
1 file changed, 17 insertions, 13 deletions
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 805963178..7538c1d23 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -73,7 +73,7 @@ struct Memory::Impl {
73 return {}; 73 return {};
74 } 74 }
75 75
76 return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; 76 return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
77 } 77 }
78 78
79 [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { 79 [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
@@ -84,7 +84,7 @@ struct Memory::Impl {
84 return {}; 84 return {};
85 } 85 }
86 86
87 return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; 87 return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
88 } 88 }
89 89
90 u8 Read8(const Common::ProcessAddress addr) { 90 u8 Read8(const Common::ProcessAddress addr) {
@@ -204,7 +204,8 @@ struct Memory::Impl {
204 break; 204 break;
205 } 205 }
206 case Common::PageType::Memory: { 206 case Common::PageType::Memory: {
207 u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS); 207 u8* mem_ptr =
208 reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS));
208 on_memory(copy_amount, mem_ptr); 209 on_memory(copy_amount, mem_ptr);
209 break; 210 break;
210 } 211 }
@@ -448,7 +449,7 @@ struct Memory::Impl {
448 break; 449 break;
449 case Common::PageType::Memory: 450 case Common::PageType::Memory:
450 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( 451 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
451 nullptr, Common::PageType::DebugMemory); 452 0, Common::PageType::DebugMemory);
452 break; 453 break;
453 default: 454 default:
454 UNREACHABLE(); 455 UNREACHABLE();
@@ -466,7 +467,8 @@ struct Memory::Impl {
466 case Common::PageType::DebugMemory: { 467 case Common::PageType::DebugMemory: {
467 u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; 468 u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
468 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( 469 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
469 pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); 470 reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
471 Common::PageType::Memory);
470 break; 472 break;
471 } 473 }
472 default: 474 default:
@@ -506,7 +508,7 @@ struct Memory::Impl {
506 case Common::PageType::DebugMemory: 508 case Common::PageType::DebugMemory:
507 case Common::PageType::Memory: 509 case Common::PageType::Memory:
508 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( 510 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
509 nullptr, Common::PageType::RasterizerCachedMemory); 511 0, Common::PageType::RasterizerCachedMemory);
510 break; 512 break;
511 case Common::PageType::RasterizerCachedMemory: 513 case Common::PageType::RasterizerCachedMemory:
512 // There can be more than one GPU region mapped per CPU region, so it's common 514 // There can be more than one GPU region mapped per CPU region, so it's common
@@ -534,10 +536,11 @@ struct Memory::Impl {
534 // pagetable after unmapping a VMA. In that case the underlying VMA will no 536 // pagetable after unmapping a VMA. In that case the underlying VMA will no
535 // longer exist, and we should just leave the pagetable entry blank. 537 // longer exist, and we should just leave the pagetable entry blank.
536 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( 538 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
537 nullptr, Common::PageType::Unmapped); 539 0, Common::PageType::Unmapped);
538 } else { 540 } else {
539 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( 541 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
540 pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); 542 reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
543 Common::PageType::Memory);
541 } 544 }
542 break; 545 break;
543 } 546 }
@@ -584,7 +587,7 @@ struct Memory::Impl {
584 "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); 587 "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);
585 588
586 while (base != end) { 589 while (base != end) {
587 page_table.pointers[base].Store(nullptr, type); 590 page_table.pointers[base].Store(0, type);
588 page_table.backing_addr[base] = 0; 591 page_table.backing_addr[base] = 0;
589 page_table.blocks[base] = 0; 592 page_table.blocks[base] = 0;
590 base += 1; 593 base += 1;
@@ -593,7 +596,8 @@ struct Memory::Impl {
593 auto orig_base = base; 596 auto orig_base = base;
594 while (base != end) { 597 while (base != end) {
595 auto host_ptr = 598 auto host_ptr =
596 system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS); 599 reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) -
600 (base << YUZU_PAGEBITS);
597 auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); 601 auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
598 page_table.pointers[base].Store(host_ptr, type); 602 page_table.pointers[base].Store(host_ptr, type);
599 page_table.backing_addr[base] = backing; 603 page_table.backing_addr[base] = backing;
@@ -619,8 +623,8 @@ struct Memory::Impl {
619 623
620 // Avoid adding any extra logic to this fast-path block 624 // Avoid adding any extra logic to this fast-path block
621 const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); 625 const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
622 if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { 626 if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
623 return &pointer[vaddr]; 627 return reinterpret_cast<u8*>(pointer + vaddr);
624 } 628 }
625 switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { 629 switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
626 case Common::PageType::Unmapped: 630 case Common::PageType::Unmapped:
@@ -814,7 +818,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
814 return false; 818 return false;
815 } 819 }
816 const auto [pointer, type] = page_table.pointers[page].PointerType(); 820 const auto [pointer, type] = page_table.pointers[page].PointerType();
817 return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory || 821 return pointer != 0 || type == Common::PageType::RasterizerCachedMemory ||
818 type == Common::PageType::DebugMemory; 822 type == Common::PageType::DebugMemory;
819} 823}
820 824