-rw-r--r--  src/common/page_table.h                     12
-rw-r--r--  src/core/arm/arm_interface.cpp               4
-rw-r--r--  src/core/arm/arm_interface.h                 2
-rw-r--r--  src/core/hle/kernel/k_auto_object.cpp        4
-rw-r--r--  src/core/hle/kernel/k_auto_object.h          5
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp          3
-rw-r--r--  src/core/hle/kernel/k_thread.cpp             2
-rw-r--r--  src/core/memory.cpp                         30
-rw-r--r--  src/video_core/query_cache.h                 4
-rw-r--r--  src/video_core/rasterizer_accelerated.cpp    5
-rw-r--r--  src/video_core/rasterizer_accelerated.h      3
11 files changed, 42 insertions, 32 deletions
diff --git a/src/common/page_table.h b/src/common/page_table.h
index fec8378f3..e653d52ad 100644
--- a/src/common/page_table.h
+++ b/src/common/page_table.h
@@ -51,7 +51,7 @@ struct PageTable {
     class PageInfo {
     public:
         /// Returns the page pointer
-        [[nodiscard]] u8* Pointer() const noexcept {
+        [[nodiscard]] uintptr_t Pointer() const noexcept {
             return ExtractPointer(raw.load(std::memory_order_relaxed));
         }

@@ -61,7 +61,7 @@ struct PageTable {
         }

         /// Returns the page pointer and attribute pair, extracted from the same atomic read
-        [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept {
+        [[nodiscard]] std::pair<uintptr_t, PageType> PointerType() const noexcept {
             const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed);
             return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)};
         }
@@ -73,13 +73,13 @@ struct PageTable {
         }

         /// Write a page pointer and type pair atomically
-        void Store(u8* pointer, PageType type) noexcept {
-            raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type));
+        void Store(uintptr_t pointer, PageType type) noexcept {
+            raw.store(pointer | static_cast<uintptr_t>(type));
         }

         /// Unpack a pointer from a page info raw representation
-        [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept {
-            return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS));
+        [[nodiscard]] static uintptr_t ExtractPointer(uintptr_t raw) noexcept {
+            return raw & (~uintptr_t{0} << ATTRIBUTE_BITS);
         }

         /// Unpack a page type from a page info raw representation
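Note: the change above keeps the pointer-tagging scheme but stores the host address as an integer end to end, so the hot Store/ExtractPointer path no longer needs a reinterpret_cast. A minimal standalone sketch of the same tagging idea (illustrative names, not the yuzu API): a page-aligned host address and a small type tag share one uintptr_t, so both halves come from a single atomic load.

    #include <atomic>
    #include <cstdint>

    enum class PageType : std::uintptr_t { Unmapped = 0, Memory = 1, Cached = 2 };
    constexpr std::uintptr_t ATTRIBUTE_BITS = 2; // low bits are zero for page-aligned pointers

    struct TaggedPage {
        // Pack address and tag into one word; one store publishes both.
        void Store(std::uintptr_t pointer, PageType type) noexcept {
            raw.store(pointer | static_cast<std::uintptr_t>(type), std::memory_order_relaxed);
        }
        // Mask off the tag bits to recover the address.
        std::uintptr_t Pointer() const noexcept {
            return raw.load(std::memory_order_relaxed) & (~std::uintptr_t{0} << ATTRIBUTE_BITS);
        }
        // Keep only the tag bits to recover the type.
        PageType Type() const noexcept {
            return static_cast<PageType>(raw.load(std::memory_order_relaxed) &
                                         ~(~std::uintptr_t{0} << ATTRIBUTE_BITS));
        }
        std::atomic<std::uintptr_t> raw{0};
    };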
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index aa0eb9791..0c012f094 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -217,8 +217,8 @@ void ARM_Interface::Run() {
     }
 }

-void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) {
-    watchpoints = &wp;
+void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) {
+    watchpoints = wp;
 }

 const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint(
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index d5f2fa09a..3d866ff6f 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -186,7 +186,7 @@ public:
     virtual void SaveContext(ThreadContext64& ctx) const = 0;
     virtual void LoadContext(const ThreadContext32& ctx) = 0;
     virtual void LoadContext(const ThreadContext64& ctx) = 0;
-    void LoadWatchpointArray(const WatchpointArray& wp);
+    void LoadWatchpointArray(const WatchpointArray* wp);

     /// Clears the exclusive monitor's state.
     virtual void ClearExclusiveState() = 0;
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp
index 0ae42c95c..9cd7a9fd5 100644
--- a/src/core/hle/kernel/k_auto_object.cpp
+++ b/src/core/hle/kernel/k_auto_object.cpp
@@ -15,8 +15,8 @@ void KAutoObject::RegisterWithKernel() {
     m_kernel.RegisterKernelObject(this);
 }

-void KAutoObject::UnregisterWithKernel() {
-    m_kernel.UnregisterKernelObject(this);
+void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) {
+    kernel.UnregisterKernelObject(self);
 }

 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index f384b1568..8d4e0df44 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -159,14 +159,15 @@ public:

         // If ref count hits zero, destroy the object.
         if (cur_ref_count - 1 == 0) {
+            KernelCore& kernel = m_kernel;
             this->Destroy();
-            this->UnregisterWithKernel();
+            KAutoObject::UnregisterWithKernel(kernel, this);
         }
     }

 private:
     void RegisterWithKernel();
-    void UnregisterWithKernel();
+    static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self);

 protected:
     KernelCore& m_kernel;
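Note: the hunk above fixes an ordering hazard. Destroy() may deallocate the object, after which reading the m_kernel member would be a use-after-free; so the kernel reference is copied to a local first, and the unregister step becomes a static function that takes everything it needs as arguments. A hedged sketch of the pattern with generic names (Registry/Object are illustrative, not the yuzu types):

    struct Registry {
        void Unregister(void* obj) { /* removes obj's key; never dereferences it */ }
    };

    struct Object {
        explicit Object(Registry& registry) : m_registry{registry} {}
        void Destroy() { delete this; } // frees *this
        void Close() {
            Registry& registry = m_registry; // copy out while 'this' is still alive
            Destroy();                       // 'this' is dangling after this line
            Unregister(registry, this);      // static: uses only its arguments
        }
        static void Unregister(Registry& registry, Object* self) {
            registry.Unregister(self); // only the pointer value is used as a key
        }
        Registry& m_registry;
    };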
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 75ce5a23c..d8143c650 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -510,11 +510,12 @@ void KScheduler::Unload(KThread* thread) {

 void KScheduler::Reload(KThread* thread) {
     auto& cpu_core = m_kernel.System().ArmInterface(m_core_id);
+    auto* process = thread->GetOwnerProcess();
     cpu_core.LoadContext(thread->GetContext32());
     cpu_core.LoadContext(thread->GetContext64());
     cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress()));
     cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0());
-    cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints());
+    cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr);
     cpu_core.ClearExclusiveState();
 }

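Note: this is the caller-side half of the LoadWatchpointArray signature change: taking a pointer instead of a reference lets Reload() pass nullptr when the thread has no owner process, instead of dereferencing a null GetOwnerProcess(). A hedged sketch of the nullable-array pattern (illustrative types, not the yuzu declarations):

    #include <array>

    struct DebugWatchpoint { /* start/end address, access type, ... */ };
    using WatchpointArray = std::array<DebugWatchpoint, 4>;

    struct CpuCore {
        void LoadWatchpointArray(const WatchpointArray* wp) { watchpoints = wp; }
        bool HasWatchpoints() const { return watchpoints != nullptr; }
        const WatchpointArray* watchpoints = nullptr; // null => no watchpoints active
    };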
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index adb6ec581..2a105a762 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -129,7 +129,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress
     case ThreadType::User:
         ASSERT(((owner == nullptr) ||
                 (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask()));
-        ASSERT(((owner == nullptr) ||
+        ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) ||
                 (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask()));
         break;
     case ThreadType::Kernel:
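Note: the relaxed assertion matters because a 64-bit priority mask only describes priorities 0..63; short-circuiting on prio > Svc::LowestThreadPriority skips the mask test (and the otherwise out-of-range 1ULL << prio shift) for priorities outside that range. A hedged standalone sketch of the check:

    #include <cstdint>

    constexpr std::int32_t LowestThreadPriority = 63; // mask covers priorities 0..63

    bool PriorityAllowed(std::uint64_t priority_mask, std::int32_t prio) {
        if (prio > LowestThreadPriority) {
            return true; // outside the mask's range; skip the shift entirely
        }
        // Setting prio's bit must not change the mask, i.e. the bit was already set.
        return (priority_mask | (1ULL << prio)) == priority_mask;
    }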
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 805963178..7538c1d23 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -73,7 +73,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
     }

     [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const {
@@ -84,7 +84,7 @@ struct Memory::Impl {
             return {};
         }

-        return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr;
+        return system.DeviceMemory().GetPointer<u8>(paddr + vaddr);
     }

     u8 Read8(const Common::ProcessAddress addr) {
@@ -204,7 +204,8 @@ struct Memory::Impl {
                 break;
             }
             case Common::PageType::Memory: {
-                u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS);
+                u8* mem_ptr =
+                    reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS));
                 on_memory(copy_amount, mem_ptr);
                 break;
             }
@@ -448,7 +449,7 @@ struct Memory::Impl {
                 break;
             case Common::PageType::Memory:
                 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                    nullptr, Common::PageType::DebugMemory);
+                    0, Common::PageType::DebugMemory);
                 break;
             default:
                 UNREACHABLE();
@@ -466,7 +467,8 @@ struct Memory::Impl {
             case Common::PageType::DebugMemory: {
                 u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)};
                 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                    pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
+                    reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                    Common::PageType::Memory);
                 break;
             }
             default:
@@ -506,7 +508,7 @@ struct Memory::Impl {
             case Common::PageType::DebugMemory:
             case Common::PageType::Memory:
                 current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                    nullptr, Common::PageType::RasterizerCachedMemory);
+                    0, Common::PageType::RasterizerCachedMemory);
                 break;
             case Common::PageType::RasterizerCachedMemory:
                 // There can be more than one GPU region mapped per CPU region, so it's common
@@ -534,10 +536,11 @@ struct Memory::Impl {
                     // pagetable after unmapping a VMA. In that case the underlying VMA will no
                     // longer exist, and we should just leave the pagetable entry blank.
                     current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                        nullptr, Common::PageType::Unmapped);
+                        0, Common::PageType::Unmapped);
                 } else {
                     current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store(
-                        pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory);
+                        reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK),
+                        Common::PageType::Memory);
                 }
                 break;
             }
@@ -584,7 +587,7 @@ struct Memory::Impl {
                    "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE);

         while (base != end) {
-            page_table.pointers[base].Store(nullptr, type);
+            page_table.pointers[base].Store(0, type);
             page_table.backing_addr[base] = 0;
             page_table.blocks[base] = 0;
             base += 1;
@@ -593,7 +596,8 @@ struct Memory::Impl {
         auto orig_base = base;
         while (base != end) {
             auto host_ptr =
-                system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS);
+                reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) -
+                (base << YUZU_PAGEBITS);
             auto backing = GetInteger(target) - (base << YUZU_PAGEBITS);
             page_table.pointers[base].Store(host_ptr, type);
             page_table.backing_addr[base] = backing;
@@ -619,8 +623,8 @@ struct Memory::Impl {

     // Avoid adding any extra logic to this fast-path block
     const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw();
-    if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
-        return &pointer[vaddr];
+    if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) {
+        return reinterpret_cast<u8*>(pointer + vaddr);
     }
     switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) {
     case Common::PageType::Unmapped:
@@ -814,7 +818,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const {
         return false;
     }
     const auto [pointer, type] = page_table.pointers[page].PointerType();
-    return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory ||
+    return pointer != 0 || type == Common::PageType::RasterizerCachedMemory ||
            type == Common::PageType::DebugMemory;
 }

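Note: the fast path above relies on biased pointers. At map time each page slot stores host_base_of_page - (page_index << YUZU_PAGEBITS), so at lookup time adding the full virtual address back yields the host pointer with one load and one add, no per-page offset computation. A hedged standalone sketch of that lookup (constants illustrative):

    #include <cstdint>

    constexpr unsigned PAGEBITS = 12;

    // pointers[i] holds host_base_of_page_i - (i << PAGEBITS), or 0 if unmapped.
    inline std::uint8_t* LookupHostPointer(const std::uintptr_t* pointers, std::uint64_t vaddr) {
        const std::uintptr_t biased = pointers[vaddr >> PAGEBITS];
        return biased != 0 ? reinterpret_cast<std::uint8_t*>(biased + vaddr) : nullptr;
    }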
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 1528cc1dd..7047e2e63 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -103,7 +103,9 @@ public:
     explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
                             Core::Memory::Memory& cpu_memory_)
         : rasterizer{rasterizer_},
-          cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
+          // Use reinterpret_cast instead of static_cast as workaround for
+          // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
+          cpu_memory{cpu_memory_}, streams{{CounterStream{reinterpret_cast<QueryCache&>(*this),
                                             VideoCore::QueryType::SamplesPassed}}} {
         (void)slot_async_jobs.insert(); // Null value
     }
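Note: QueryCacheBase is a CRTP base, so the hunk above downcasts *this to the derived QueryCache inside the base constructor, before the derived constructor body has run; reinterpret_cast sidesteps a UBSan false positive that static_cast triggers in that window (see the linked llvm-project issue). A minimal sketch of the CRTP shape involved (illustrative names):

    template <typename Derived>
    struct CacheBase {
        // Downcast during base construction: reinterpret_cast avoids the
        // UBSan complaint that static_cast would raise here.
        CacheBase() : self{reinterpret_cast<Derived&>(*this)} {}
        Derived& self;
    };

    struct Cache : CacheBase<Cache> {};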
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp
index 4a197d65d..f200a650f 100644
--- a/src/video_core/rasterizer_accelerated.cpp
+++ b/src/video_core/rasterizer_accelerated.cpp
@@ -13,7 +13,8 @@ namespace VideoCore {

 using namespace Core::Memory;

-RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : cpu_memory{cpu_memory_} {}
+RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_)
+    : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {}

 RasterizerAccelerated::~RasterizerAccelerated() = default;

@@ -26,7 +27,7 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del
     std::atomic_thread_fence(std::memory_order_acquire);
     const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE);
     for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) {
-        std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page);
+        std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page);

         if (delta > 0) {
             ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!");
diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h
index 7118b8aff..e6c0ea87a 100644
--- a/src/video_core/rasterizer_accelerated.h
+++ b/src/video_core/rasterizer_accelerated.h
@@ -41,7 +41,8 @@ private:
     };
     static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!");

-    std::array<CacheEntry, 0x2000000> cached_pages;
+    using CachedPages = std::array<CacheEntry, 0x2000000>;
+    std::unique_ptr<CachedPages> cached_pages;
     Core::Memory::Memory& cpu_memory;
 };

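Note: cached_pages is 0x2000000 entries of 8 bytes each, i.e. 256 MiB; moving it behind std::unique_ptr keeps RasterizerAccelerated itself cheap to construct and puts the table on the heap (where the OS can commit pages lazily) instead of embedding it in every derived rasterizer object. A hedged sketch of the pattern:

    #include <array>
    #include <cstdint>
    #include <memory>

    struct CacheEntry {
        std::uint64_t raw; // stands in for the real 8-byte entry
    };
    using CachedPages = std::array<CacheEntry, 0x2000000>; // 256 MiB

    struct Rasterizer {
        // Allocate the table once, zero-initialized, on the heap.
        Rasterizer() : cached_pages{std::make_unique<CachedPages>()} {}
        std::unique_ptr<CachedPages> cached_pages;
    };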