diff options
Diffstat (limited to 'src')
| -rw-r--r-- | src/common/page_table.h | 12 | ||||
| -rw-r--r-- | src/core/arm/arm_interface.cpp | 4 | ||||
| -rw-r--r-- | src/core/arm/arm_interface.h | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.cpp | 4 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.h | 5 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 3 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 2 | ||||
| -rw-r--r-- | src/core/memory.cpp | 30 | ||||
| -rw-r--r-- | src/video_core/query_cache.h | 4 | ||||
| -rw-r--r-- | src/video_core/rasterizer_accelerated.cpp | 5 | ||||
| -rw-r--r-- | src/video_core/rasterizer_accelerated.h | 3 |
11 files changed, 42 insertions, 32 deletions
diff --git a/src/common/page_table.h b/src/common/page_table.h index fec8378f3..e653d52ad 100644 --- a/src/common/page_table.h +++ b/src/common/page_table.h | |||
| @@ -51,7 +51,7 @@ struct PageTable { | |||
| 51 | class PageInfo { | 51 | class PageInfo { |
| 52 | public: | 52 | public: |
| 53 | /// Returns the page pointer | 53 | /// Returns the page pointer |
| 54 | [[nodiscard]] u8* Pointer() const noexcept { | 54 | [[nodiscard]] uintptr_t Pointer() const noexcept { |
| 55 | return ExtractPointer(raw.load(std::memory_order_relaxed)); | 55 | return ExtractPointer(raw.load(std::memory_order_relaxed)); |
| 56 | } | 56 | } |
| 57 | 57 | ||
| @@ -61,7 +61,7 @@ struct PageTable { | |||
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | /// Returns the page pointer and attribute pair, extracted from the same atomic read | 63 | /// Returns the page pointer and attribute pair, extracted from the same atomic read |
| 64 | [[nodiscard]] std::pair<u8*, PageType> PointerType() const noexcept { | 64 | [[nodiscard]] std::pair<uintptr_t, PageType> PointerType() const noexcept { |
| 65 | const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed); | 65 | const uintptr_t non_atomic_raw = raw.load(std::memory_order_relaxed); |
| 66 | return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)}; | 66 | return {ExtractPointer(non_atomic_raw), ExtractType(non_atomic_raw)}; |
| 67 | } | 67 | } |
| @@ -73,13 +73,13 @@ struct PageTable { | |||
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | /// Write a page pointer and type pair atomically | 75 | /// Write a page pointer and type pair atomically |
| 76 | void Store(u8* pointer, PageType type) noexcept { | 76 | void Store(uintptr_t pointer, PageType type) noexcept { |
| 77 | raw.store(reinterpret_cast<uintptr_t>(pointer) | static_cast<uintptr_t>(type)); | 77 | raw.store(pointer | static_cast<uintptr_t>(type)); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | /// Unpack a pointer from a page info raw representation | 80 | /// Unpack a pointer from a page info raw representation |
| 81 | [[nodiscard]] static u8* ExtractPointer(uintptr_t raw) noexcept { | 81 | [[nodiscard]] static uintptr_t ExtractPointer(uintptr_t raw) noexcept { |
| 82 | return reinterpret_cast<u8*>(raw & (~uintptr_t{0} << ATTRIBUTE_BITS)); | 82 | return raw & (~uintptr_t{0} << ATTRIBUTE_BITS); |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | /// Unpack a page type from a page info raw representation | 85 | /// Unpack a page type from a page info raw representation |
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index aa0eb9791..0c012f094 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp | |||
| @@ -217,8 +217,8 @@ void ARM_Interface::Run() { | |||
| 217 | } | 217 | } |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) { | 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) { |
| 221 | watchpoints = &wp; | 221 | watchpoints = wp; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( | 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( |
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index d5f2fa09a..3d866ff6f 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h | |||
| @@ -186,7 +186,7 @@ public: | |||
| 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; | 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; |
| 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; | 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; |
| 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; | 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; |
| 189 | void LoadWatchpointArray(const WatchpointArray& wp); | 189 | void LoadWatchpointArray(const WatchpointArray* wp); |
| 190 | 190 | ||
| 191 | /// Clears the exclusive monitor's state. | 191 | /// Clears the exclusive monitor's state. |
| 192 | virtual void ClearExclusiveState() = 0; | 192 | virtual void ClearExclusiveState() = 0; |
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp index 0ae42c95c..9cd7a9fd5 100644 --- a/src/core/hle/kernel/k_auto_object.cpp +++ b/src/core/hle/kernel/k_auto_object.cpp | |||
| @@ -15,8 +15,8 @@ void KAutoObject::RegisterWithKernel() { | |||
| 15 | m_kernel.RegisterKernelObject(this); | 15 | m_kernel.RegisterKernelObject(this); |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | void KAutoObject::UnregisterWithKernel() { | 18 | void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) { |
| 19 | m_kernel.UnregisterKernelObject(this); | 19 | kernel.UnregisterKernelObject(self); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | } // namespace Kernel | 22 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index f384b1568..8d4e0df44 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h | |||
| @@ -159,14 +159,15 @@ public: | |||
| 159 | 159 | ||
| 160 | // If ref count hits zero, destroy the object. | 160 | // If ref count hits zero, destroy the object. |
| 161 | if (cur_ref_count - 1 == 0) { | 161 | if (cur_ref_count - 1 == 0) { |
| 162 | KernelCore& kernel = m_kernel; | ||
| 162 | this->Destroy(); | 163 | this->Destroy(); |
| 163 | this->UnregisterWithKernel(); | 164 | KAutoObject::UnregisterWithKernel(kernel, this); |
| 164 | } | 165 | } |
| 165 | } | 166 | } |
| 166 | 167 | ||
| 167 | private: | 168 | private: |
| 168 | void RegisterWithKernel(); | 169 | void RegisterWithKernel(); |
| 169 | void UnregisterWithKernel(); | 170 | static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self); |
| 170 | 171 | ||
| 171 | protected: | 172 | protected: |
| 172 | KernelCore& m_kernel; | 173 | KernelCore& m_kernel; |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 75ce5a23c..d8143c650 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -510,11 +510,12 @@ void KScheduler::Unload(KThread* thread) { | |||
| 510 | 510 | ||
| 511 | void KScheduler::Reload(KThread* thread) { | 511 | void KScheduler::Reload(KThread* thread) { |
| 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); | 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); |
| 513 | auto* process = thread->GetOwnerProcess(); | ||
| 513 | cpu_core.LoadContext(thread->GetContext32()); | 514 | cpu_core.LoadContext(thread->GetContext32()); |
| 514 | cpu_core.LoadContext(thread->GetContext64()); | 515 | cpu_core.LoadContext(thread->GetContext64()); |
| 515 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); | 516 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); |
| 516 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); | 517 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); |
| 517 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | 518 | cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr); |
| 518 | cpu_core.ClearExclusiveState(); | 519 | cpu_core.ClearExclusiveState(); |
| 519 | } | 520 | } |
| 520 | 521 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index d88909889..7df8fd7f7 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -129,7 +129,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress | |||
| 129 | case ThreadType::User: | 129 | case ThreadType::User: |
| 130 | ASSERT(((owner == nullptr) || | 130 | ASSERT(((owner == nullptr) || |
| 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); | 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); |
| 132 | ASSERT(((owner == nullptr) || | 132 | ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) || |
| 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); | 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); |
| 134 | break; | 134 | break; |
| 135 | case ThreadType::Kernel: | 135 | case ThreadType::Kernel: |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 179685b72..513bc4edb 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -73,7 +73,7 @@ struct Memory::Impl { | |||
| 73 | return {}; | 73 | return {}; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 76 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { | 79 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { |
| @@ -84,7 +84,7 @@ struct Memory::Impl { | |||
| 84 | return {}; | 84 | return {}; |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 87 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | u8 Read8(const Common::ProcessAddress addr) { | 90 | u8 Read8(const Common::ProcessAddress addr) { |
| @@ -205,7 +205,8 @@ struct Memory::Impl { | |||
| 205 | break; | 205 | break; |
| 206 | } | 206 | } |
| 207 | case Common::PageType::Memory: { | 207 | case Common::PageType::Memory: { |
| 208 | u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS); | 208 | u8* mem_ptr = |
| 209 | reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS)); | ||
| 209 | on_memory(copy_amount, mem_ptr); | 210 | on_memory(copy_amount, mem_ptr); |
| 210 | break; | 211 | break; |
| 211 | } | 212 | } |
| @@ -447,7 +448,7 @@ struct Memory::Impl { | |||
| 447 | break; | 448 | break; |
| 448 | case Common::PageType::Memory: | 449 | case Common::PageType::Memory: |
| 449 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 450 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 450 | nullptr, Common::PageType::DebugMemory); | 451 | 0, Common::PageType::DebugMemory); |
| 451 | break; | 452 | break; |
| 452 | default: | 453 | default: |
| 453 | UNREACHABLE(); | 454 | UNREACHABLE(); |
| @@ -465,7 +466,8 @@ struct Memory::Impl { | |||
| 465 | case Common::PageType::DebugMemory: { | 466 | case Common::PageType::DebugMemory: { |
| 466 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; | 467 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; |
| 467 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 468 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 468 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 469 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 470 | Common::PageType::Memory); | ||
| 469 | break; | 471 | break; |
| 470 | } | 472 | } |
| 471 | default: | 473 | default: |
| @@ -505,7 +507,7 @@ struct Memory::Impl { | |||
| 505 | case Common::PageType::DebugMemory: | 507 | case Common::PageType::DebugMemory: |
| 506 | case Common::PageType::Memory: | 508 | case Common::PageType::Memory: |
| 507 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 509 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 508 | nullptr, Common::PageType::RasterizerCachedMemory); | 510 | 0, Common::PageType::RasterizerCachedMemory); |
| 509 | break; | 511 | break; |
| 510 | case Common::PageType::RasterizerCachedMemory: | 512 | case Common::PageType::RasterizerCachedMemory: |
| 511 | // There can be more than one GPU region mapped per CPU region, so it's common | 513 | // There can be more than one GPU region mapped per CPU region, so it's common |
| @@ -533,10 +535,11 @@ struct Memory::Impl { | |||
| 533 | // pagetable after unmapping a VMA. In that case the underlying VMA will no | 535 | // pagetable after unmapping a VMA. In that case the underlying VMA will no |
| 534 | // longer exist, and we should just leave the pagetable entry blank. | 536 | // longer exist, and we should just leave the pagetable entry blank. |
| 535 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 537 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 536 | nullptr, Common::PageType::Unmapped); | 538 | 0, Common::PageType::Unmapped); |
| 537 | } else { | 539 | } else { |
| 538 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 540 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 539 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 541 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 542 | Common::PageType::Memory); | ||
| 540 | } | 543 | } |
| 541 | break; | 544 | break; |
| 542 | } | 545 | } |
| @@ -583,7 +586,7 @@ struct Memory::Impl { | |||
| 583 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); | 586 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); |
| 584 | 587 | ||
| 585 | while (base != end) { | 588 | while (base != end) { |
| 586 | page_table.pointers[base].Store(nullptr, type); | 589 | page_table.pointers[base].Store(0, type); |
| 587 | page_table.backing_addr[base] = 0; | 590 | page_table.backing_addr[base] = 0; |
| 588 | page_table.blocks[base] = 0; | 591 | page_table.blocks[base] = 0; |
| 589 | base += 1; | 592 | base += 1; |
| @@ -592,7 +595,8 @@ struct Memory::Impl { | |||
| 592 | auto orig_base = base; | 595 | auto orig_base = base; |
| 593 | while (base != end) { | 596 | while (base != end) { |
| 594 | auto host_ptr = | 597 | auto host_ptr = |
| 595 | system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS); | 598 | reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) - |
| 599 | (base << YUZU_PAGEBITS); | ||
| 596 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); | 600 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); |
| 597 | page_table.pointers[base].Store(host_ptr, type); | 601 | page_table.pointers[base].Store(host_ptr, type); |
| 598 | page_table.backing_addr[base] = backing; | 602 | page_table.backing_addr[base] = backing; |
| @@ -618,8 +622,8 @@ struct Memory::Impl { | |||
| 618 | 622 | ||
| 619 | // Avoid adding any extra logic to this fast-path block | 623 | // Avoid adding any extra logic to this fast-path block |
| 620 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); | 624 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); |
| 621 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | 625 | if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { |
| 622 | return &pointer[vaddr]; | 626 | return reinterpret_cast<u8*>(pointer + vaddr); |
| 623 | } | 627 | } |
| 624 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | 628 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { |
| 625 | case Common::PageType::Unmapped: | 629 | case Common::PageType::Unmapped: |
| @@ -813,7 +817,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { | |||
| 813 | return false; | 817 | return false; |
| 814 | } | 818 | } |
| 815 | const auto [pointer, type] = page_table.pointers[page].PointerType(); | 819 | const auto [pointer, type] = page_table.pointers[page].PointerType(); |
| 816 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory || | 820 | return pointer != 0 || type == Common::PageType::RasterizerCachedMemory || |
| 817 | type == Common::PageType::DebugMemory; | 821 | type == Common::PageType::DebugMemory; |
| 818 | } | 822 | } |
| 819 | 823 | ||
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h index 1528cc1dd..7047e2e63 100644 --- a/src/video_core/query_cache.h +++ b/src/video_core/query_cache.h | |||
| @@ -103,7 +103,9 @@ public: | |||
| 103 | explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, | 103 | explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, |
| 104 | Core::Memory::Memory& cpu_memory_) | 104 | Core::Memory::Memory& cpu_memory_) |
| 105 | : rasterizer{rasterizer_}, | 105 | : rasterizer{rasterizer_}, |
| 106 | cpu_memory{cpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this), | 106 | // Use reinterpret_cast instead of static_cast as workaround for |
| 107 | // UBSan bug (https://github.com/llvm/llvm-project/issues/59060) | ||
| 108 | cpu_memory{cpu_memory_}, streams{{CounterStream{reinterpret_cast<QueryCache&>(*this), | ||
| 107 | VideoCore::QueryType::SamplesPassed}}} { | 109 | VideoCore::QueryType::SamplesPassed}}} { |
| 108 | (void)slot_async_jobs.insert(); // Null value | 110 | (void)slot_async_jobs.insert(); // Null value |
| 109 | } | 111 | } |
diff --git a/src/video_core/rasterizer_accelerated.cpp b/src/video_core/rasterizer_accelerated.cpp index 4a197d65d..f200a650f 100644 --- a/src/video_core/rasterizer_accelerated.cpp +++ b/src/video_core/rasterizer_accelerated.cpp | |||
| @@ -13,7 +13,8 @@ namespace VideoCore { | |||
| 13 | 13 | ||
| 14 | using namespace Core::Memory; | 14 | using namespace Core::Memory; |
| 15 | 15 | ||
| 16 | RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) : cpu_memory{cpu_memory_} {} | 16 | RasterizerAccelerated::RasterizerAccelerated(Memory& cpu_memory_) |
| 17 | : cached_pages(std::make_unique<CachedPages>()), cpu_memory{cpu_memory_} {} | ||
| 17 | 18 | ||
| 18 | RasterizerAccelerated::~RasterizerAccelerated() = default; | 19 | RasterizerAccelerated::~RasterizerAccelerated() = default; |
| 19 | 20 | ||
| @@ -26,7 +27,7 @@ void RasterizerAccelerated::UpdatePagesCachedCount(VAddr addr, u64 size, int del | |||
| 26 | std::atomic_thread_fence(std::memory_order_acquire); | 27 | std::atomic_thread_fence(std::memory_order_acquire); |
| 27 | const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); | 28 | const u64 page_end = Common::DivCeil(addr + size, YUZU_PAGESIZE); |
| 28 | for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) { | 29 | for (u64 page = addr >> YUZU_PAGEBITS; page != page_end; ++page) { |
| 29 | std::atomic_uint16_t& count = cached_pages.at(page >> 2).Count(page); | 30 | std::atomic_uint16_t& count = cached_pages->at(page >> 2).Count(page); |
| 30 | 31 | ||
| 31 | if (delta > 0) { | 32 | if (delta > 0) { |
| 32 | ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!"); | 33 | ASSERT_MSG(count.load(std::memory_order::relaxed) < UINT16_MAX, "Count may overflow!"); |
diff --git a/src/video_core/rasterizer_accelerated.h b/src/video_core/rasterizer_accelerated.h index 7118b8aff..e6c0ea87a 100644 --- a/src/video_core/rasterizer_accelerated.h +++ b/src/video_core/rasterizer_accelerated.h | |||
| @@ -41,7 +41,8 @@ private: | |||
| 41 | }; | 41 | }; |
| 42 | static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!"); | 42 | static_assert(sizeof(CacheEntry) == 8, "CacheEntry should be 8 bytes!"); |
| 43 | 43 | ||
| 44 | std::array<CacheEntry, 0x2000000> cached_pages; | 44 | using CachedPages = std::array<CacheEntry, 0x2000000>; |
| 45 | std::unique_ptr<CachedPages> cached_pages; | ||
| 45 | Core::Memory::Memory& cpu_memory; | 46 | Core::Memory::Memory& cpu_memory; |
| 46 | }; | 47 | }; |
| 47 | 48 | ||