diff options
Diffstat (limited to 'src/core')
| -rw-r--r-- | src/core/arm/arm_interface.cpp | 4 | ||||
| -rw-r--r-- | src/core/arm/arm_interface.h | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.cpp | 4 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.h | 5 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 3 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 2 | ||||
| -rw-r--r-- | src/core/memory.cpp | 30 |
7 files changed, 28 insertions, 22 deletions
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index aa0eb9791..0c012f094 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp | |||
| @@ -217,8 +217,8 @@ void ARM_Interface::Run() { | |||
| 217 | } | 217 | } |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) { | 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) { |
| 221 | watchpoints = &wp; | 221 | watchpoints = wp; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( | 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( |
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index d5f2fa09a..3d866ff6f 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h | |||
| @@ -186,7 +186,7 @@ public: | |||
| 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; | 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; |
| 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; | 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; |
| 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; | 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; |
| 189 | void LoadWatchpointArray(const WatchpointArray& wp); | 189 | void LoadWatchpointArray(const WatchpointArray* wp); |
| 190 | 190 | ||
| 191 | /// Clears the exclusive monitor's state. | 191 | /// Clears the exclusive monitor's state. |
| 192 | virtual void ClearExclusiveState() = 0; | 192 | virtual void ClearExclusiveState() = 0; |
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp index 0ae42c95c..9cd7a9fd5 100644 --- a/src/core/hle/kernel/k_auto_object.cpp +++ b/src/core/hle/kernel/k_auto_object.cpp | |||
| @@ -15,8 +15,8 @@ void KAutoObject::RegisterWithKernel() { | |||
| 15 | m_kernel.RegisterKernelObject(this); | 15 | m_kernel.RegisterKernelObject(this); |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | void KAutoObject::UnregisterWithKernel() { | 18 | void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) { |
| 19 | m_kernel.UnregisterKernelObject(this); | 19 | kernel.UnregisterKernelObject(self); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | } // namespace Kernel | 22 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index f384b1568..8d4e0df44 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h | |||
| @@ -159,14 +159,15 @@ public: | |||
| 159 | 159 | ||
| 160 | // If ref count hits zero, destroy the object. | 160 | // If ref count hits zero, destroy the object. |
| 161 | if (cur_ref_count - 1 == 0) { | 161 | if (cur_ref_count - 1 == 0) { |
| 162 | KernelCore& kernel = m_kernel; | ||
| 162 | this->Destroy(); | 163 | this->Destroy(); |
| 163 | this->UnregisterWithKernel(); | 164 | KAutoObject::UnregisterWithKernel(kernel, this); |
| 164 | } | 165 | } |
| 165 | } | 166 | } |
| 166 | 167 | ||
| 167 | private: | 168 | private: |
| 168 | void RegisterWithKernel(); | 169 | void RegisterWithKernel(); |
| 169 | void UnregisterWithKernel(); | 170 | static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self); |
| 170 | 171 | ||
| 171 | protected: | 172 | protected: |
| 172 | KernelCore& m_kernel; | 173 | KernelCore& m_kernel; |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 75ce5a23c..d8143c650 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -510,11 +510,12 @@ void KScheduler::Unload(KThread* thread) { | |||
| 510 | 510 | ||
| 511 | void KScheduler::Reload(KThread* thread) { | 511 | void KScheduler::Reload(KThread* thread) { |
| 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); | 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); |
| 513 | auto* process = thread->GetOwnerProcess(); | ||
| 513 | cpu_core.LoadContext(thread->GetContext32()); | 514 | cpu_core.LoadContext(thread->GetContext32()); |
| 514 | cpu_core.LoadContext(thread->GetContext64()); | 515 | cpu_core.LoadContext(thread->GetContext64()); |
| 515 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); | 516 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); |
| 516 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); | 517 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); |
| 517 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | 518 | cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr); |
| 518 | cpu_core.ClearExclusiveState(); | 519 | cpu_core.ClearExclusiveState(); |
| 519 | } | 520 | } |
| 520 | 521 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index adb6ec581..2a105a762 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -129,7 +129,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress | |||
| 129 | case ThreadType::User: | 129 | case ThreadType::User: |
| 130 | ASSERT(((owner == nullptr) || | 130 | ASSERT(((owner == nullptr) || |
| 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); | 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); |
| 132 | ASSERT(((owner == nullptr) || | 132 | ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) || |
| 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); | 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); |
| 134 | break; | 134 | break; |
| 135 | case ThreadType::Kernel: | 135 | case ThreadType::Kernel: |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 805963178..7538c1d23 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -73,7 +73,7 @@ struct Memory::Impl { | |||
| 73 | return {}; | 73 | return {}; |
| 74 | } | 74 | } |
| 75 | 75 | ||
| 76 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 76 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { | 79 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { |
| @@ -84,7 +84,7 @@ struct Memory::Impl { | |||
| 84 | return {}; | 84 | return {}; |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 87 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | u8 Read8(const Common::ProcessAddress addr) { | 90 | u8 Read8(const Common::ProcessAddress addr) { |
| @@ -204,7 +204,8 @@ struct Memory::Impl { | |||
| 204 | break; | 204 | break; |
| 205 | } | 205 | } |
| 206 | case Common::PageType::Memory: { | 206 | case Common::PageType::Memory: { |
| 207 | u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS); | 207 | u8* mem_ptr = |
| 208 | reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS)); | ||
| 208 | on_memory(copy_amount, mem_ptr); | 209 | on_memory(copy_amount, mem_ptr); |
| 209 | break; | 210 | break; |
| 210 | } | 211 | } |
| @@ -448,7 +449,7 @@ struct Memory::Impl { | |||
| 448 | break; | 449 | break; |
| 449 | case Common::PageType::Memory: | 450 | case Common::PageType::Memory: |
| 450 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 451 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 451 | nullptr, Common::PageType::DebugMemory); | 452 | 0, Common::PageType::DebugMemory); |
| 452 | break; | 453 | break; |
| 453 | default: | 454 | default: |
| 454 | UNREACHABLE(); | 455 | UNREACHABLE(); |
| @@ -466,7 +467,8 @@ struct Memory::Impl { | |||
| 466 | case Common::PageType::DebugMemory: { | 467 | case Common::PageType::DebugMemory: { |
| 467 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; | 468 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; |
| 468 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 469 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 469 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 470 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 471 | Common::PageType::Memory); | ||
| 470 | break; | 472 | break; |
| 471 | } | 473 | } |
| 472 | default: | 474 | default: |
| @@ -506,7 +508,7 @@ struct Memory::Impl { | |||
| 506 | case Common::PageType::DebugMemory: | 508 | case Common::PageType::DebugMemory: |
| 507 | case Common::PageType::Memory: | 509 | case Common::PageType::Memory: |
| 508 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 510 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 509 | nullptr, Common::PageType::RasterizerCachedMemory); | 511 | 0, Common::PageType::RasterizerCachedMemory); |
| 510 | break; | 512 | break; |
| 511 | case Common::PageType::RasterizerCachedMemory: | 513 | case Common::PageType::RasterizerCachedMemory: |
| 512 | // There can be more than one GPU region mapped per CPU region, so it's common | 514 | // There can be more than one GPU region mapped per CPU region, so it's common |
| @@ -534,10 +536,11 @@ struct Memory::Impl { | |||
| 534 | // pagetable after unmapping a VMA. In that case the underlying VMA will no | 536 | // pagetable after unmapping a VMA. In that case the underlying VMA will no |
| 535 | // longer exist, and we should just leave the pagetable entry blank. | 537 | // longer exist, and we should just leave the pagetable entry blank. |
| 536 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 538 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 537 | nullptr, Common::PageType::Unmapped); | 539 | 0, Common::PageType::Unmapped); |
| 538 | } else { | 540 | } else { |
| 539 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 541 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 540 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 542 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 543 | Common::PageType::Memory); | ||
| 541 | } | 544 | } |
| 542 | break; | 545 | break; |
| 543 | } | 546 | } |
| @@ -584,7 +587,7 @@ struct Memory::Impl { | |||
| 584 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); | 587 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); |
| 585 | 588 | ||
| 586 | while (base != end) { | 589 | while (base != end) { |
| 587 | page_table.pointers[base].Store(nullptr, type); | 590 | page_table.pointers[base].Store(0, type); |
| 588 | page_table.backing_addr[base] = 0; | 591 | page_table.backing_addr[base] = 0; |
| 589 | page_table.blocks[base] = 0; | 592 | page_table.blocks[base] = 0; |
| 590 | base += 1; | 593 | base += 1; |
| @@ -593,7 +596,8 @@ struct Memory::Impl { | |||
| 593 | auto orig_base = base; | 596 | auto orig_base = base; |
| 594 | while (base != end) { | 597 | while (base != end) { |
| 595 | auto host_ptr = | 598 | auto host_ptr = |
| 596 | system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS); | 599 | reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) - |
| 600 | (base << YUZU_PAGEBITS); | ||
| 597 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); | 601 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); |
| 598 | page_table.pointers[base].Store(host_ptr, type); | 602 | page_table.pointers[base].Store(host_ptr, type); |
| 599 | page_table.backing_addr[base] = backing; | 603 | page_table.backing_addr[base] = backing; |
| @@ -619,8 +623,8 @@ struct Memory::Impl { | |||
| 619 | 623 | ||
| 620 | // Avoid adding any extra logic to this fast-path block | 624 | // Avoid adding any extra logic to this fast-path block |
| 621 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); | 625 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); |
| 622 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | 626 | if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { |
| 623 | return &pointer[vaddr]; | 627 | return reinterpret_cast<u8*>(pointer + vaddr); |
| 624 | } | 628 | } |
| 625 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | 629 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { |
| 626 | case Common::PageType::Unmapped: | 630 | case Common::PageType::Unmapped: |
| @@ -814,7 +818,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { | |||
| 814 | return false; | 818 | return false; |
| 815 | } | 819 | } |
| 816 | const auto [pointer, type] = page_table.pointers[page].PointerType(); | 820 | const auto [pointer, type] = page_table.pointers[page].PointerType(); |
| 817 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory || | 821 | return pointer != 0 || type == Common::PageType::RasterizerCachedMemory || |
| 818 | type == Common::PageType::DebugMemory; | 822 | type == Common::PageType::DebugMemory; |
| 819 | } | 823 | } |
| 820 | 824 | ||