diff options
Diffstat (limited to 'src/core')
| -rw-r--r-- | src/core/arm/arm_interface.cpp | 4 | ||||
| -rw-r--r-- | src/core/arm/arm_interface.h | 2 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.cpp | 4 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_auto_object.h | 5 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_scheduler.cpp | 3 | ||||
| -rw-r--r-- | src/core/hle/kernel/k_thread.cpp | 2 | ||||
| -rw-r--r-- | src/core/memory.cpp | 30 |
7 files changed, 28 insertions, 22 deletions
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp index aa0eb9791..0c012f094 100644 --- a/src/core/arm/arm_interface.cpp +++ b/src/core/arm/arm_interface.cpp | |||
| @@ -217,8 +217,8 @@ void ARM_Interface::Run() { | |||
| 217 | } | 217 | } |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray& wp) { | 220 | void ARM_Interface::LoadWatchpointArray(const WatchpointArray* wp) { |
| 221 | watchpoints = &wp; | 221 | watchpoints = wp; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( | 224 | const Kernel::DebugWatchpoint* ARM_Interface::MatchingWatchpoint( |
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h index d5f2fa09a..3d866ff6f 100644 --- a/src/core/arm/arm_interface.h +++ b/src/core/arm/arm_interface.h | |||
| @@ -186,7 +186,7 @@ public: | |||
| 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; | 186 | virtual void SaveContext(ThreadContext64& ctx) const = 0; |
| 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; | 187 | virtual void LoadContext(const ThreadContext32& ctx) = 0; |
| 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; | 188 | virtual void LoadContext(const ThreadContext64& ctx) = 0; |
| 189 | void LoadWatchpointArray(const WatchpointArray& wp); | 189 | void LoadWatchpointArray(const WatchpointArray* wp); |
| 190 | 190 | ||
| 191 | /// Clears the exclusive monitor's state. | 191 | /// Clears the exclusive monitor's state. |
| 192 | virtual void ClearExclusiveState() = 0; | 192 | virtual void ClearExclusiveState() = 0; |
diff --git a/src/core/hle/kernel/k_auto_object.cpp b/src/core/hle/kernel/k_auto_object.cpp index 0ae42c95c..9cd7a9fd5 100644 --- a/src/core/hle/kernel/k_auto_object.cpp +++ b/src/core/hle/kernel/k_auto_object.cpp | |||
| @@ -15,8 +15,8 @@ void KAutoObject::RegisterWithKernel() { | |||
| 15 | m_kernel.RegisterKernelObject(this); | 15 | m_kernel.RegisterKernelObject(this); |
| 16 | } | 16 | } |
| 17 | 17 | ||
| 18 | void KAutoObject::UnregisterWithKernel() { | 18 | void KAutoObject::UnregisterWithKernel(KernelCore& kernel, KAutoObject* self) { |
| 19 | m_kernel.UnregisterKernelObject(this); | 19 | kernel.UnregisterKernelObject(self); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | } // namespace Kernel | 22 | } // namespace Kernel |
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h index f384b1568..8d4e0df44 100644 --- a/src/core/hle/kernel/k_auto_object.h +++ b/src/core/hle/kernel/k_auto_object.h | |||
| @@ -159,14 +159,15 @@ public: | |||
| 159 | 159 | ||
| 160 | // If ref count hits zero, destroy the object. | 160 | // If ref count hits zero, destroy the object. |
| 161 | if (cur_ref_count - 1 == 0) { | 161 | if (cur_ref_count - 1 == 0) { |
| 162 | KernelCore& kernel = m_kernel; | ||
| 162 | this->Destroy(); | 163 | this->Destroy(); |
| 163 | this->UnregisterWithKernel(); | 164 | KAutoObject::UnregisterWithKernel(kernel, this); |
| 164 | } | 165 | } |
| 165 | } | 166 | } |
| 166 | 167 | ||
| 167 | private: | 168 | private: |
| 168 | void RegisterWithKernel(); | 169 | void RegisterWithKernel(); |
| 169 | void UnregisterWithKernel(); | 170 | static void UnregisterWithKernel(KernelCore& kernel, KAutoObject* self); |
| 170 | 171 | ||
| 171 | protected: | 172 | protected: |
| 172 | KernelCore& m_kernel; | 173 | KernelCore& m_kernel; |
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp index 75ce5a23c..d8143c650 100644 --- a/src/core/hle/kernel/k_scheduler.cpp +++ b/src/core/hle/kernel/k_scheduler.cpp | |||
| @@ -510,11 +510,12 @@ void KScheduler::Unload(KThread* thread) { | |||
| 510 | 510 | ||
| 511 | void KScheduler::Reload(KThread* thread) { | 511 | void KScheduler::Reload(KThread* thread) { |
| 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); | 512 | auto& cpu_core = m_kernel.System().ArmInterface(m_core_id); |
| 513 | auto* process = thread->GetOwnerProcess(); | ||
| 513 | cpu_core.LoadContext(thread->GetContext32()); | 514 | cpu_core.LoadContext(thread->GetContext32()); |
| 514 | cpu_core.LoadContext(thread->GetContext64()); | 515 | cpu_core.LoadContext(thread->GetContext64()); |
| 515 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); | 516 | cpu_core.SetTlsAddress(GetInteger(thread->GetTlsAddress())); |
| 516 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); | 517 | cpu_core.SetTPIDR_EL0(thread->GetTpidrEl0()); |
| 517 | cpu_core.LoadWatchpointArray(thread->GetOwnerProcess()->GetWatchpoints()); | 518 | cpu_core.LoadWatchpointArray(process ? &process->GetWatchpoints() : nullptr); |
| 518 | cpu_core.ClearExclusiveState(); | 519 | cpu_core.ClearExclusiveState(); |
| 519 | } | 520 | } |
| 520 | 521 | ||
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp index d88909889..7df8fd7f7 100644 --- a/src/core/hle/kernel/k_thread.cpp +++ b/src/core/hle/kernel/k_thread.cpp | |||
| @@ -129,7 +129,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, KProcessAddress | |||
| 129 | case ThreadType::User: | 129 | case ThreadType::User: |
| 130 | ASSERT(((owner == nullptr) || | 130 | ASSERT(((owner == nullptr) || |
| 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); | 131 | (owner->GetCoreMask() | (1ULL << virt_core)) == owner->GetCoreMask())); |
| 132 | ASSERT(((owner == nullptr) || | 132 | ASSERT(((owner == nullptr) || (prio > Svc::LowestThreadPriority) || |
| 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); | 133 | (owner->GetPriorityMask() | (1ULL << prio)) == owner->GetPriorityMask())); |
| 134 | break; | 134 | break; |
| 135 | case ThreadType::Kernel: | 135 | case ThreadType::Kernel: |
diff --git a/src/core/memory.cpp b/src/core/memory.cpp index 09c53ea92..fa5273402 100644 --- a/src/core/memory.cpp +++ b/src/core/memory.cpp | |||
| @@ -83,7 +83,7 @@ struct Memory::Impl { | |||
| 83 | return {}; | 83 | return {}; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 86 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { | 89 | [[nodiscard]] u8* GetPointerFromDebugMemory(u64 vaddr) const { |
| @@ -94,7 +94,7 @@ struct Memory::Impl { | |||
| 94 | return {}; | 94 | return {}; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | return system.DeviceMemory().GetPointer<u8>(paddr) + vaddr; | 97 | return system.DeviceMemory().GetPointer<u8>(paddr + vaddr); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | u8 Read8(const Common::ProcessAddress addr) { | 100 | u8 Read8(const Common::ProcessAddress addr) { |
| @@ -220,7 +220,8 @@ struct Memory::Impl { | |||
| 220 | break; | 220 | break; |
| 221 | } | 221 | } |
| 222 | case Common::PageType::Memory: { | 222 | case Common::PageType::Memory: { |
| 223 | u8* mem_ptr = pointer + page_offset + (page_index << YUZU_PAGEBITS); | 223 | u8* mem_ptr = |
| 224 | reinterpret_cast<u8*>(pointer + page_offset + (page_index << YUZU_PAGEBITS)); | ||
| 224 | on_memory(copy_amount, mem_ptr); | 225 | on_memory(copy_amount, mem_ptr); |
| 225 | break; | 226 | break; |
| 226 | } | 227 | } |
| @@ -462,7 +463,7 @@ struct Memory::Impl { | |||
| 462 | break; | 463 | break; |
| 463 | case Common::PageType::Memory: | 464 | case Common::PageType::Memory: |
| 464 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 465 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 465 | nullptr, Common::PageType::DebugMemory); | 466 | 0, Common::PageType::DebugMemory); |
| 466 | break; | 467 | break; |
| 467 | default: | 468 | default: |
| 468 | UNREACHABLE(); | 469 | UNREACHABLE(); |
| @@ -480,7 +481,8 @@ struct Memory::Impl { | |||
| 480 | case Common::PageType::DebugMemory: { | 481 | case Common::PageType::DebugMemory: { |
| 481 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; | 482 | u8* const pointer{GetPointerFromDebugMemory(vaddr & ~YUZU_PAGEMASK)}; |
| 482 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 483 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 483 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 484 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 485 | Common::PageType::Memory); | ||
| 484 | break; | 486 | break; |
| 485 | } | 487 | } |
| 486 | default: | 488 | default: |
| @@ -520,7 +522,7 @@ struct Memory::Impl { | |||
| 520 | case Common::PageType::DebugMemory: | 522 | case Common::PageType::DebugMemory: |
| 521 | case Common::PageType::Memory: | 523 | case Common::PageType::Memory: |
| 522 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 524 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 523 | nullptr, Common::PageType::RasterizerCachedMemory); | 525 | 0, Common::PageType::RasterizerCachedMemory); |
| 524 | break; | 526 | break; |
| 525 | case Common::PageType::RasterizerCachedMemory: | 527 | case Common::PageType::RasterizerCachedMemory: |
| 526 | // There can be more than one GPU region mapped per CPU region, so it's common | 528 | // There can be more than one GPU region mapped per CPU region, so it's common |
| @@ -548,10 +550,11 @@ struct Memory::Impl { | |||
| 548 | // pagetable after unmapping a VMA. In that case the underlying VMA will no | 550 | // pagetable after unmapping a VMA. In that case the underlying VMA will no |
| 549 | // longer exist, and we should just leave the pagetable entry blank. | 551 | // longer exist, and we should just leave the pagetable entry blank. |
| 550 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 552 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 551 | nullptr, Common::PageType::Unmapped); | 553 | 0, Common::PageType::Unmapped); |
| 552 | } else { | 554 | } else { |
| 553 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( | 555 | current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Store( |
| 554 | pointer - (vaddr & ~YUZU_PAGEMASK), Common::PageType::Memory); | 556 | reinterpret_cast<uintptr_t>(pointer) - (vaddr & ~YUZU_PAGEMASK), |
| 557 | Common::PageType::Memory); | ||
| 555 | } | 558 | } |
| 556 | break; | 559 | break; |
| 557 | } | 560 | } |
| @@ -598,7 +601,7 @@ struct Memory::Impl { | |||
| 598 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); | 601 | "Mapping memory page without a pointer @ {:016x}", base * YUZU_PAGESIZE); |
| 599 | 602 | ||
| 600 | while (base != end) { | 603 | while (base != end) { |
| 601 | page_table.pointers[base].Store(nullptr, type); | 604 | page_table.pointers[base].Store(0, type); |
| 602 | page_table.backing_addr[base] = 0; | 605 | page_table.backing_addr[base] = 0; |
| 603 | page_table.blocks[base] = 0; | 606 | page_table.blocks[base] = 0; |
| 604 | base += 1; | 607 | base += 1; |
| @@ -607,7 +610,8 @@ struct Memory::Impl { | |||
| 607 | auto orig_base = base; | 610 | auto orig_base = base; |
| 608 | while (base != end) { | 611 | while (base != end) { |
| 609 | auto host_ptr = | 612 | auto host_ptr = |
| 610 | system.DeviceMemory().GetPointer<u8>(target) - (base << YUZU_PAGEBITS); | 613 | reinterpret_cast<uintptr_t>(system.DeviceMemory().GetPointer<u8>(target)) - |
| 614 | (base << YUZU_PAGEBITS); | ||
| 611 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); | 615 | auto backing = GetInteger(target) - (base << YUZU_PAGEBITS); |
| 612 | page_table.pointers[base].Store(host_ptr, type); | 616 | page_table.pointers[base].Store(host_ptr, type); |
| 613 | page_table.backing_addr[base] = backing; | 617 | page_table.backing_addr[base] = backing; |
| @@ -633,8 +637,8 @@ struct Memory::Impl { | |||
| 633 | 637 | ||
| 634 | // Avoid adding any extra logic to this fast-path block | 638 | // Avoid adding any extra logic to this fast-path block |
| 635 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); | 639 | const uintptr_t raw_pointer = current_page_table->pointers[vaddr >> YUZU_PAGEBITS].Raw(); |
| 636 | if (u8* const pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { | 640 | if (const uintptr_t pointer = Common::PageTable::PageInfo::ExtractPointer(raw_pointer)) { |
| 637 | return &pointer[vaddr]; | 641 | return reinterpret_cast<u8*>(pointer + vaddr); |
| 638 | } | 642 | } |
| 639 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { | 643 | switch (Common::PageTable::PageInfo::ExtractType(raw_pointer)) { |
| 640 | case Common::PageType::Unmapped: | 644 | case Common::PageType::Unmapped: |
| @@ -828,7 +832,7 @@ bool Memory::IsValidVirtualAddress(const Common::ProcessAddress vaddr) const { | |||
| 828 | return false; | 832 | return false; |
| 829 | } | 833 | } |
| 830 | const auto [pointer, type] = page_table.pointers[page].PointerType(); | 834 | const auto [pointer, type] = page_table.pointers[page].PointerType(); |
| 831 | return pointer != nullptr || type == Common::PageType::RasterizerCachedMemory || | 835 | return pointer != 0 || type == Common::PageType::RasterizerCachedMemory || |
| 832 | type == Common::PageType::DebugMemory; | 836 | type == Common::PageType::DebugMemory; |
| 833 | } | 837 | } |
| 834 | 838 | ||