Diffstat (limited to 'src')
25 files changed, 300 insertions(+), 285 deletions(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 39d038493..39ae573b2 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -36,7 +36,6 @@ if (MSVC)
     # /GT - Supports fiber safety for data allocated using static thread-local storage
     add_compile_options(
         /MP
-        /Zi
         /Zm200
         /Zo
         /permissive-
@@ -79,6 +78,13 @@ if (MSVC)
         /we5245 # 'function': unreferenced function with internal linkage has been removed
     )
 
+    if (USE_CCACHE)
+        # when caching, we need to use /Z7 to downgrade debug info to use an older but more cachable format
+        add_compile_options(/Z7)
+    else()
+        add_compile_options(/Zi)
+    endif()
+
     if (ARCHITECTURE_x86_64)
         add_compile_options(/QIntel-jcc-erratum)
     endif()
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index f9aeb692a..bc92b360b 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -20,10 +20,8 @@ struct Fiber::FiberImpl {
     VirtualBuffer<u8> rewind_stack;
 
     std::mutex guard;
-    std::function<void(void*)> entry_point;
-    std::function<void(void*)> rewind_point;
-    void* rewind_parameter{};
-    void* start_parameter{};
+    std::function<void()> entry_point;
+    std::function<void()> rewind_point;
     std::shared_ptr<Fiber> previous_fiber;
     bool is_thread_fiber{};
     bool released{};
@@ -34,13 +32,8 @@ struct Fiber::FiberImpl {
     boost::context::detail::fcontext_t rewind_context{};
 };
 
-void Fiber::SetStartParameter(void* new_parameter) {
-    impl->start_parameter = new_parameter;
-}
-
-void Fiber::SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param) {
+void Fiber::SetRewindPoint(std::function<void()>&& rewind_func) {
     impl->rewind_point = std::move(rewind_func);
-    impl->rewind_parameter = rewind_param;
 }
 
 void Fiber::Start(boost::context::detail::transfer_t& transfer) {
@@ -48,7 +41,7 @@ void Fiber::Start(boost::context::detail::transfer_t& transfer) {
     impl->previous_fiber->impl->context = transfer.fctx;
     impl->previous_fiber->impl->guard.unlock();
     impl->previous_fiber.reset();
-    impl->entry_point(impl->start_parameter);
+    impl->entry_point();
     UNREACHABLE();
 }
 
@@ -59,7 +52,7 @@ void Fiber::OnRewind([[maybe_unused]] boost::context::detail::transfer_t& transf
     u8* tmp = impl->stack_limit;
     impl->stack_limit = impl->rewind_stack_limit;
     impl->rewind_stack_limit = tmp;
-    impl->rewind_point(impl->rewind_parameter);
+    impl->rewind_point();
     UNREACHABLE();
 }
 
@@ -73,10 +66,8 @@ void Fiber::RewindStartFunc(boost::context::detail::transfer_t transfer) {
     fiber->OnRewind(transfer);
 }
 
-Fiber::Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter)
-    : impl{std::make_unique<FiberImpl>()} {
+Fiber::Fiber(std::function<void()>&& entry_point_func) : impl{std::make_unique<FiberImpl>()} {
     impl->entry_point = std::move(entry_point_func);
-    impl->start_parameter = start_parameter;
     impl->stack_limit = impl->stack.data();
     impl->rewind_stack_limit = impl->rewind_stack.data();
     u8* stack_base = impl->stack_limit + default_stack_size;
diff --git a/src/common/fiber.h b/src/common/fiber.h
index 873604bc6..f24d333a3 100644
--- a/src/common/fiber.h
+++ b/src/common/fiber.h
@@ -29,7 +29,7 @@ namespace Common {
  */
 class Fiber {
 public:
-    Fiber(std::function<void(void*)>&& entry_point_func, void* start_parameter);
+    Fiber(std::function<void()>&& entry_point_func);
     ~Fiber();
 
     Fiber(const Fiber&) = delete;
@@ -43,16 +43,13 @@ public:
    static void YieldTo(std::weak_ptr<Fiber> weak_from, Fiber& to);
     [[nodiscard]] static std::shared_ptr<Fiber> ThreadToFiber();
 
-    void SetRewindPoint(std::function<void(void*)>&& rewind_func, void* rewind_param);
+    void SetRewindPoint(std::function<void()>&& rewind_func);
 
     void Rewind();
 
     /// Only call from main thread's fiber
     void Exit();
 
-    /// Changes the start parameter of the fiber. Has no effect if the fiber already started
-    void SetStartParameter(void* new_parameter);
-
 private:
     Fiber();
 
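
The Fiber API above drops the C-style void(void*) entry point and its opaque start parameter in favor of std::function<void()>: callers now capture whatever state they need in a lambda, which is what the CpuManager, KScheduler, and test changes below rely on. A minimal standalone sketch of the calling-convention change; the Runner type here is a hypothetical stand-in for Common::Fiber, which needs boost.context to actually switch stacks:

    #include <functional>
    #include <iostream>
    #include <utility>

    // Stand-in for a fiber-like type: stores an entry point and runs it later.
    class Runner {
    public:
        explicit Runner(std::function<void()>&& entry) : entry_point{std::move(entry)} {}
        void Start() { entry_point(); }

    private:
        std::function<void()> entry_point;
    };

    struct Worker {
        int id;
        void DoWork() { std::cout << "worker " << id << " running\n"; }
    };

    int main() {
        Worker w{7};
        // The old style would have been Runner{callback, &w} with a void* cast
        // inside the callback; the lambda capture is explicit and type-safe.
        Runner r{[&w] { w.DoWork(); }};
        r.Start();
    }
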
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index f932a7290..919e33af9 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -47,6 +47,9 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
     case ThreadPriority::VeryHigh:
         windows_priority = THREAD_PRIORITY_HIGHEST;
         break;
+    case ThreadPriority::Critical:
+        windows_priority = THREAD_PRIORITY_TIME_CRITICAL;
+        break;
     default:
         windows_priority = THREAD_PRIORITY_NORMAL;
         break;
@@ -59,9 +62,10 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
 void SetCurrentThreadPriority(ThreadPriority new_priority) {
     pthread_t this_thread = pthread_self();
 
-    s32 max_prio = sched_get_priority_max(SCHED_OTHER);
-    s32 min_prio = sched_get_priority_min(SCHED_OTHER);
-    u32 level = static_cast<u32>(new_priority) + 1;
+    const auto scheduling_type = SCHED_OTHER;
+    s32 max_prio = sched_get_priority_max(scheduling_type);
+    s32 min_prio = sched_get_priority_min(scheduling_type);
+    u32 level = std::max(static_cast<u32>(new_priority) + 1, 4U);
 
     struct sched_param params;
     if (max_prio > min_prio) {
@@ -70,7 +74,7 @@ void SetCurrentThreadPriority(ThreadPriority new_priority) {
         params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
     }
 
-    pthread_setschedparam(this_thread, SCHED_OTHER, &params);
+    pthread_setschedparam(this_thread, scheduling_type, &params);
 }
 
 #endif
diff --git a/src/common/thread.h b/src/common/thread.h
index a63122516..1552f58e0 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -92,6 +92,7 @@ enum class ThreadPriority : u32 {
     Normal = 1,
     High = 2,
     VeryHigh = 3,
+    Critical = 4,
 };
 
 void SetCurrentThreadPriority(ThreadPriority new_priority);
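
The new ThreadPriority::Critical maps to THREAD_PRIORITY_TIME_CRITICAL on Windows; on POSIX the enum value is clamped into a level and interpolated across the scheduling policy's priority range in quarter steps. A standalone sketch of that path under stated assumptions: the diff only shows the branch taken when the range is degenerate, so the max_prio > min_prio interpolation below is a guessed symmetric counterpart, and on Linux SCHED_OTHER reports 0 for both bounds, making the call effectively a no-op:

    #include <algorithm>
    #include <cstdint>
    #include <pthread.h>
    #include <sched.h>

    // The enum value (Low = 0 .. Critical = 4) becomes a level of at least 4,
    // then is spread across the policy's reported priority range.
    void SetPriorityLevel(std::uint32_t enum_value) {
        const int policy = SCHED_OTHER;
        const int max_prio = sched_get_priority_max(policy);
        const int min_prio = sched_get_priority_min(policy);
        const auto level = static_cast<int>(std::max<std::uint32_t>(enum_value + 1, 4));

        sched_param params{};
        if (max_prio > min_prio) {
            // Assumed counterpart of the branch shown in the diff above.
            params.sched_priority = min_prio + ((max_prio - min_prio) * level) / 4;
        } else {
            params.sched_priority = min_prio - ((min_prio - max_prio) * level) / 4;
        }
        pthread_setschedparam(pthread_self(), policy, &params);
    }

    int main() {
        SetPriorityLevel(4); // ThreadPriority::Critical
    }
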
diff --git a/src/common/uint128.h b/src/common/uint128.h
index f890ffec2..199d0f55e 100644
--- a/src/common/uint128.h
+++ b/src/common/uint128.h
@@ -31,12 +31,17 @@ namespace Common {
     return _udiv128(r[1], r[0], d, &remainder);
 #endif
 #else
+#ifdef __SIZEOF_INT128__
+    const auto product = static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b);
+    return static_cast<u64>(product / d);
+#else
     const u64 diva = a / d;
     const u64 moda = a % d;
     const u64 divb = b / d;
     const u64 modb = b % d;
     return diva * b + moda * divb + moda * modb / d;
 #endif
+#endif
 }
 
 // This function multiplies 2 u64 values and produces a u128 value;
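
With GCC/Clang's non-standard unsigned __int128 (advertised via __SIZEOF_INT128__), (a * b) / d is computed exactly in one step; the pre-existing fallback splits a into quotient and remainder by d so no intermediate product overflows 64 bits, at the cost of truncation in the final term. A standalone comparison of the two paths:

    #include <cstdint>
    #include <iostream>

    using u64 = std::uint64_t;

    // Exact (a * b) / d via the 128-bit compiler extension.
    u64 MultiplyAndDivide64Wide(u64 a, u64 b, u64 d) {
        const auto product = static_cast<unsigned __int128>(a) * b;
        return static_cast<u64>(product / d);
    }

    // Portable fallback: a = (a/d)*d + a%d keeps every product within 64 bits,
    // but the last term truncates, so it can be slightly less precise.
    u64 MultiplyAndDivide64Split(u64 a, u64 b, u64 d) {
        const u64 diva = a / d;
        const u64 moda = a % d;
        const u64 divb = b / d;
        const u64 modb = b % d;
        return diva * b + moda * divb + moda * modb / d;
    }

    int main() {
        const u64 a = 1ULL << 63, b = 3000000000ULL, d = 1000000000ULL;
        std::cout << MultiplyAndDivide64Wide(a, b, d) << '\n'
                  << MultiplyAndDivide64Split(a, b, d) << '\n';
    }
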
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index 1b7194503..6aaa8cdf9 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -75,8 +75,8 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
 }
 
 u64 NativeClock::GetRTSC() {
-    TimePoint new_time_point{};
     TimePoint current_time_point{};
+    TimePoint new_time_point{};
 
     current_time_point.pack = Common::AtomicLoad128(time_point.pack.data());
     do {
@@ -89,8 +89,7 @@ u64 NativeClock::GetRTSC() {
         new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
     } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
                                            current_time_point.pack, current_time_point.pack));
-    /// The clock cannot be more precise than the guest timer, remove the lower bits
-    return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
+    return new_time_point.inner.accumulated_ticks;
 }
 
 void NativeClock::Pause(bool is_paused) {
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h
index 30d2ba2e9..38ae7a462 100644
--- a/src/common/x64/native_clock.h
+++ b/src/common/x64/native_clock.h
@@ -37,12 +37,8 @@ private:
         } inner;
     };
 
-    /// value used to reduce the native clocks accuracy as some apss rely on
-    /// undefined behavior where the level of accuracy in the clock shouldn't
-    /// be higher.
-    static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);
-
     TimePoint time_point;
+
     // factors
     u64 clock_rtsc_factor{};
     u64 cpu_rtsc_factor{};
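
For reference, the removed inaccuracy_mask rounded the accumulated tick count down to a multiple of 0x400 by clearing the low 10 bits, deliberately coarsening the host clock for titles that relied on it; removing it returns full-precision ticks. A one-liner showing what the old mask did:

    #include <cstdint>
    #include <iostream>

    int main() {
        // ~(0x400 - 1) == ~0x3FF clears the low 10 bits of the tick count.
        constexpr std::uint64_t inaccuracy_mask = ~(UINT64_C(0x400) - 1);
        const std::uint64_t ticks = 0x12345;
        std::cout << std::hex << (ticks & inaccuracy_mask) << '\n'; // prints 12000
    }
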
diff --git a/src/core/arm/arm_interface.cpp b/src/core/arm/arm_interface.cpp
index 8e095cdcd..0efc3732f 100644
--- a/src/core/arm/arm_interface.cpp
+++ b/src/core/arm/arm_interface.cpp
@@ -119,16 +119,23 @@ void ARM_Interface::Run() {
         }
         system.ExitDynarmicProfile();
 
-        // Notify the debugger and go to sleep if a breakpoint was hit.
-        if (Has(hr, breakpoint)) {
+        // Notify the debugger and go to sleep if a breakpoint was hit,
+        // or if the thread is unable to continue for any reason.
+        if (Has(hr, breakpoint) || Has(hr, no_execute)) {
             RewindBreakpointInstruction();
-            system.GetDebugger().NotifyThreadStopped(current_thread);
-            current_thread->RequestSuspend(SuspendType::Debug);
+            if (system.DebuggerEnabled()) {
+                system.GetDebugger().NotifyThreadStopped(current_thread);
+            }
+            current_thread->RequestSuspend(Kernel::SuspendType::Debug);
             break;
         }
+
+        // Notify the debugger and go to sleep if a watchpoint was hit.
         if (Has(hr, watchpoint)) {
             RewindBreakpointInstruction();
-            system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
+            if (system.DebuggerEnabled()) {
+                system.GetDebugger().NotifyThreadWatchpoint(current_thread, *HaltedWatchpoint());
+            }
             current_thread->RequestSuspend(SuspendType::Debug);
             break;
         }
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 4e431e27a..8a066ed91 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -204,6 +204,7 @@ public:
     static constexpr Dynarmic::HaltReason svc_call = Dynarmic::HaltReason::UserDefined3;
     static constexpr Dynarmic::HaltReason breakpoint = Dynarmic::HaltReason::UserDefined4;
     static constexpr Dynarmic::HaltReason watchpoint = Dynarmic::HaltReason::UserDefined5;
+    static constexpr Dynarmic::HaltReason no_execute = Dynarmic::HaltReason::UserDefined6;
 
 protected:
     /// System context that this ARM interface is running under.
diff --git a/src/core/arm/dynarmic/arm_dynarmic_32.cpp b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
index 8c90c8be0..10cf72a45 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_32.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_32.cpp
@@ -48,6 +48,12 @@ public:
         CheckMemoryAccess(vaddr, 8, Kernel::DebugWatchpointType::Read);
         return memory.Read64(vaddr);
     }
+    std::optional<u32> MemoryReadCode(u32 vaddr) override {
+        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
+            return std::nullopt;
+        }
+        return MemoryRead32(vaddr);
+    }
 
     void MemoryWrite8(u32 vaddr, u8 value) override {
         if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
@@ -89,21 +95,28 @@ public:
 
     void InterpreterFallback(u32 pc, std::size_t num_instructions) override {
         parent.LogBacktrace();
-        UNIMPLEMENTED_MSG("This should never happen, pc = {:08X}, code = {:08X}", pc,
-                          MemoryReadCode(pc));
+        LOG_ERROR(Core_ARM,
+                  "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
+                  num_instructions, MemoryRead32(pc));
     }
 
     void ExceptionRaised(u32 pc, Dynarmic::A32::Exception exception) override {
-        if (debugger_enabled) {
-            parent.SaveContext(parent.breakpoint_context);
-            parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
+        switch (exception) {
+        case Dynarmic::A32::Exception::NoExecuteFault:
+            LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#08x}", pc);
+            ReturnException(pc, ARM_Interface::no_execute);
             return;
-        }
+        default:
+            if (debugger_enabled) {
+                ReturnException(pc, ARM_Interface::breakpoint);
+                return;
+            }
 
         parent.LogBacktrace();
         LOG_CRITICAL(Core_ARM,
                      "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X}, thumb = {})",
-                     exception, pc, MemoryReadCode(pc), parent.IsInThumbMode());
+                     exception, pc, MemoryRead32(pc), parent.IsInThumbMode());
+        }
     }
 
     void CallSVC(u32 swi) override {
@@ -141,15 +154,20 @@ public:
 
         const auto match{parent.MatchingWatchpoint(addr, size, type)};
         if (match) {
-            parent.SaveContext(parent.breakpoint_context);
-            parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
             parent.halted_watchpoint = match;
+            ReturnException(parent.jit.load()->Regs()[15], ARM_Interface::watchpoint);
             return false;
         }
 
         return true;
     }
 
+    void ReturnException(u32 pc, Dynarmic::HaltReason hr) {
+        parent.SaveContext(parent.breakpoint_context);
+        parent.breakpoint_context.cpu_registers[15] = pc;
+        parent.jit.load()->HaltExecution(hr);
+    }
+
     ARM_Dynarmic_32& parent;
     Core::Memory::Memory& memory;
     std::size_t num_interpreted_instructions{};
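
The new MemoryReadCode override returns std::nullopt for unmapped code addresses, letting Dynarmic raise a NoExecuteFault (handled above via the no_execute halt reason) instead of the emulator asserting inside a raw read. A reduced, self-contained sketch of the validate-then-read pattern; the Memory class here is a hypothetical stand-in for yuzu's guest memory interface:

    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <unordered_map>

    // Stand-in for the guest memory interface: a sparse word map.
    class Memory {
    public:
        void Map(std::uint32_t addr, std::uint32_t value) { pages[addr] = value; }
        bool IsValidVirtualAddressRange(std::uint32_t addr, std::size_t) const {
            return pages.count(addr) != 0;
        }
        std::uint32_t Read32(std::uint32_t addr) const { return pages.at(addr); }

    private:
        std::unordered_map<std::uint32_t, std::uint32_t> pages;
    };

    // Validate first; the caller turns nullopt into a fault instead of
    // reading through an unmapped address.
    std::optional<std::uint32_t> MemoryReadCode(const Memory& memory, std::uint32_t vaddr) {
        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(std::uint32_t))) {
            return std::nullopt;
        }
        return memory.Read32(vaddr);
    }

    int main() {
        Memory memory;
        memory.Map(0x1000, 0xE320F000); // an ARM NOP, mapped
        if (const auto instr = MemoryReadCode(memory, 0x1000)) {
            std::cout << std::hex << *instr << '\n';
        }
        if (!MemoryReadCode(memory, 0x2000)) {
            std::cout << "no-execute fault\n"; // unmapped: caller raises the fault
        }
    }
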
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 4370ca294..92266aa9e 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -52,6 +52,12 @@ public:
         CheckMemoryAccess(vaddr, 16, Kernel::DebugWatchpointType::Read);
         return {memory.Read64(vaddr), memory.Read64(vaddr + 8)};
     }
+    std::optional<u32> MemoryReadCode(u64 vaddr) override {
+        if (!memory.IsValidVirtualAddressRange(vaddr, sizeof(u32))) {
+            return std::nullopt;
+        }
+        return MemoryRead32(vaddr);
+    }
 
     void MemoryWrite8(u64 vaddr, u8 value) override {
         if (CheckMemoryAccess(vaddr, 1, Kernel::DebugWatchpointType::Write)) {
@@ -105,7 +111,7 @@ public:
         parent.LogBacktrace();
         LOG_ERROR(Core_ARM,
                   "Unimplemented instruction @ 0x{:X} for {} instructions (instr = {:08X})", pc,
-                  num_instructions, MemoryReadCode(pc));
+                  num_instructions, MemoryRead32(pc));
     }
 
     void InstructionCacheOperationRaised(Dynarmic::A64::InstructionCacheOperation op,
@@ -138,16 +144,19 @@ public:
         case Dynarmic::A64::Exception::SendEventLocal:
         case Dynarmic::A64::Exception::Yield:
             return;
+        case Dynarmic::A64::Exception::NoExecuteFault:
+            LOG_CRITICAL(Core_ARM, "Cannot execute instruction at unmapped address {:#016x}", pc);
+            ReturnException(pc, ARM_Interface::no_execute);
+            return;
         default:
             if (debugger_enabled) {
-                parent.SaveContext(parent.breakpoint_context);
-                parent.jit.load()->HaltExecution(ARM_Interface::breakpoint);
+                ReturnException(pc, ARM_Interface::breakpoint);
                 return;
             }
 
             parent.LogBacktrace();
-            ASSERT_MSG(false, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
-                       static_cast<std::size_t>(exception), pc, MemoryReadCode(pc));
+            LOG_CRITICAL(Core_ARM, "ExceptionRaised(exception = {}, pc = {:08X}, code = {:08X})",
+                         static_cast<std::size_t>(exception), pc, MemoryRead32(pc));
         }
     }
 
@@ -188,15 +197,20 @@ public:
 
         const auto match{parent.MatchingWatchpoint(addr, size, type)};
         if (match) {
-            parent.SaveContext(parent.breakpoint_context);
-            parent.jit.load()->HaltExecution(ARM_Interface::watchpoint);
             parent.halted_watchpoint = match;
+            ReturnException(parent.jit.load()->GetPC(), ARM_Interface::watchpoint);
             return false;
         }
 
         return true;
     }
 
+    void ReturnException(u64 pc, Dynarmic::HaltReason hr) {
+        parent.SaveContext(parent.breakpoint_context);
+        parent.breakpoint_context.pc = pc;
+        parent.jit.load()->HaltExecution(hr);
+    }
+
     ARM_Dynarmic_64& parent;
     Core::Memory::Memory& memory;
     u64 tpidrro_el0 = 0;
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 29e7dba9b..140578069 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -6,7 +6,9 @@
 #include <string>
 #include <tuple>
 
+#include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/thread.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hardware_properties.h"
@@ -41,11 +43,11 @@ CoreTiming::CoreTiming()
 
 CoreTiming::~CoreTiming() = default;
 
-void CoreTiming::ThreadEntry(CoreTiming& instance) {
-    constexpr char name[] = "yuzu:HostTiming";
-    MicroProfileOnThreadCreate(name);
-    Common::SetCurrentThreadName(name);
-    Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
+void CoreTiming::ThreadEntry(CoreTiming& instance, size_t id) {
+    const std::string name = "yuzu:HostTiming_" + std::to_string(id);
+    MicroProfileOnThreadCreate(name.c_str());
+    Common::SetCurrentThreadName(name.c_str());
+    Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
     instance.on_thread_init();
     instance.ThreadLoop();
     MicroProfileOnThreadExit();
@@ -59,68 +61,97 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
     ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
-        timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
+        const auto hardware_concurrency = std::thread::hardware_concurrency();
+        size_t id = 0;
+        worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
+        if (hardware_concurrency > 8) {
+            worker_threads.emplace_back(ThreadEntry, std::ref(*this), id++);
+        }
     }
 }
 
 void CoreTiming::Shutdown() {
-    paused = true;
+    is_paused = true;
     shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
+    std::atomic_thread_fence(std::memory_order_release);
+
+    event_cv.notify_all();
+    wait_pause_cv.notify_all();
+    for (auto& thread : worker_threads) {
+        thread.join();
     }
+    worker_threads.clear();
     ClearPendingEvents();
-    timer_thread.reset();
     has_started = false;
 }
 
-void CoreTiming::Pause(bool is_paused) {
-    paused = is_paused;
-    pause_event.Set();
+void CoreTiming::Pause(bool is_paused_) {
+    std::unique_lock main_lock(event_mutex);
+    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
+        return;
+    }
+    if (is_multicore) {
+        is_paused = is_paused_;
+        event_cv.notify_all();
+        if (!is_paused_) {
+            wait_pause_cv.notify_all();
+        }
+    }
+    paused_state.store(is_paused_, std::memory_order_relaxed);
 }
 
-void CoreTiming::SyncPause(bool is_paused) {
-    if (is_paused == paused && paused_set == paused) {
+void CoreTiming::SyncPause(bool is_paused_) {
+    std::unique_lock main_lock(event_mutex);
+    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
         return;
     }
-    Pause(is_paused);
-    if (timer_thread) {
-        if (!is_paused) {
-            pause_event.Set();
+
+    if (is_multicore) {
+        is_paused = is_paused_;
+        event_cv.notify_all();
+        if (!is_paused_) {
+            wait_pause_cv.notify_all();
+        }
+    }
+    paused_state.store(is_paused_, std::memory_order_relaxed);
+    if (is_multicore) {
+        if (is_paused_) {
+            wait_signal_cv.wait(main_lock, [this] { return pause_count == worker_threads.size(); });
+        } else {
+            wait_signal_cv.wait(main_lock, [this] { return pause_count == 0; });
         }
-        event.Set();
-        while (paused_set != is_paused)
-            ;
     }
 }
 
 bool CoreTiming::IsRunning() const {
-    return !paused_set;
+    return !paused_state.load(std::memory_order_acquire);
 }
 
 bool CoreTiming::HasPendingEvents() const {
-    return !(wait_set && event_queue.empty());
+    std::unique_lock main_lock(event_mutex);
+    return !event_queue.empty() || pending_events.load(std::memory_order_relaxed) != 0;
 }
 
 void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
                                const std::shared_ptr<EventType>& event_type,
                                std::uintptr_t user_data) {
-    {
-        std::scoped_lock scope{basic_lock};
-        const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
 
-        event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
+    std::unique_lock main_lock(event_mutex);
+    const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
+
+    event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
+    pending_events.fetch_add(1, std::memory_order_relaxed);
 
-        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+
+    if (is_multicore) {
+        event_cv.notify_one();
     }
-    event.Set();
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
                                 std::uintptr_t user_data) {
-    std::scoped_lock scope{basic_lock};
+    std::unique_lock main_lock(event_mutex);
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get() && e.user_data == user_data;
     });
@@ -129,6 +160,7 @@ void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
     if (itr != event_queue.end()) {
         event_queue.erase(itr, event_queue.end());
         std::make_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+        pending_events.fetch_sub(1, std::memory_order_relaxed);
     }
 }
 
@@ -168,11 +200,12 @@ u64 CoreTiming::GetClockTicks() const {
 }
 
 void CoreTiming::ClearPendingEvents() {
+    std::unique_lock main_lock(event_mutex);
     event_queue.clear();
 }
 
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    std::scoped_lock lock{basic_lock};
+    std::unique_lock main_lock(event_mutex);
 
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get();
@@ -186,21 +219,28 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
 }
 
 std::optional<s64> CoreTiming::Advance() {
-    std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
+    std::unique_lock main_lock(event_mutex);
     while (!event_queue.empty() && event_queue.front().time <= global_timer) {
         Event evt = std::move(event_queue.front());
         std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
         event_queue.pop_back();
-        basic_lock.unlock();
 
         if (const auto event_type{evt.type.lock()}) {
-            event_type->callback(
-                evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
+            sequence_mutex.lock();
+            event_mutex.unlock();
+
+            event_type->guard.lock();
+            sequence_mutex.unlock();
+            const s64 delay = static_cast<s64>(GetGlobalTimeNs().count() - evt.time);
+            event_type->callback(evt.user_data, std::chrono::nanoseconds{delay});
+            event_type->guard.unlock();
+
+            event_mutex.lock();
+            pending_events.fetch_sub(1, std::memory_order_relaxed);
         }
 
-        basic_lock.lock();
         global_timer = GetGlobalTimeNs().count();
     }
 
@@ -213,26 +253,34 @@ std::optional<s64> CoreTiming::Advance() {
 }
 
 void CoreTiming::ThreadLoop() {
+    const auto predicate = [this] { return !event_queue.empty() || is_paused; };
    has_started = true;
     while (!shutting_down) {
-        while (!paused) {
-            paused_set = false;
+        while (!is_paused && !shutting_down) {
             const auto next_time = Advance();
             if (next_time) {
                 if (*next_time > 0) {
                     std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
-                    event.WaitFor(next_time_ns);
+                    std::unique_lock main_lock(event_mutex);
+                    event_cv.wait_for(main_lock, next_time_ns, predicate);
                 }
             } else {
-                wait_set = true;
-                event.Wait();
+                std::unique_lock main_lock(event_mutex);
+                event_cv.wait(main_lock, predicate);
             }
-            wait_set = false;
         }
-        paused_set = true;
-        clock->Pause(true);
-        pause_event.Wait();
-        clock->Pause(false);
+        std::unique_lock main_lock(event_mutex);
+        pause_count++;
+        if (pause_count == worker_threads.size()) {
+            clock->Pause(true);
+            wait_signal_cv.notify_all();
+        }
+        wait_pause_cv.wait(main_lock, [this] { return !is_paused || shutting_down; });
+        pause_count--;
+        if (pause_count == 0) {
+            clock->Pause(false);
+            wait_signal_cv.notify_all();
+        }
     }
 }
 
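
The rewritten ThreadLoop/SyncPause pair replaces the Common::Event flags and busy-wait with a condition-variable rendezvous: each worker bumps pause_count under event_mutex, the last one to park pauses the clock and signals wait_signal_cv, and SyncPause blocks until the count reaches the full worker count (or drains back to zero on resume). A self-contained model of that rendezvous, with names borrowed from the patch but the timer work reduced to a wait:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Rendezvous {
        std::mutex event_mutex;
        std::condition_variable wait_pause_cv;  // workers sleep here while paused
        std::condition_variable wait_signal_cv; // controller waits on pause_count here
        bool is_paused = false;
        bool shutting_down = false;
        std::size_t pause_count = 0;
        std::size_t worker_count = 0;

        void PausePoint(std::unique_lock<std::mutex>& lk) {
            pause_count++;
            if (pause_count == worker_count) {
                wait_signal_cv.notify_all(); // last one in: all workers parked
            }
            wait_pause_cv.wait(lk, [this] { return !is_paused || shutting_down; });
            pause_count--;
            if (pause_count == 0) {
                wait_signal_cv.notify_all(); // last one out: all workers resumed
            }
        }

        void SyncPause(bool pause) {
            std::unique_lock lk{event_mutex};
            is_paused = pause;
            wait_pause_cv.notify_all();
            if (pause) {
                wait_signal_cv.wait(lk, [this] { return pause_count == worker_count; });
            } else {
                wait_signal_cv.wait(lk, [this] { return pause_count == 0; });
            }
        }
    };

    int main() {
        Rendezvous r;
        r.worker_count = 2;
        std::vector<std::thread> workers;
        for (std::size_t i = 0; i < r.worker_count; ++i) {
            workers.emplace_back([&r] {
                std::unique_lock lk{r.event_mutex};
                while (!r.shutting_down) {
                    if (r.is_paused) {
                        r.PausePoint(lk);
                    } else {
                        // Stand-in for timer work: sleep until paused or shut down.
                        r.wait_pause_cv.wait(
                            lk, [&r] { return r.is_paused || r.shutting_down; });
                    }
                }
            });
        }
        r.SyncPause(true);
        std::puts("all workers paused");
        r.SyncPause(false);
        std::puts("all workers resumed");
        {
            std::unique_lock lk{r.event_mutex};
            r.shutting_down = true;
        }
        r.wait_pause_cv.notify_all();
        for (auto& t : workers) {
            t.join();
        }
    }
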
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index d27773009..a86553e08 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -5,6 +5,7 @@
 
 #include <atomic>
 #include <chrono>
+#include <condition_variable>
 #include <functional>
 #include <memory>
 #include <mutex>
@@ -14,7 +15,6 @@
 #include <vector>
 
 #include "common/common_types.h"
-#include "common/thread.h"
 #include "common/wall_clock.h"
 
 namespace Core::Timing {
@@ -32,6 +32,7 @@ struct EventType {
     TimedCallback callback;
     /// A pointer to the name of the event.
     const std::string name;
+    mutable std::mutex guard;
 };
 
 /**
@@ -131,7 +132,7 @@ private:
     /// Clear all pending events. This should ONLY be done on exit.
     void ClearPendingEvents();
 
-    static void ThreadEntry(CoreTiming& instance);
+    static void ThreadEntry(CoreTiming& instance, size_t id);
     void ThreadLoop();
 
     std::unique_ptr<Common::WallClock> clock;
@@ -144,21 +145,25 @@ private:
     // accomodated by the standard adaptor class.
     std::vector<Event> event_queue;
     u64 event_fifo_id = 0;
+    std::atomic<size_t> pending_events{};
 
     std::shared_ptr<EventType> ev_lost;
-    Common::Event event{};
-    Common::Event pause_event{};
-    std::mutex basic_lock;
-    std::mutex advance_lock;
-    std::unique_ptr<std::thread> timer_thread;
-    std::atomic<bool> paused{};
-    std::atomic<bool> paused_set{};
-    std::atomic<bool> wait_set{};
-    std::atomic<bool> shutting_down{};
     std::atomic<bool> has_started{};
     std::function<void()> on_thread_init{};
 
+    std::vector<std::thread> worker_threads;
+
+    std::condition_variable event_cv;
+    std::condition_variable wait_pause_cv;
+    std::condition_variable wait_signal_cv;
+    mutable std::mutex event_mutex;
+    mutable std::mutex sequence_mutex;
+
+    std::atomic<bool> paused_state{};
+    bool is_paused{};
+    bool shutting_down{};
     bool is_multicore{};
+    size_t pause_count{};
 
     /// Cycle timing
     u64 ticks{};
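
With more than one host timing thread, two workers can pop events concurrently, so EventType gains a per-type guard mutex: callbacks of the same event type never overlap, and sequence_mutex fixes the order in which workers that popped the same type line up for the guard. A reduced sketch of the hand-over-hand locking used in Advance(); the queue itself is elided:

    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex event_mutex;    // protects the event queue
    std::mutex sequence_mutex; // orders guard acquisition between workers
    std::mutex guard;          // per-event-type: serializes that type's callbacks

    void RunOneEvent(int worker) {
        std::unique_lock main_lock{event_mutex};
        // ... pop an event from the queue here ...

        // Hand-over-hand: take sequence_mutex before releasing event_mutex so a
        // second worker that pops the same event type queues up behind us in order.
        sequence_mutex.lock();
        main_lock.unlock();

        guard.lock();
        sequence_mutex.unlock();
        std::printf("worker %d runs callback\n", worker); // only guard is held here
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        guard.unlock();

        main_lock.lock();
        // ... continue draining the queue ...
    }

    int main() {
        std::thread a{RunOneEvent, 1}, b{RunOneEvent, 2};
        a.join();
        b.join();
    }
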
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index fd6928105..37d3d83b9 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -41,51 +41,32 @@ void CpuManager::Shutdown() {
     }
 }
 
-std::function<void(void*)> CpuManager::GetGuestThreadStartFunc() {
-    return GuestThreadFunction;
-}
-
-std::function<void(void*)> CpuManager::GetIdleThreadStartFunc() {
-    return IdleThreadFunction;
-}
-
-std::function<void(void*)> CpuManager::GetShutdownThreadStartFunc() {
-    return ShutdownThreadFunction;
-}
-
-void CpuManager::GuestThreadFunction(void* cpu_manager_) {
-    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
-    if (cpu_manager->is_multicore) {
-        cpu_manager->MultiCoreRunGuestThread();
+void CpuManager::GuestThreadFunction() {
+    if (is_multicore) {
+        MultiCoreRunGuestThread();
     } else {
-        cpu_manager->SingleCoreRunGuestThread();
+        SingleCoreRunGuestThread();
     }
 }
 
-void CpuManager::GuestRewindFunction(void* cpu_manager_) {
-    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
-    if (cpu_manager->is_multicore) {
-        cpu_manager->MultiCoreRunGuestLoop();
+void CpuManager::GuestRewindFunction() {
+    if (is_multicore) {
+        MultiCoreRunGuestLoop();
     } else {
-        cpu_manager->SingleCoreRunGuestLoop();
+        SingleCoreRunGuestLoop();
     }
 }
 
-void CpuManager::IdleThreadFunction(void* cpu_manager_) {
-    CpuManager* cpu_manager = static_cast<CpuManager*>(cpu_manager_);
-    if (cpu_manager->is_multicore) {
-        cpu_manager->MultiCoreRunIdleThread();
+void CpuManager::IdleThreadFunction() {
+    if (is_multicore) {
+        MultiCoreRunIdleThread();
     } else {
-        cpu_manager->SingleCoreRunIdleThread();
+        SingleCoreRunIdleThread();
     }
 }
 
-void CpuManager::ShutdownThreadFunction(void* cpu_manager) {
-    static_cast<CpuManager*>(cpu_manager)->ShutdownThread();
-}
-
-void* CpuManager::GetStartFuncParameter() {
-    return this;
+void CpuManager::ShutdownThreadFunction() {
+    ShutdownThread();
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -97,7 +78,7 @@ void CpuManager::MultiCoreRunGuestThread() {
     kernel.CurrentScheduler()->OnThreadStart();
     auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
     auto& host_context = thread->GetHostContext();
-    host_context->SetRewindPoint(GuestRewindFunction, this);
+    host_context->SetRewindPoint([this] { GuestRewindFunction(); });
     MultiCoreRunGuestLoop();
 }
 
@@ -134,7 +115,7 @@ void CpuManager::SingleCoreRunGuestThread() {
     kernel.CurrentScheduler()->OnThreadStart();
     auto* thread = kernel.CurrentScheduler()->GetSchedulerCurrentThread();
     auto& host_context = thread->GetHostContext();
-    host_context->SetRewindPoint(GuestRewindFunction, this);
+    host_context->SetRewindPoint([this] { GuestRewindFunction(); });
     SingleCoreRunGuestLoop();
 }
 
@@ -194,7 +175,9 @@ void CpuManager::PreemptSingleCore(bool from_running_enviroment) {
     {
         auto& scheduler = system.Kernel().Scheduler(current_core);
         scheduler.Reload(scheduler.GetSchedulerCurrentThread());
-        idle_count = 0;
+        if (!scheduler.IsIdle()) {
+            idle_count = 0;
+        }
     }
 }
 
diff --git a/src/core/cpu_manager.h b/src/core/cpu_manager.h
index f0751fc58..76dc58ee1 100644
--- a/src/core/cpu_manager.h
+++ b/src/core/cpu_manager.h
@@ -50,10 +50,15 @@ public:
     void Initialize();
     void Shutdown();
 
-    static std::function<void(void*)> GetGuestThreadStartFunc();
-    static std::function<void(void*)> GetIdleThreadStartFunc();
-    static std::function<void(void*)> GetShutdownThreadStartFunc();
-    void* GetStartFuncParameter();
+    std::function<void()> GetGuestThreadStartFunc() {
+        return [this] { GuestThreadFunction(); };
+    }
+    std::function<void()> GetIdleThreadStartFunc() {
+        return [this] { IdleThreadFunction(); };
+    }
+    std::function<void()> GetShutdownThreadStartFunc() {
+        return [this] { ShutdownThreadFunction(); };
+    }
 
     void PreemptSingleCore(bool from_running_enviroment = true);
 
@@ -62,10 +67,10 @@ public:
     }
 
 private:
-    static void GuestThreadFunction(void* cpu_manager);
-    static void GuestRewindFunction(void* cpu_manager);
-    static void IdleThreadFunction(void* cpu_manager);
-    static void ShutdownThreadFunction(void* cpu_manager);
+    void GuestThreadFunction();
+    void GuestRewindFunction();
+    void IdleThreadFunction();
+    void ShutdownThreadFunction();
 
     void MultiCoreRunGuestThread();
     void MultiCoreRunGuestLoop();
diff --git a/src/core/debugger/gdbstub_arch.cpp b/src/core/debugger/gdbstub_arch.cpp
index 750c353b9..4bef09bd7 100644
--- a/src/core/debugger/gdbstub_arch.cpp
+++ b/src/core/debugger/gdbstub_arch.cpp
@@ -191,8 +191,10 @@ std::string GDBStubA64::RegRead(const Kernel::KThread* thread, size_t id) const
     const auto& gprs{context.cpu_registers};
     const auto& fprs{context.vector_registers};
 
-    if (id <= SP_REGISTER) {
+    if (id < SP_REGISTER) {
         return ValueToHex(gprs[id]);
+    } else if (id == SP_REGISTER) {
+        return ValueToHex(context.sp);
     } else if (id == PC_REGISTER) {
         return ValueToHex(context.pc);
     } else if (id == PSTATE_REGISTER) {
@@ -215,8 +217,10 @@ void GDBStubA64::RegWrite(Kernel::KThread* thread, size_t id, std::string_view v
 
     auto& context{thread->GetContext64()};
 
-    if (id <= SP_REGISTER) {
+    if (id < SP_REGISTER) {
         context.cpu_registers[id] = HexToValue<u64>(value);
+    } else if (id == SP_REGISTER) {
+        context.sp = HexToValue<u64>(value);
     } else if (id == PC_REGISTER) {
         context.pc = HexToValue<u64>(value);
     } else if (id == PSTATE_REGISTER) {
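
In the AArch64 GDB register numbering assumed here, ids 0-30 are x0-x30, id 31 is SP, followed by PC and PSTATE. Since the context stores only the 31 general-purpose registers in its array, the old id <= SP_REGISTER comparison indexed one past x30 instead of reading the separately stored stack pointer; the fix routes id 31 to context.sp. A reduced sketch of the corrected routing:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Assumed numbering: x0-x30 are ids 0-30, then SP, PC.
    constexpr std::size_t SP_REGISTER = 31;
    constexpr std::size_t PC_REGISTER = 32;

    struct ThreadContext {
        std::array<std::uint64_t, 31> cpu_registers; // x0..x30 only
        std::uint64_t sp;
        std::uint64_t pc;
    };

    std::uint64_t RegRead(const ThreadContext& context, std::size_t id) {
        if (id < SP_REGISTER) {
            return context.cpu_registers[id]; // "<=" here would index past x30
        } else if (id == SP_REGISTER) {
            return context.sp;
        } else if (id == PC_REGISTER) {
            return context.pc;
        }
        return 0;
    }

    int main() {
        ThreadContext ctx{};
        ctx.sp = 0x7fff0000;
        std::printf("sp = %llx\n",
                    static_cast<unsigned long long>(RegRead(ctx, SP_REGISTER)));
    }
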
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index d586b3f5c..d599d2bcb 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -622,7 +622,7 @@ void KScheduler::YieldToAnyThread(KernelCore& kernel) {
 }
 
 KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, core_id{core_id_} {
-    switch_fiber = std::make_shared<Common::Fiber>(OnSwitch, this);
+    switch_fiber = std::make_shared<Common::Fiber>([this] { SwitchToCurrent(); });
     state.needs_scheduling.store(true);
     state.interrupt_task_thread_runnable = false;
     state.should_count_idle = false;
@@ -778,11 +778,6 @@ void KScheduler::ScheduleImpl() {
     next_scheduler.SwitchContextStep2();
 }
 
-void KScheduler::OnSwitch(void* this_scheduler) {
-    KScheduler* sched = static_cast<KScheduler*>(this_scheduler);
-    sched->SwitchToCurrent();
-}
-
 void KScheduler::SwitchToCurrent() {
     while (true) {
         {
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 3f90656ee..6a4760eca 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -55,6 +55,11 @@ public:
         return idle_thread;
     }
 
+    /// Returns true if the scheduler is idle
+    [[nodiscard]] bool IsIdle() const {
+        return GetSchedulerCurrentThread() == idle_thread;
+    }
+
     /// Gets the timestamp for the last context switch in ticks.
     [[nodiscard]] u64 GetLastContextSwitchTicks() const;
 
@@ -165,7 +170,6 @@ private:
      */
    void UpdateLastContextSwitchTime(KThread* thread, KProcess* process);
 
-    static void OnSwitch(void* this_scheduler);
     void SwitchToCurrent();
 
     KThread* prev_thread{};
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 8d7faa662..23bf7425a 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -246,14 +246,12 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
 
 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
                                  VAddr user_stack_top, s32 prio, s32 core, KProcess* owner,
-                                 ThreadType type, std::function<void(void*)>&& init_func,
-                                 void* init_func_parameter) {
+                                 ThreadType type, std::function<void()>&& init_func) {
     // Initialize the thread.
     R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
 
     // Initialize emulation parameters.
-    thread->host_context =
-        std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
+    thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
     thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
     return ResultSuccess;
@@ -265,15 +263,13 @@ Result KThread::InitializeDummyThread(KThread* thread) {
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
     return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            Core::CpuManager::GetIdleThreadStartFunc(),
-                            system.GetCpuManager().GetStartFuncParameter());
+                            system.GetCpuManager().GetIdleThreadStartFunc());
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
     return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
-                            Core::CpuManager::GetShutdownThreadStartFunc(),
-                            system.GetCpuManager().GetStartFuncParameter());
+                            system.GetCpuManager().GetShutdownThreadStartFunc());
 }
 
 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
@@ -281,8 +277,7 @@ Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThr
                                      KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
     return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                            ThreadType::User, Core::CpuManager::GetGuestThreadStartFunc(),
-                            system.GetCpuManager().GetStartFuncParameter());
+                            ThreadType::User, system.GetCpuManager().GetGuestThreadStartFunc());
 }
 
 void KThread::PostDestroy(uintptr_t arg) {
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h index 94c4cd1c8..28cd7ecb0 100644 --- a/src/core/hle/kernel/k_thread.h +++ b/src/core/hle/kernel/k_thread.h | |||
| @@ -729,8 +729,7 @@ private: | |||
| 729 | [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, | 729 | [[nodiscard]] static Result InitializeThread(KThread* thread, KThreadFunction func, |
| 730 | uintptr_t arg, VAddr user_stack_top, s32 prio, | 730 | uintptr_t arg, VAddr user_stack_top, s32 prio, |
| 731 | s32 core, KProcess* owner, ThreadType type, | 731 | s32 core, KProcess* owner, ThreadType type, |
| 732 | std::function<void(void*)>&& init_func, | 732 | std::function<void()>&& init_func); |
| 733 | void* init_func_parameter); | ||
| 734 | 733 | ||
| 735 | static void RestorePriority(KernelCore& kernel_ctx, KThread* thread); | 734 | static void RestorePriority(KernelCore& kernel_ctx, KThread* thread); |
| 736 | 735 | ||
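The KThread call sites above all follow one pattern: the (std::function&lt;void(void*)&gt;, void*) pair collapses into a single capturing std::function&lt;void()&gt;, and the CpuManager start-function getters become instance methods so the returned closure can carry its own context instead of a shared void* parameter. A minimal caller-side sketch of the migration, assuming the Common::Fiber constructor shown in this diff; ThreadStart and thread_context are illustrative names, not code from the patch:

    // Before: context travels as a type-erased pointer and is cast back inside.
    // auto fiber = std::make_shared<Common::Fiber>(
    //     std::function<void(void*)>{ThreadStart}, &thread_context);

    // After: the context is captured directly, so the void* round-trip and the
    // cast disappear and the compiler can check the types.
    auto fiber = std::make_shared<Common::Fiber>(
        [&thread_context] { ThreadStart(thread_context); });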
diff --git a/src/tests/common/fibers.cpp b/src/tests/common/fibers.cpp index cfc84d423..4e29f9199 100644 --- a/src/tests/common/fibers.cpp +++ b/src/tests/common/fibers.cpp | |||
| @@ -43,7 +43,15 @@ class TestControl1 { | |||
| 43 | public: | 43 | public: |
| 44 | TestControl1() = default; | 44 | TestControl1() = default; |
| 45 | 45 | ||
| 46 | void DoWork(); | 46 | void DoWork() { |
| 47 | const u32 id = thread_ids.Get(); | ||
| 48 | u32 value = items[id]; | ||
| 49 | for (u32 i = 0; i < id; i++) { | ||
| 50 | value++; | ||
| 51 | } | ||
| 52 | results[id] = value; | ||
| 53 | Fiber::YieldTo(work_fibers[id], *thread_fibers[id]); | ||
| 54 | } | ||
| 47 | 55 | ||
| 48 | void ExecuteThread(u32 id); | 56 | void ExecuteThread(u32 id); |
| 49 | 57 | ||
| @@ -54,35 +62,16 @@ public: | |||
| 54 | std::vector<u32> results; | 62 | std::vector<u32> results; |
| 55 | }; | 63 | }; |
| 56 | 64 | ||
| 57 | static void WorkControl1(void* control) { | ||
| 58 | auto* test_control = static_cast<TestControl1*>(control); | ||
| 59 | test_control->DoWork(); | ||
| 60 | } | ||
| 61 | |||
| 62 | void TestControl1::DoWork() { | ||
| 63 | const u32 id = thread_ids.Get(); | ||
| 64 | u32 value = items[id]; | ||
| 65 | for (u32 i = 0; i < id; i++) { | ||
| 66 | value++; | ||
| 67 | } | ||
| 68 | results[id] = value; | ||
| 69 | Fiber::YieldTo(work_fibers[id], *thread_fibers[id]); | ||
| 70 | } | ||
| 71 | |||
| 72 | void TestControl1::ExecuteThread(u32 id) { | 65 | void TestControl1::ExecuteThread(u32 id) { |
| 73 | thread_ids.Register(id); | 66 | thread_ids.Register(id); |
| 74 | auto thread_fiber = Fiber::ThreadToFiber(); | 67 | auto thread_fiber = Fiber::ThreadToFiber(); |
| 75 | thread_fibers[id] = thread_fiber; | 68 | thread_fibers[id] = thread_fiber; |
| 76 | work_fibers[id] = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl1}, this); | 69 | work_fibers[id] = std::make_shared<Fiber>([this] { DoWork(); }); |
| 77 | items[id] = rand() % 256; | 70 | items[id] = rand() % 256; |
| 78 | Fiber::YieldTo(thread_fibers[id], *work_fibers[id]); | 71 | Fiber::YieldTo(thread_fibers[id], *work_fibers[id]); |
| 79 | thread_fibers[id]->Exit(); | 72 | thread_fibers[id]->Exit(); |
| 80 | } | 73 | } |
| 81 | 74 | ||
| 82 | static void ThreadStart1(u32 id, TestControl1& test_control) { | ||
| 83 | test_control.ExecuteThread(id); | ||
| 84 | } | ||
| 85 | |||
| 86 | /** This test checks the fiber setup configuration and validates that fibers are | 75 | /** This test checks the fiber setup configuration and validates that fibers are |
| 87 | * doing all the work required. | 76 | * doing all the work required. |
| 88 | */ | 77 | */ |
| @@ -95,7 +84,7 @@ TEST_CASE("Fibers::Setup", "[common]") { | |||
| 95 | test_control.results.resize(num_threads, 0); | 84 | test_control.results.resize(num_threads, 0); |
| 96 | std::vector<std::thread> threads; | 85 | std::vector<std::thread> threads; |
| 97 | for (u32 i = 0; i < num_threads; i++) { | 86 | for (u32 i = 0; i < num_threads; i++) { |
| 98 | threads.emplace_back(ThreadStart1, i, std::ref(test_control)); | 87 | threads.emplace_back([&test_control, i] { test_control.ExecuteThread(i); }); |
| 99 | } | 88 | } |
| 100 | for (u32 i = 0; i < num_threads; i++) { | 89 | for (u32 i = 0; i < num_threads; i++) { |
| 101 | threads[i].join(); | 90 | threads[i].join(); |
| @@ -167,21 +156,6 @@ public: | |||
| 167 | std::shared_ptr<Common::Fiber> fiber3; | 156 | std::shared_ptr<Common::Fiber> fiber3; |
| 168 | }; | 157 | }; |
| 169 | 158 | ||
| 170 | static void WorkControl2_1(void* control) { | ||
| 171 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 172 | test_control->DoWork1(); | ||
| 173 | } | ||
| 174 | |||
| 175 | static void WorkControl2_2(void* control) { | ||
| 176 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 177 | test_control->DoWork2(); | ||
| 178 | } | ||
| 179 | |||
| 180 | static void WorkControl2_3(void* control) { | ||
| 181 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 182 | test_control->DoWork3(); | ||
| 183 | } | ||
| 184 | |||
| 185 | void TestControl2::ExecuteThread(u32 id) { | 159 | void TestControl2::ExecuteThread(u32 id) { |
| 186 | thread_ids.Register(id); | 160 | thread_ids.Register(id); |
| 187 | auto thread_fiber = Fiber::ThreadToFiber(); | 161 | auto thread_fiber = Fiber::ThreadToFiber(); |
| @@ -193,18 +167,6 @@ void TestControl2::Exit() { | |||
| 193 | thread_fibers[id]->Exit(); | 167 | thread_fibers[id]->Exit(); |
| 194 | } | 168 | } |
| 195 | 169 | ||
| 196 | static void ThreadStart2_1(u32 id, TestControl2& test_control) { | ||
| 197 | test_control.ExecuteThread(id); | ||
| 198 | test_control.CallFiber1(); | ||
| 199 | test_control.Exit(); | ||
| 200 | } | ||
| 201 | |||
| 202 | static void ThreadStart2_2(u32 id, TestControl2& test_control) { | ||
| 203 | test_control.ExecuteThread(id); | ||
| 204 | test_control.CallFiber2(); | ||
| 205 | test_control.Exit(); | ||
| 206 | } | ||
| 207 | |||
| 208 | /** This test checks the fiber thread exchange configuration and validates | 170 | /** This test checks the fiber thread exchange configuration and validates |
| 209 | * that a fiber has been successfully transferred from one thread to another and that the TLS | 171 | * that a fiber has been successfully transferred from one thread to another and that the TLS |
| 210 | * region of the thread is kept while changing fibers. | 172 | * region of the thread is kept while changing fibers. |
| @@ -212,14 +174,19 @@ static void ThreadStart2_2(u32 id, TestControl2& test_control) { | |||
| 212 | TEST_CASE("Fibers::InterExchange", "[common]") { | 174 | TEST_CASE("Fibers::InterExchange", "[common]") { |
| 213 | TestControl2 test_control{}; | 175 | TestControl2 test_control{}; |
| 214 | test_control.thread_fibers.resize(2); | 176 | test_control.thread_fibers.resize(2); |
| 215 | test_control.fiber1 = | 177 | test_control.fiber1 = std::make_shared<Fiber>([&test_control] { test_control.DoWork1(); }); |
| 216 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_1}, &test_control); | 178 | test_control.fiber2 = std::make_shared<Fiber>([&test_control] { test_control.DoWork2(); }); |
| 217 | test_control.fiber2 = | 179 | test_control.fiber3 = std::make_shared<Fiber>([&test_control] { test_control.DoWork3(); }); |
| 218 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_2}, &test_control); | 180 | std::thread thread1{[&test_control] { |
| 219 | test_control.fiber3 = | 181 | test_control.ExecuteThread(0); |
| 220 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_3}, &test_control); | 182 | test_control.CallFiber1(); |
| 221 | std::thread thread1(ThreadStart2_1, 0, std::ref(test_control)); | 183 | test_control.Exit(); |
| 222 | std::thread thread2(ThreadStart2_2, 1, std::ref(test_control)); | 184 | }}; |
| 185 | std::thread thread2{[&test_control] { | ||
| 186 | test_control.ExecuteThread(1); | ||
| 187 | test_control.CallFiber2(); | ||
| 188 | test_control.Exit(); | ||
| 189 | }}; | ||
| 223 | thread1.join(); | 190 | thread1.join(); |
| 224 | thread2.join(); | 191 | thread2.join(); |
| 225 | REQUIRE(test_control.assert1); | 192 | REQUIRE(test_control.assert1); |
| @@ -270,16 +237,6 @@ public: | |||
| 270 | std::shared_ptr<Common::Fiber> fiber2; | 237 | std::shared_ptr<Common::Fiber> fiber2; |
| 271 | }; | 238 | }; |
| 272 | 239 | ||
| 273 | static void WorkControl3_1(void* control) { | ||
| 274 | auto* test_control = static_cast<TestControl3*>(control); | ||
| 275 | test_control->DoWork1(); | ||
| 276 | } | ||
| 277 | |||
| 278 | static void WorkControl3_2(void* control) { | ||
| 279 | auto* test_control = static_cast<TestControl3*>(control); | ||
| 280 | test_control->DoWork2(); | ||
| 281 | } | ||
| 282 | |||
| 283 | void TestControl3::ExecuteThread(u32 id) { | 240 | void TestControl3::ExecuteThread(u32 id) { |
| 284 | thread_ids.Register(id); | 241 | thread_ids.Register(id); |
| 285 | auto thread_fiber = Fiber::ThreadToFiber(); | 242 | auto thread_fiber = Fiber::ThreadToFiber(); |
| @@ -291,12 +248,6 @@ void TestControl3::Exit() { | |||
| 291 | thread_fibers[id]->Exit(); | 248 | thread_fibers[id]->Exit(); |
| 292 | } | 249 | } |
| 293 | 250 | ||
| 294 | static void ThreadStart3(u32 id, TestControl3& test_control) { | ||
| 295 | test_control.ExecuteThread(id); | ||
| 296 | test_control.CallFiber1(); | ||
| 297 | test_control.Exit(); | ||
| 298 | } | ||
| 299 | |||
| 300 | /** This test checks two threads racing to start the same fiber. | 251 | /** This test checks two threads racing to start the same fiber. |
| 301 | * It validates that execution occurred in an ordered manner and that at no | 252 | * It validates that execution occurred in an ordered manner and that at no |
| 302 | * point were two contexts active at the same time. | 253 | * point were two contexts active at the same time. |
| @@ -304,12 +255,15 @@ static void ThreadStart3(u32 id, TestControl3& test_control) { | |||
| 304 | TEST_CASE("Fibers::StartRace", "[common]") { | 255 | TEST_CASE("Fibers::StartRace", "[common]") { |
| 305 | TestControl3 test_control{}; | 256 | TestControl3 test_control{}; |
| 306 | test_control.thread_fibers.resize(2); | 257 | test_control.thread_fibers.resize(2); |
| 307 | test_control.fiber1 = | 258 | test_control.fiber1 = std::make_shared<Fiber>([&test_control] { test_control.DoWork1(); }); |
| 308 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_1}, &test_control); | 259 | test_control.fiber2 = std::make_shared<Fiber>([&test_control] { test_control.DoWork2(); }); |
| 309 | test_control.fiber2 = | 260 | const auto race_function{[&test_control](u32 id) { |
| 310 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_2}, &test_control); | 261 | test_control.ExecuteThread(id); |
| 311 | std::thread thread1(ThreadStart3, 0, std::ref(test_control)); | 262 | test_control.CallFiber1(); |
| 312 | std::thread thread2(ThreadStart3, 1, std::ref(test_control)); | 263 | test_control.Exit(); |
| 264 | }}; | ||
| 265 | std::thread thread1([&] { race_function(0); }); | ||
| 266 | std::thread thread2([&] { race_function(1); }); | ||
| 313 | thread1.join(); | 267 | thread1.join(); |
| 314 | thread2.join(); | 268 | thread2.join(); |
| 315 | REQUIRE(test_control.value1 == 1); | 269 | REQUIRE(test_control.value1 == 1); |
| @@ -319,12 +273,10 @@ TEST_CASE("Fibers::StartRace", "[common]") { | |||
| 319 | 273 | ||
| 320 | class TestControl4; | 274 | class TestControl4; |
| 321 | 275 | ||
| 322 | static void WorkControl4(void* control); | ||
| 323 | |||
| 324 | class TestControl4 { | 276 | class TestControl4 { |
| 325 | public: | 277 | public: |
| 326 | TestControl4() { | 278 | TestControl4() { |
| 327 | fiber1 = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl4}, this); | 279 | fiber1 = std::make_shared<Fiber>([this] { DoWork(); }); |
| 328 | goal_reached = false; | 280 | goal_reached = false; |
| 329 | rewinded = false; | 281 | rewinded = false; |
| 330 | } | 282 | } |
| @@ -336,7 +288,7 @@ public: | |||
| 336 | } | 288 | } |
| 337 | 289 | ||
| 338 | void DoWork() { | 290 | void DoWork() { |
| 339 | fiber1->SetRewindPoint(std::function<void(void*)>{WorkControl4}, this); | 291 | fiber1->SetRewindPoint([this] { DoWork(); }); |
| 340 | if (rewinded) { | 292 | if (rewinded) { |
| 341 | goal_reached = true; | 293 | goal_reached = true; |
| 342 | Fiber::YieldTo(fiber1, *thread_fiber); | 294 | Fiber::YieldTo(fiber1, *thread_fiber); |
| @@ -351,11 +303,6 @@ public: | |||
| 351 | bool rewinded; | 303 | bool rewinded; |
| 352 | }; | 304 | }; |
| 353 | 305 | ||
| 354 | static void WorkControl4(void* control) { | ||
| 355 | auto* test_control = static_cast<TestControl4*>(control); | ||
| 356 | test_control->DoWork(); | ||
| 357 | } | ||
| 358 | |||
| 359 | TEST_CASE("Fibers::Rewind", "[common]") { | 306 | TEST_CASE("Fibers::Rewind", "[common]") { |
| 360 | TestControl4 test_control{}; | 307 | TestControl4 test_control{}; |
| 361 | test_control.Execute(); | 308 | test_control.Execute(); |
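Taken together, the test changes exercise the whole lambda-based Fiber surface, including the rewind point, which is now installed the same way (fiber1->SetRewindPoint([this] { DoWork(); }) in the rewind test above). A condensed usage sketch, assuming only the Common::Fiber interface visible in these tests (ThreadToFiber, the std::function&lt;void()&gt; constructor, YieldTo, Exit); the surrounding names are illustrative:

    // Names here are illustrative; only the Common::Fiber calls appear in the diff.
    std::shared_ptr<Common::Fiber> thread_fiber; // adopts the calling thread
    std::shared_ptr<Common::Fiber> work_fiber;   // runs the captured closure

    void RunOnce() {
        thread_fiber = Common::Fiber::ThreadToFiber();
        work_fiber = std::make_shared<Common::Fiber>([] {
            // ... do the actual work ...
            Common::Fiber::YieldTo(work_fiber, *thread_fiber); // hand control back
        });
        Common::Fiber::YieldTo(thread_fiber, *work_fiber); // start the worker
        thread_fiber->Exit();
    }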
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp index 8358d36b5..e687416a8 100644 --- a/src/tests/core/core_timing.cpp +++ b/src/tests/core/core_timing.cpp | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <chrono> | 8 | #include <chrono> |
| 9 | #include <cstdlib> | 9 | #include <cstdlib> |
| 10 | #include <memory> | 10 | #include <memory> |
| 11 | #include <mutex> | ||
| 11 | #include <string> | 12 | #include <string> |
| 12 | 13 | ||
| 13 | #include "core/core.h" | 14 | #include "core/core.h" |
| @@ -21,13 +22,14 @@ std::array<s64, 5> delays{}; | |||
| 21 | 22 | ||
| 22 | std::bitset<CB_IDS.size()> callbacks_ran_flags; | 23 | std::bitset<CB_IDS.size()> callbacks_ran_flags; |
| 23 | u64 expected_callback = 0; | 24 | u64 expected_callback = 0; |
| 25 | std::mutex control_mutex; | ||
| 24 | 26 | ||
| 25 | template <unsigned int IDX> | 27 | template <unsigned int IDX> |
| 26 | void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { | 28 | void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) { |
| 29 | std::unique_lock<std::mutex> lk(control_mutex); | ||
| 27 | static_assert(IDX < CB_IDS.size(), "IDX out of range"); | 30 | static_assert(IDX < CB_IDS.size(), "IDX out of range"); |
| 28 | callbacks_ran_flags.set(IDX); | 31 | callbacks_ran_flags.set(IDX); |
| 29 | REQUIRE(CB_IDS[IDX] == user_data); | 32 | REQUIRE(CB_IDS[IDX] == user_data); |
| 30 | REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]); | ||
| 31 | delays[IDX] = ns_late.count(); | 33 | delays[IDX] = ns_late.count(); |
| 32 | ++expected_callback; | 34 | ++expected_callback; |
| 33 | } | 35 | } |
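The core_timing change serializes the callback bookkeeping, presumably because with host timing the callbacks fire on a separate timer thread and can overlap the test thread; the shared bitset and counter now sit behind control_mutex, and the strict cross-callback ordering assertion is dropped since near-simultaneous events are not guaranteed to be observed in schedule order. Reassembled from the hunk above, the guarded callback now reads:

    template <unsigned int IDX>
    void HostCallbackTemplate(std::uintptr_t user_data, std::chrono::nanoseconds ns_late) {
        std::unique_lock<std::mutex> lk(control_mutex); // one callback mutates state at a time
        static_assert(IDX < CB_IDS.size(), "IDX out of range");
        callbacks_ran_flags.set(IDX);
        REQUIRE(CB_IDS[IDX] == user_data);
        delays[IDX] = ns_late.count();
        ++expected_callback;
    }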
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp index 11ce865a7..743ac09f6 100644 --- a/src/video_core/vulkan_common/vulkan_device.cpp +++ b/src/video_core/vulkan_common/vulkan_device.cpp | |||
| @@ -669,17 +669,6 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR | |||
| 669 | const bool is_amd = | 669 | const bool is_amd = |
| 670 | driver_id == VK_DRIVER_ID_AMD_PROPRIETARY || driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE; | 670 | driver_id == VK_DRIVER_ID_AMD_PROPRIETARY || driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE; |
| 671 | if (is_amd) { | 671 | if (is_amd) { |
| 672 | // TODO(lat9nq): Add an upper bound when AMD fixes their VK_KHR_push_descriptor | ||
| 673 | const bool has_broken_push_descriptor = VK_VERSION_MAJOR(properties.driverVersion) == 2 && | ||
| 674 | VK_VERSION_MINOR(properties.driverVersion) == 0 && | ||
| 675 | VK_VERSION_PATCH(properties.driverVersion) >= 226; | ||
| 676 | if (khr_push_descriptor && has_broken_push_descriptor) { | ||
| 677 | LOG_WARNING( | ||
| 678 | Render_Vulkan, | ||
| 679 | "Disabling AMD driver 2.0.226 and later from broken VK_KHR_push_descriptor"); | ||
| 680 | khr_push_descriptor = false; | ||
| 681 | } | ||
| 682 | |||
| 683 | // AMD drivers need a higher amount of Sets per Pool in certain circumstances, such as in XC2. | 672 | // AMD drivers need a higher amount of Sets per Pool in certain circumstances, such as in XC2. |
| 684 | sets_per_pool = 96; | 673 | sets_per_pool = 96; |
| 685 | // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken. | 674 | // Disable VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT on AMD GCN4 and lower as it is broken. |
diff --git a/src/yuzu/applets/qt_web_browser.cpp b/src/yuzu/applets/qt_web_browser.cpp index 283c04cd5..790edbb2a 100644 --- a/src/yuzu/applets/qt_web_browser.cpp +++ b/src/yuzu/applets/qt_web_browser.cpp | |||
| @@ -52,8 +52,8 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system, | |||
| 52 | : QWebEngineView(parent), input_subsystem{input_subsystem_}, | 52 | : QWebEngineView(parent), input_subsystem{input_subsystem_}, |
| 53 | url_interceptor(std::make_unique<UrlRequestInterceptor>()), | 53 | url_interceptor(std::make_unique<UrlRequestInterceptor>()), |
| 54 | input_interpreter(std::make_unique<InputInterpreter>(system)), | 54 | input_interpreter(std::make_unique<InputInterpreter>(system)), |
| 55 | default_profile{QWebEngineProfile::defaultProfile()}, | 55 | default_profile{QWebEngineProfile::defaultProfile()}, global_settings{ |
| 56 | global_settings{QWebEngineSettings::globalSettings()} { | 56 | default_profile->settings()} { |
| 57 | default_profile->setPersistentStoragePath(QString::fromStdString(Common::FS::PathToUTF8String( | 57 | default_profile->setPersistentStoragePath(QString::fromStdString(Common::FS::PathToUTF8String( |
| 58 | Common::FS::GetYuzuPath(Common::FS::YuzuPath::YuzuDir) / "qtwebengine"))); | 58 | Common::FS::GetYuzuPath(Common::FS::YuzuPath::YuzuDir) / "qtwebengine"))); |
| 59 | 59 | ||
| @@ -78,7 +78,7 @@ QtNXWebEngineView::QtNXWebEngineView(QWidget* parent, Core::System& system, | |||
| 78 | default_profile->scripts()->insert(gamepad); | 78 | default_profile->scripts()->insert(gamepad); |
| 79 | default_profile->scripts()->insert(window_nx); | 79 | default_profile->scripts()->insert(window_nx); |
| 80 | 80 | ||
| 81 | default_profile->setRequestInterceptor(url_interceptor.get()); | 81 | default_profile->setUrlRequestInterceptor(url_interceptor.get()); |
| 82 | 82 | ||
| 83 | global_settings->setAttribute(QWebEngineSettings::LocalContentCanAccessRemoteUrls, true); | 83 | global_settings->setAttribute(QWebEngineSettings::LocalContentCanAccessRemoteUrls, true); |
| 84 | global_settings->setAttribute(QWebEngineSettings::FullScreenSupportEnabled, true); | 84 | global_settings->setAttribute(QWebEngineSettings::FullScreenSupportEnabled, true); |
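Both web-browser changes track Qt WebEngine's API direction: settings move from the global QWebEngineSettings::globalSettings(), which Qt 6 removes, to the per-profile settings object, and setUrlRequestInterceptor() supersedes the setRequestInterceptor() deprecated in Qt 5.13. A migration sketch, assuming Qt WebEngine 5.13 or later and an existing QWebEngineUrlRequestInterceptor* named interceptor (a hypothetical name, not from the patch):

    QWebEngineProfile* profile = QWebEngineProfile::defaultProfile();

    // Old: QWebEngineSettings::globalSettings()->setAttribute(...)
    profile->settings()->setAttribute(
        QWebEngineSettings::FullScreenSupportEnabled, true);

    // Old: profile->setRequestInterceptor(interceptor)  (deprecated in 5.13)
    profile->setUrlRequestInterceptor(interceptor);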