Diffstat (limited to 'src')
-rw-r--r--  src/common/fiber.cpp                              5
-rw-r--r--  src/core/core_timing.h                            6
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.h    3
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp               8
-rw-r--r--  src/core/hle/kernel/k_spin_lock.cpp              39
-rw-r--r--  src/core/hle/kernel/k_spin_lock.h                 4
-rw-r--r--  src/core/hle/kernel/k_thread.cpp                 39
-rw-r--r--  src/core/hle/kernel/k_thread.h                   13
-rw-r--r--  src/core/hle/kernel/physical_core.cpp             3
-rw-r--r--  src/core/hle/kernel/physical_core.h               7
-rw-r--r--  src/core/hle/service/ldr/ldr.cpp                  3
-rw-r--r--  src/core/hle/service/service.h                    5
12 files changed, 46 insertions, 89 deletions
diff --git a/src/common/fiber.cpp b/src/common/fiber.cpp
index 81b212e4b..177a74deb 100644
--- a/src/common/fiber.cpp
+++ b/src/common/fiber.cpp
@@ -2,9 +2,10 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <mutex>
+
 #include "common/assert.h"
 #include "common/fiber.h"
-#include "common/spin_lock.h"
 #include "common/virtual_buffer.h"
 
 #include <boost/context/detail/fcontext.hpp>
@@ -19,7 +20,7 @@ struct Fiber::FiberImpl {
     VirtualBuffer<u8> stack;
     VirtualBuffer<u8> rewind_stack;
 
-    SpinLock guard{};
+    std::mutex guard;
     std::function<void(void*)> entry_point;
     std::function<void(void*)> rewind_point;
     void* rewind_parameter{};
diff --git a/src/core/core_timing.h b/src/core/core_timing.h
index 888828fd0..28b63be43 100644
--- a/src/core/core_timing.h
+++ b/src/core/core_timing.h
@@ -8,13 +8,13 @@
 #include <chrono>
 #include <functional>
 #include <memory>
+#include <mutex>
 #include <optional>
 #include <string>
 #include <thread>
 #include <vector>
 
 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "common/thread.h"
 #include "common/wall_clock.h"
 
@@ -149,8 +149,8 @@ private:
     std::shared_ptr<EventType> ev_lost;
     Common::Event event{};
     Common::Event pause_event{};
-    Common::SpinLock basic_lock{};
-    Common::SpinLock advance_lock{};
+    std::mutex basic_lock;
+    std::mutex advance_lock;
     std::unique_ptr<std::thread> timer_thread;
     std::atomic<bool> paused{};
     std::atomic<bool> paused_set{};
diff --git a/src/core/hle/kernel/global_scheduler_context.h b/src/core/hle/kernel/global_scheduler_context.h
index 6f44b534f..47425a3a1 100644
--- a/src/core/hle/kernel/global_scheduler_context.h
+++ b/src/core/hle/kernel/global_scheduler_context.h
@@ -8,7 +8,6 @@
 #include <vector>
 
 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "core/hardware_properties.h"
 #include "core/hle/kernel/k_priority_queue.h"
 #include "core/hle/kernel/k_scheduler_lock.h"
@@ -80,7 +79,7 @@ private:
 
     /// Lists all thread ids that aren't deleted/etc.
     std::vector<KThread*> thread_list;
-    Common::SpinLock global_list_guard{};
+    std::mutex global_list_guard;
 };
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6c0bb1672..526eb4b70 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -705,7 +705,7 @@ void KScheduler::Unload(KThread* thread) {
         prev_thread = nullptr;
     }
 
-    thread->context_guard.Unlock();
+    thread->context_guard.unlock();
 }
 
 void KScheduler::Reload(KThread* thread) {
@@ -794,13 +794,13 @@ void KScheduler::SwitchToCurrent() {
         do {
            auto next_thread = current_thread.load();
            if (next_thread != nullptr) {
-                const auto locked = next_thread->context_guard.TryLock();
+                const auto locked = next_thread->context_guard.try_lock();
                if (state.needs_scheduling.load()) {
-                    next_thread->context_guard.Unlock();
+                    next_thread->context_guard.unlock();
                    break;
                }
                if (next_thread->GetActiveCore() != core_id) {
-                    next_thread->context_guard.Unlock();
+                    next_thread->context_guard.unlock();
                    break;
                }
                if (!locked) {
diff --git a/src/core/hle/kernel/k_spin_lock.cpp b/src/core/hle/kernel/k_spin_lock.cpp
index 4412aa4bb..527ff0f9f 100644
--- a/src/core/hle/kernel/k_spin_lock.cpp
+++ b/src/core/hle/kernel/k_spin_lock.cpp
@@ -4,51 +4,18 @@
 
 #include "core/hle/kernel/k_spin_lock.h"
 
-#if _MSC_VER
-#include <intrin.h>
-#if _M_AMD64
-#define __x86_64__ 1
-#endif
-#if _M_ARM64
-#define __aarch64__ 1
-#endif
-#else
-#if __x86_64__
-#include <xmmintrin.h>
-#endif
-#endif
-
-namespace {
-
-void ThreadPause() {
-#if __x86_64__
-    _mm_pause();
-#elif __aarch64__ && _MSC_VER
-    __yield();
-#elif __aarch64__
-    asm("yield");
-#endif
-}
-
-} // namespace
-
 namespace Kernel {
 
 void KSpinLock::Lock() {
-    while (lck.test_and_set(std::memory_order_acquire)) {
-        ThreadPause();
-    }
+    lck.lock();
 }
 
 void KSpinLock::Unlock() {
-    lck.clear(std::memory_order_release);
+    lck.unlock();
 }
 
 bool KSpinLock::TryLock() {
-    if (lck.test_and_set(std::memory_order_acquire)) {
-        return false;
-    }
-    return true;
+    return lck.try_lock();
 }
 
 } // namespace Kernel
diff --git a/src/core/hle/kernel/k_spin_lock.h b/src/core/hle/kernel/k_spin_lock.h
index 4d87d006a..7868b25a5 100644
--- a/src/core/hle/kernel/k_spin_lock.h
+++ b/src/core/hle/kernel/k_spin_lock.h
@@ -4,7 +4,7 @@
 
 #pragma once
 
-#include <atomic>
+#include <mutex>
 
 #include "core/hle/kernel/k_scoped_lock.h"
 
@@ -25,7 +25,7 @@ public:
     [[nodiscard]] bool TryLock();
 
 private:
-    std::atomic_flag lck = ATOMIC_FLAG_INIT;
+    std::mutex lck;
 };
 
 // TODO(bunnei): Alias for now, in case we want to implement these accurately in the future.
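
Note: taken together, the two k_spin_lock hunks reduce KSpinLock to a thin wrapper over std::mutex while preserving its Lock()/Unlock()/TryLock() interface, so KScopedSpinLock call sites keep compiling. A minimal sketch of the post-patch class, assembled from the hunks above; the bodies are inlined here for brevity (the patch keeps them in k_spin_lock.cpp), and anything not visible in the diff, such as the KScopedSpinLock alias, is an assumption:

    // Sketch of the post-patch KSpinLock (not the verbatim header).
    #include <mutex>

    namespace Kernel {

    class KSpinLock {
    public:
        KSpinLock() = default;

        void Lock() {
            lck.lock();
        }

        void Unlock() {
            lck.unlock();
        }

        [[nodiscard]] bool TryLock() {
            return lck.try_lock();
        }

    private:
        std::mutex lck; // was std::atomic_flag driven by a pause/yield spin loop
    };

    // Assumed unchanged: the scoped helper only needs Lock()/Unlock().
    using KScopedSpinLock = KScopedLock<KSpinLock>;

    } // namespace Kernel
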
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index d3bb1c871..af71987e8 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -723,10 +723,10 @@ void KThread::UpdateState() {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Set our suspend flags in state.
-    const ThreadState old_state = thread_state;
+    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
     const auto new_state =
         static_cast<ThreadState>(this->GetSuspendFlags()) | (old_state & ThreadState::Mask);
-    thread_state = new_state;
+    thread_state.store(new_state, std::memory_order_relaxed);
 
     // Note the state change in scheduler.
     if (new_state != old_state) {
@@ -738,8 +738,8 @@ void KThread::Continue() {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Clear our suspend flags in state.
-    const ThreadState old_state = thread_state;
-    thread_state = old_state & ThreadState::Mask;
+    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+    thread_state.store(old_state & ThreadState::Mask, std::memory_order_relaxed);
 
     // Note the state change in scheduler.
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
@@ -1079,17 +1079,10 @@ void KThread::IfDummyThreadTryWait() {
         return;
     }
 
-    // Block until we can grab the lock.
-    KScopedSpinLock lk{dummy_wait_lock};
-}
-
-void KThread::IfDummyThreadBeginWait() {
-    if (!IsDummyThread()) {
-        return;
-    }
-
-    // Ensure the thread will block when IfDummyThreadTryWait is called.
-    dummy_wait_lock.Lock();
+    // Block until we are no longer waiting.
+    std::unique_lock lk(dummy_wait_lock);
+    dummy_wait_cv.wait(
+        lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
 }
 
 void KThread::IfDummyThreadEndWait() {
@@ -1097,8 +1090,8 @@ void KThread::IfDummyThreadEndWait() {
         return;
     }
 
-    // Ensure the thread will no longer block.
-    dummy_wait_lock.Unlock();
+    // Wake up the waiting thread.
+    dummy_wait_cv.notify_one();
 }
 
 void KThread::BeginWait(KThreadQueue* queue) {
@@ -1107,9 +1100,6 @@ void KThread::BeginWait(KThreadQueue* queue) {
 
     // Set our wait queue.
     wait_queue = queue;
-
-    // Special case for dummy threads to ensure they block.
-    IfDummyThreadBeginWait();
 }
 
 void KThread::NotifyAvailable(KSynchronizationObject* signaled_object, ResultCode wait_result_) {
@@ -1158,10 +1148,11 @@ void KThread::SetState(ThreadState state) {
     SetMutexWaitAddressForDebugging({});
     SetWaitReasonForDebugging({});
 
-    const ThreadState old_state = thread_state;
-    thread_state =
-        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
-    if (thread_state != old_state) {
+    const ThreadState old_state = thread_state.load(std::memory_order_relaxed);
+    thread_state.store(
+        static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask)),
+        std::memory_order_relaxed);
+    if (thread_state.load(std::memory_order_relaxed) != old_state) {
         KScheduler::OnThreadStateChanged(kernel, this, old_state);
     }
 }
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index d0fd85130..4892fdf76 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -6,6 +6,8 @@
 
 #include <array>
 #include <atomic>
+#include <condition_variable>
+#include <mutex>
 #include <span>
 #include <string>
 #include <utility>
@@ -15,6 +17,7 @@
 
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
+#include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
 #include "core/hle/kernel/k_light_lock.h"
@@ -256,11 +259,11 @@ public:
     [[nodiscard]] std::shared_ptr<Common::Fiber>& GetHostContext();
 
     [[nodiscard]] ThreadState GetState() const {
-        return thread_state & ThreadState::Mask;
+        return thread_state.load(std::memory_order_relaxed) & ThreadState::Mask;
     }
 
     [[nodiscard]] ThreadState GetRawState() const {
-        return thread_state;
+        return thread_state.load(std::memory_order_relaxed);
     }
 
     void SetState(ThreadState state);
@@ -642,7 +645,6 @@ public:
     // blocking as needed.
 
     void IfDummyThreadTryWait();
-    void IfDummyThreadBeginWait();
     void IfDummyThreadEndWait();
 
 private:
@@ -762,13 +764,14 @@ private:
     s8 priority_inheritance_count{};
     bool resource_limit_release_hint{};
    StackParameters stack_parameters{};
-    KSpinLock context_guard{};
-    KSpinLock dummy_wait_lock{};
+    Common::SpinLock context_guard{};
 
     // For emulation
     std::shared_ptr<Common::Fiber> host_context{};
     bool is_single_core{};
     ThreadType thread_type{};
+    std::mutex dummy_wait_lock;
+    std::condition_variable dummy_wait_cv;
 
     // For debugging
     std::vector<KSynchronizationObject*> wait_objects_for_debugging;
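
Note: the k_thread hunks replace the old dummy-thread trick of blocking on a held spin lock (IfDummyThreadBeginWait pre-locking, IfDummyThreadTryWait blocking on acquisition) with a condition variable guarded by dummy_wait_lock. Because the wait uses a predicate on GetState(), a wake-up that arrives before the waiter reaches wait() is not lost, which is what allows IfDummyThreadBeginWait() to be removed outright. A condensed sketch of the resulting pair, assembled from the hunks above:

    // Host (dummy) thread blocks here until its guest-side wait is over.
    void KThread::IfDummyThreadTryWait() {
        if (!IsDummyThread()) {
            return;
        }

        // Block until we are no longer waiting.
        std::unique_lock lk(dummy_wait_lock);
        dummy_wait_cv.wait(
            lk, [&] { return GetState() != ThreadState::Waiting || kernel.IsShuttingDown(); });
    }

    // Called once the wait is satisfied; wakes the blocked dummy thread.
    void KThread::IfDummyThreadEndWait() {
        if (!IsDummyThread()) {
            return;
        }

        // Wake up the waiting thread.
        dummy_wait_cv.notify_one();
    }
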
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index 18a5f40f8..cc49e8c7e 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -2,7 +2,6 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
-#include "common/spin_lock.h"
 #include "core/arm/cpu_interrupt_handler.h"
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
 #include "core/arm/dynarmic/arm_dynarmic_64.h"
@@ -16,7 +15,7 @@ namespace Kernel {
 PhysicalCore::PhysicalCore(std::size_t core_index_, Core::System& system_, KScheduler& scheduler_,
                            Core::CPUInterrupts& interrupts_)
     : core_index{core_index_}, system{system_}, scheduler{scheduler_},
-      interrupts{interrupts_}, guard{std::make_unique<Common::SpinLock>()} {
+      interrupts{interrupts_}, guard{std::make_unique<std::mutex>()} {
 #ifdef ARCHITECTURE_x86_64
     // TODO(bunnei): Initialization relies on a core being available. We may later replace this with
     // a 32-bit instance of Dynarmic. This should be abstracted out to a CPU manager.
diff --git a/src/core/hle/kernel/physical_core.h b/src/core/hle/kernel/physical_core.h
index 16a032e89..f2112fc1d 100644
--- a/src/core/hle/kernel/physical_core.h
+++ b/src/core/hle/kernel/physical_core.h
@@ -6,13 +6,10 @@
 
 #include <cstddef>
 #include <memory>
+#include <mutex>
 
 #include "core/arm/arm_interface.h"
 
-namespace Common {
-class SpinLock;
-}
-
 namespace Kernel {
 class KScheduler;
 } // namespace Kernel
@@ -91,7 +88,7 @@ private:
     Core::System& system;
     Kernel::KScheduler& scheduler;
     Core::CPUInterrupts& interrupts;
-    std::unique_ptr<Common::SpinLock> guard;
+    std::unique_ptr<std::mutex> guard;
     std::unique_ptr<Core::ARM_Interface> arm_interface;
 };
 
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index cf727c167..42f9cf811 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -160,7 +160,8 @@ public:
 
 class RelocatableObject final : public ServiceFramework<RelocatableObject> {
 public:
-    explicit RelocatableObject(Core::System& system_) : ServiceFramework{system_, "ldr:ro"} {
+    explicit RelocatableObject(Core::System& system_)
+        : ServiceFramework{system_, "ldr:ro", ServiceThreadType::CreateNew} {
         // clang-format off
         static const FunctionInfo functions[] = {
             {0, &RelocatableObject::LoadModule, "LoadModule"},
diff --git a/src/core/hle/service/service.h b/src/core/hle/service/service.h
index c78b2baeb..148265218 100644
--- a/src/core/hle/service/service.h
+++ b/src/core/hle/service/service.h
@@ -9,7 +9,6 @@
 #include <string>
 #include <boost/container/flat_map.hpp>
 #include "common/common_types.h"
-#include "common/spin_lock.h"
 #include "core/hle/kernel/hle_ipc.h"
 
 ////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -90,7 +89,7 @@ protected:
     using HandlerFnP = void (Self::*)(Kernel::HLERequestContext&);
 
     /// Used to gain exclusive access to the service members, e.g. from CoreTiming thread.
-    [[nodiscard]] std::scoped_lock<Common::SpinLock> LockService() {
+    [[nodiscard]] std::scoped_lock<std::mutex> LockService() {
         return std::scoped_lock{lock_service};
     }
 
@@ -135,7 +134,7 @@ private:
     boost::container::flat_map<u32, FunctionInfoBase> handlers_tipc;
 
     /// Used to gain exclusive access to the service members, e.g. from CoreTiming thread.
-    Common::SpinLock lock_service;
+    std::mutex lock_service;
 };
 
 /**
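
Note: with lock_service now a std::mutex, LockService() still returns its guard by value (guaranteed copy elision makes this well-formed even though std::scoped_lock is non-movable), so handlers keep their existing pattern; this matters for services such as ldr:ro above, which now runs on its own service thread. An illustrative, hypothetical handler; SomeService and SomeRequest are not in the patch, only LockService() and lock_service are:

    // Hypothetical handler body showing the intended locking pattern.
    void SomeService::SomeRequest(Kernel::HLERequestContext& ctx) {
        // Holds lock_service until the end of the handler, serializing access to
        // service members against e.g. the CoreTiming thread.
        auto lock = LockService();

        // ... read or modify service state safely here ...
    }
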