Diffstat (limited to 'src/core/core_timing.cpp')
-rw-r--r--  src/core/core_timing.cpp  126
1 file changed, 82 insertions(+), 44 deletions(-)
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 29e7dba9b..918502929 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -7,6 +7,7 @@
 #include <tuple>
 
 #include "common/microprofile.h"
+#include "common/thread.h"
 #include "core/core_timing.h"
 #include "core/core_timing_util.h"
 #include "core/hardware_properties.h"
@@ -59,68 +60,96 @@ void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
     const auto empty_timed_callback = [](std::uintptr_t, std::chrono::nanoseconds) {};
     ev_lost = CreateEvent("_lost_event", empty_timed_callback);
     if (is_multicore) {
-        timer_thread = std::make_unique<std::thread>(ThreadEntry, std::ref(*this));
+        const auto hardware_concurrency = std::thread::hardware_concurrency();
+        worker_threads.emplace_back(ThreadEntry, std::ref(*this));
+        if (hardware_concurrency > 8) {
+            worker_threads.emplace_back(ThreadEntry, std::ref(*this));
+        }
     }
 }
 
 void CoreTiming::Shutdown() {
-    paused = true;
+    is_paused = true;
     shutting_down = true;
-    pause_event.Set();
-    event.Set();
-    if (timer_thread) {
-        timer_thread->join();
+    {
+        std::unique_lock<std::mutex> main_lock(event_mutex);
+        event_cv.notify_all();
+        wait_pause_cv.notify_all();
+    }
+    for (auto& thread : worker_threads) {
+        thread.join();
     }
+    worker_threads.clear();
     ClearPendingEvents();
-    timer_thread.reset();
     has_started = false;
 }
 
-void CoreTiming::Pause(bool is_paused) {
-    paused = is_paused;
-    pause_event.Set();
+void CoreTiming::Pause(bool is_paused_) {
+    std::unique_lock<std::mutex> main_lock(event_mutex);
+    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
+        return;
+    }
+    if (is_multicore) {
+        is_paused = is_paused_;
+        event_cv.notify_all();
+        if (!is_paused_) {
+            wait_pause_cv.notify_all();
+        }
+    }
+    paused_state.store(is_paused_, std::memory_order_relaxed);
 }
 
-void CoreTiming::SyncPause(bool is_paused) {
-    if (is_paused == paused && paused_set == paused) {
+void CoreTiming::SyncPause(bool is_paused_) {
+    std::unique_lock<std::mutex> main_lock(event_mutex);
+    if (is_paused_ == paused_state.load(std::memory_order_relaxed)) {
         return;
     }
-    Pause(is_paused);
-    if (timer_thread) {
-        if (!is_paused) {
-            pause_event.Set();
+
+    if (is_multicore) {
+        is_paused = is_paused_;
+        event_cv.notify_all();
+        if (!is_paused_) {
+            wait_pause_cv.notify_all();
+        }
+    }
+    paused_state.store(is_paused_, std::memory_order_relaxed);
+    if (is_multicore) {
+        if (is_paused_) {
+            wait_signal_cv.wait(main_lock, [this] { return pause_count == worker_threads.size(); });
+        } else {
+            wait_signal_cv.wait(main_lock, [this] { return pause_count == 0; });
         }
-        event.Set();
-        while (paused_set != is_paused)
-            ;
     }
 }
 
 bool CoreTiming::IsRunning() const {
-    return !paused_set;
+    return !paused_state.load(std::memory_order_acquire);
 }
 
 bool CoreTiming::HasPendingEvents() const {
-    return !(wait_set && event_queue.empty());
+    std::unique_lock<std::mutex> main_lock(event_mutex);
+    return !event_queue.empty();
 }
 
 void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
                                const std::shared_ptr<EventType>& event_type,
                                std::uintptr_t user_data) {
-    {
-        std::scoped_lock scope{basic_lock};
-        const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
 
-        event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
+    std::unique_lock<std::mutex> main_lock(event_mutex);
+    const u64 timeout = static_cast<u64>((GetGlobalTimeNs() + ns_into_future).count());
+
+    event_queue.emplace_back(Event{timeout, event_fifo_id++, user_data, event_type});
 
-        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
+
+    if (is_multicore) {
+        event_cv.notify_one();
     }
-    event.Set();
 }
 
 void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
                                  std::uintptr_t user_data) {
-    std::scoped_lock scope{basic_lock};
+    std::unique_lock<std::mutex> main_lock(event_mutex);
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get() && e.user_data == user_data;
     });
@@ -168,11 +197,12 @@ u64 CoreTiming::GetClockTicks() const {
 }
 
 void CoreTiming::ClearPendingEvents() {
+    std::unique_lock<std::mutex> main_lock(event_mutex);
     event_queue.clear();
 }
 
 void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
-    std::scoped_lock lock{basic_lock};
+    std::unique_lock<std::mutex> main_lock(event_mutex);
 
     const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type.lock().get() == event_type.get();
@@ -186,21 +216,21 @@ void CoreTiming::RemoveEvent(const std::shared_ptr<EventType>& event_type) {
 }
 
 std::optional<s64> CoreTiming::Advance() {
-    std::scoped_lock lock{advance_lock, basic_lock};
     global_timer = GetGlobalTimeNs().count();
 
+    std::unique_lock<std::mutex> main_lock(event_mutex);
     while (!event_queue.empty() && event_queue.front().time <= global_timer) {
         Event evt = std::move(event_queue.front());
         std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<>());
         event_queue.pop_back();
-        basic_lock.unlock();
+        event_mutex.unlock();
 
         if (const auto event_type{evt.type.lock()}) {
-            event_type->callback(
-                evt.user_data, std::chrono::nanoseconds{static_cast<s64>(global_timer - evt.time)});
+            event_type->callback(evt.user_data, std::chrono::nanoseconds{static_cast<s64>(
+                                                    GetGlobalTimeNs().count() - evt.time)});
         }
 
-        basic_lock.lock();
+        event_mutex.lock();
         global_timer = GetGlobalTimeNs().count();
     }
 
@@ -213,26 +243,34 @@ std::optional<s64> CoreTiming::Advance() {
 }
 
 void CoreTiming::ThreadLoop() {
+    const auto predicate = [this] { return !event_queue.empty() || is_paused; };
     has_started = true;
     while (!shutting_down) {
-        while (!paused) {
-            paused_set = false;
+        while (!is_paused && !shutting_down) {
             const auto next_time = Advance();
             if (next_time) {
                 if (*next_time > 0) {
                     std::chrono::nanoseconds next_time_ns = std::chrono::nanoseconds(*next_time);
-                    event.WaitFor(next_time_ns);
+                    std::unique_lock<std::mutex> main_lock(event_mutex);
+                    event_cv.wait_for(main_lock, next_time_ns, predicate);
                 }
             } else {
-                wait_set = true;
-                event.Wait();
+                std::unique_lock<std::mutex> main_lock(event_mutex);
+                event_cv.wait(main_lock, predicate);
             }
-            wait_set = false;
         }
-        paused_set = true;
-        clock->Pause(true);
-        pause_event.Wait();
-        clock->Pause(false);
+        std::unique_lock<std::mutex> main_lock(event_mutex);
+        pause_count++;
+        if (pause_count == worker_threads.size()) {
+            clock->Pause(true);
+            wait_signal_cv.notify_all();
+        }
+        wait_pause_cv.wait(main_lock, [this] { return !is_paused || shutting_down; });
+        pause_count--;
+        if (pause_count == 0) {
+            clock->Pause(false);
+            wait_signal_cv.notify_all();
+        }
     }
 }
 
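Note: this change replaces the single timer_thread and its Common::Event / spin-wait synchronization with a vector of worker_threads coordinated through event_mutex and three condition variables (event_cv, wait_pause_cv, wait_signal_cv), with pause_count tracking how many workers have acknowledged a pause or resume. The standalone sketch below shows the same pause handshake pattern in isolation; the class and member names are illustrative only and are not part of the yuzu codebase.

// pause_handshake_sketch.cpp -- minimal sketch of the pause handshake, not yuzu code.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

class PausableWorkers {
public:
    explicit PausableWorkers(std::size_t count) {
        for (std::size_t i = 0; i < count; ++i) {
            workers.emplace_back([this] { ThreadLoop(); });
        }
    }

    ~PausableWorkers() {
        {
            std::unique_lock<std::mutex> lock(mutex);
            shutting_down = true;
            paused = false;
            run_cv.notify_all();
            unpause_cv.notify_all();
        }
        for (auto& thread : workers) {
            thread.join();
        }
    }

    // Analogous to SyncPause: returns only after every worker has
    // acknowledged the new state through pause_count.
    void SyncPause(bool pause) {
        std::unique_lock<std::mutex> lock(mutex);
        paused = pause;
        run_cv.notify_all();
        if (!pause) {
            unpause_cv.notify_all();
        }
        ack_cv.wait(lock, [this, pause] {
            return pause ? pause_count == workers.size() : pause_count == 0;
        });
    }

private:
    void ThreadLoop() {
        std::unique_lock<std::mutex> lock(mutex);
        while (!shutting_down) {
            // This is where timed events would normally be serviced; here the
            // worker just sleeps until it is paused or shut down.
            run_cv.wait(lock, [this] { return paused || shutting_down; });
            if (shutting_down) {
                break;
            }
            ++pause_count;       // acknowledge the pause
            ack_cv.notify_all(); // last worker to arrive wakes SyncPause
            unpause_cv.wait(lock, [this] { return !paused || shutting_down; });
            --pause_count;       // acknowledge the resume
            ack_cv.notify_all();
        }
    }

    std::mutex mutex;                   // plays the role of event_mutex
    std::condition_variable run_cv;     // plays the role of event_cv
    std::condition_variable unpause_cv; // plays the role of wait_pause_cv
    std::condition_variable ack_cv;     // plays the role of wait_signal_cv
    std::vector<std::thread> workers;   // plays the role of worker_threads
    std::size_t pause_count = 0;
    bool paused = false;
    bool shutting_down = false;
};

int main() {
    PausableWorkers workers{2};
    workers.SyncPause(true);
    std::puts("all workers paused");
    workers.SyncPause(false);
    std::puts("all workers resumed");
}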