Diffstat (limited to 'src/core/core_timing.cpp')
-rw-r--r--  src/core/core_timing.cpp | 189
1 file changed, 80 insertions(+), 109 deletions(-)
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index 4ea00c277..a0dd5db24 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -8,94 +8,42 @@
 #include <mutex>
 #include <string>
 #include <tuple>
-#include <unordered_map>
-#include <vector>
+
 #include "common/assert.h"
 #include "common/thread.h"
-#include "common/threadsafe_queue.h"
 #include "core/core_timing_util.h"
 
 namespace Core::Timing {
 
-static s64 global_timer;
-static int slice_length;
-static int downcount;
-
-struct EventType {
-    TimedCallback callback;
-    const std::string* name;
-};
+constexpr int MAX_SLICE_LENGTH = 20000;
 
-struct Event {
+struct CoreTiming::Event {
     s64 time;
     u64 fifo_order;
     u64 userdata;
     const EventType* type;
-};
-
-// Sort by time, unless the times are the same, in which case sort by the order added to the queue
-static bool operator>(const Event& left, const Event& right) {
-    return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
-}
-
-static bool operator<(const Event& left, const Event& right) {
-    return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
-}
-
-// unordered_map stores each element separately as a linked list node so pointers to elements
-// remain stable regardless of rehashes/resizing.
-static std::unordered_map<std::string, EventType> event_types;
 
-// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
-// We don't use std::priority_queue because we need to be able to serialize, unserialize and
-// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accomodated
-// by the standard adaptor class.
-static std::vector<Event> event_queue;
-static u64 event_fifo_id;
-// the queue for storing the events from other threads threadsafe until they will be added
-// to the event_queue by the emu thread
-static Common::MPSCQueue<Event> ts_queue;
-
-// the queue for unscheduling the events from other threads threadsafe
-static Common::MPSCQueue<std::pair<const EventType*, u64>> unschedule_queue;
-
-constexpr int MAX_SLICE_LENGTH = 20000;
-
-static s64 idled_cycles;
-
-// Are we in a function that has been called from Advance()
-// If events are sheduled from a function that gets called from Advance(),
-// don't change slice_length and downcount.
-static bool is_global_timer_sane;
-
-static EventType* ev_lost = nullptr;
-
-EventType* RegisterEvent(const std::string& name, TimedCallback callback) {
-    // check for existing type with same name.
-    // we want event type names to remain unique so that we can use them for serialization.
-    ASSERT_MSG(event_types.find(name) == event_types.end(),
-               "CoreTiming Event \"{}\" is already registered. Events should only be registered "
-               "during Init to avoid breaking save states.",
-               name.c_str());
+    // Sort by time, unless the times are the same, in which case sort by
+    // the order added to the queue
+    friend bool operator>(const Event& left, const Event& right) {
+        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
+    }
 
-    auto info = event_types.emplace(name, EventType{callback, nullptr});
-    EventType* event_type = &info.first->second;
-    event_type->name = &info.first->first;
-    return event_type;
-}
+    friend bool operator<(const Event& left, const Event& right) {
+        return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
+    }
+};
 
-void UnregisterAllEvents() {
-    ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
-    event_types.clear();
-}
+CoreTiming::CoreTiming() = default;
+CoreTiming::~CoreTiming() = default;
 
-void Init() {
+void CoreTiming::Initialize() {
     downcount = MAX_SLICE_LENGTH;
     slice_length = MAX_SLICE_LENGTH;
     global_timer = 0;
     idled_cycles = 0;
 
-    // The time between CoreTiming being intialized and the first call to Advance() is considered
+    // The time between CoreTiming being initialized and the first call to Advance() is considered
     // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
     // executing the first cycle of each slice to prepare the slice length and downcount for
     // that slice.
@@ -107,50 +55,51 @@ void Init() {
     ev_lost = RegisterEvent("_lost_event", empty_timed_callback);
 }
 
-void Shutdown() {
+void CoreTiming::Shutdown() {
     MoveEvents();
     ClearPendingEvents();
     UnregisterAllEvents();
 }
 
-// This should only be called from the CPU thread. If you are calling
-// it from any other thread, you are doing something evil
-u64 GetTicks() {
-    u64 ticks = static_cast<u64>(global_timer);
-    if (!is_global_timer_sane) {
-        ticks += slice_length - downcount;
-    }
-    return ticks;
-}
-
-void AddTicks(u64 ticks) {
-    downcount -= static_cast<int>(ticks);
-}
+EventType* CoreTiming::RegisterEvent(const std::string& name, TimedCallback callback) {
+    // check for existing type with same name.
+    // we want event type names to remain unique so that we can use them for serialization.
+    ASSERT_MSG(event_types.find(name) == event_types.end(),
+               "CoreTiming Event \"{}\" is already registered. Events should only be registered "
+               "during Init to avoid breaking save states.",
+               name.c_str());
 
-u64 GetIdleTicks() {
-    return static_cast<u64>(idled_cycles);
+    auto info = event_types.emplace(name, EventType{callback, nullptr});
+    EventType* event_type = &info.first->second;
+    event_type->name = &info.first->first;
+    return event_type;
 }
 
-void ClearPendingEvents() {
-    event_queue.clear();
+void CoreTiming::UnregisterAllEvents() {
+    ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
+    event_types.clear();
 }
 
-void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
+void CoreTiming::ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
     ASSERT(event_type != nullptr);
-    s64 timeout = GetTicks() + cycles_into_future;
+    const s64 timeout = GetTicks() + cycles_into_future;
+
     // If this event needs to be scheduled before the next advance(), force one early
-    if (!is_global_timer_sane)
+    if (!is_global_timer_sane) {
         ForceExceptionCheck(cycles_into_future);
+    }
+
     event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
     std::push_heap(event_queue.begin(), event_queue.end(), std::greater<>());
 }
 
-void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
+void CoreTiming::ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type,
+                                         u64 userdata) {
     ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
 }
 
-void UnscheduleEvent(const EventType* event_type, u64 userdata) {
-    auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
+void CoreTiming::UnscheduleEvent(const EventType* event_type, u64 userdata) {
+    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
         return e.type == event_type && e.userdata == userdata;
     });
 
@@ -161,13 +110,33 @@ void UnscheduleEvent(const EventType* event_type, u64 userdata) {
     }
 }
 
-void UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) {
+void CoreTiming::UnscheduleEventThreadsafe(const EventType* event_type, u64 userdata) {
     unschedule_queue.Push(std::make_pair(event_type, userdata));
 }
 
-void RemoveEvent(const EventType* event_type) {
-    auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
-                              [&](const Event& e) { return e.type == event_type; });
+u64 CoreTiming::GetTicks() const {
+    u64 ticks = static_cast<u64>(global_timer);
+    if (!is_global_timer_sane) {
+        ticks += slice_length - downcount;
+    }
+    return ticks;
+}
+
+u64 CoreTiming::GetIdleTicks() const {
+    return static_cast<u64>(idled_cycles);
+}
+
+void CoreTiming::AddTicks(u64 ticks) {
+    downcount -= static_cast<int>(ticks);
+}
+
+void CoreTiming::ClearPendingEvents() {
+    event_queue.clear();
+}
+
+void CoreTiming::RemoveEvent(const EventType* event_type) {
+    const auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
+                                    [&](const Event& e) { return e.type == event_type; });
 
     // Removing random items breaks the invariant so we have to re-establish it.
     if (itr != event_queue.end()) {
@@ -176,22 +145,24 @@ void RemoveEvent(const EventType* event_type) {
     }
 }
 
-void RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
+void CoreTiming::RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
     MoveEvents();
     RemoveEvent(event_type);
 }
 
-void ForceExceptionCheck(s64 cycles) {
+void CoreTiming::ForceExceptionCheck(s64 cycles) {
     cycles = std::max<s64>(0, cycles);
-    if (downcount > cycles) {
-        // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
-        // here. Account for cycles already executed by adjusting the g.slice_length
-        slice_length -= downcount - static_cast<int>(cycles);
-        downcount = static_cast<int>(cycles);
+    if (downcount <= cycles) {
+        return;
     }
+
+    // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int
+    // here. Account for cycles already executed by adjusting the g.slice_length
+    slice_length -= downcount - static_cast<int>(cycles);
+    downcount = static_cast<int>(cycles);
 }
 
-void MoveEvents() {
+void CoreTiming::MoveEvents() {
     for (Event ev; ts_queue.Pop(ev);) {
         ev.fifo_order = event_fifo_id++;
         event_queue.emplace_back(std::move(ev));
@@ -199,13 +170,13 @@ void MoveEvents() {
     }
 }
 
-void Advance() {
+void CoreTiming::Advance() {
     MoveEvents();
     for (std::pair<const EventType*, u64> ev; unschedule_queue.Pop(ev);) {
         UnscheduleEvent(ev.first, ev.second);
     }
 
-    int cycles_executed = slice_length - downcount;
+    const int cycles_executed = slice_length - downcount;
     global_timer += cycles_executed;
     slice_length = MAX_SLICE_LENGTH;
 
@@ -229,16 +200,16 @@ void Advance() {
     downcount = slice_length;
 }
 
-void Idle() {
+void CoreTiming::Idle() {
     idled_cycles += downcount;
     downcount = 0;
 }
 
-std::chrono::microseconds GetGlobalTimeUs() {
+std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
     return std::chrono::microseconds{GetTicks() * 1000000 / BASE_CLOCK_RATE};
 }
 
-int GetDowncount() {
+int CoreTiming::GetDowncount() const {
     return downcount;
 }
 
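
The scheduling logic carried over by this change keeps events in a std::vector maintained as a min-heap through std::push_heap/std::pop_heap with std::greater<>, ordered by (time, fifo_order) so that equal-time events fire in insertion order. Below is a minimal, self-contained sketch of that ordering scheme, not yuzu code; the names SketchEvent and fifo_id are illustrative only.

// Minimal sketch (not yuzu code) of the (time, fifo_order) min-heap ordering
// that ScheduleEvent()/Advance() rely on in this file.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iostream>
#include <tuple>
#include <vector>

struct SketchEvent {
    std::int64_t time;        // absolute tick at which the event fires
    std::uint64_t fifo_order; // tie-breaker: order of insertion

    friend bool operator>(const SketchEvent& l, const SketchEvent& r) {
        return std::tie(l.time, l.fifo_order) > std::tie(r.time, r.fifo_order);
    }
};

int main() {
    std::vector<SketchEvent> queue;
    std::uint64_t fifo_id = 0;

    // Schedule: push_back followed by push_heap, as ScheduleEvent() does.
    for (std::int64_t t : {500, 100, 100, 300}) {
        queue.push_back(SketchEvent{t, fifo_id++});
        std::push_heap(queue.begin(), queue.end(), std::greater<>());
    }

    // Dispatch: pop_heap moves the earliest event to the back, then pop_back,
    // mirroring how Advance() drains events that have come due.
    while (!queue.empty()) {
        std::pop_heap(queue.begin(), queue.end(), std::greater<>());
        const SketchEvent ev = queue.back();
        queue.pop_back();
        std::cout << "fire t=" << ev.time << " fifo=" << ev.fifo_order << '\n';
    }
    // Prints (100,1), (100,2), (300,3), (500,0): earliest time first,
    // insertion order breaking ties.
}

After this change the real queue is reached through CoreTiming member functions on an instance (ScheduleEvent pushes onto the heap, Advance pops due events) rather than through the previous file-scope globals and free functions.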