Diffstat (limited to 'src/core/core_timing.cpp')
 -rw-r--r--  src/core/core_timing.cpp | 610
 1 file changed, 143 insertions(+), 467 deletions(-)
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index c90e62385..a0656f0a8 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -1,562 +1,238 @@
-// Copyright (c) 2012- PPSSPP Project / Dolphin Project.
-// Licensed under GPLv2 or any later version
+// Copyright 2008 Dolphin Emulator Project / 2017 Citra Emulator Project
+// Licensed under GPLv2+
 // Refer to the license.txt file included.
 
-#include <atomic>
+#include "core/core_timing.h"
+
+#include <algorithm>
 #include <cinttypes>
 #include <mutex>
+#include <string>
+#include <tuple>
+#include <unordered_map>
 #include <vector>
-#include "common/chunk_file.h"
+#include "common/assert.h"
 #include "common/logging/log.h"
-#include "common/string_util.h"
-#include "core/arm/arm_interface.h"
-#include "core/core.h"
-#include "core/core_timing.h"
-
-int g_clock_rate_arm11 = BASE_CLOCK_RATE;
-
-// is this really necessary?
-#define INITIAL_SLICE_LENGTH 20000
-#define MAX_SLICE_LENGTH 100000000
+#include "common/thread.h"
+#include "common/threadsafe_queue.h"
 
 namespace CoreTiming {
-struct EventType {
-    EventType() {}
 
-    EventType(TimedCallback cb, const char* n) : callback(cb), name(n) {}
+static s64 global_timer;
+static int slice_length;
+static int downcount;
 
+struct EventType {
     TimedCallback callback;
-    const char* name;
+    const std::string* name;
 };
 
-static std::vector<EventType> event_types;
-
-struct BaseEvent {
+struct Event {
     s64 time;
+    u64 fifo_order;
     u64 userdata;
-    int type;
+    const EventType* type;
 };
 
-typedef LinkedListItem<BaseEvent> Event;
-
-static Event* first;
-static Event* ts_first;
-static Event* ts_last;
-
-// event pools
-static Event* event_pool = nullptr;
-static Event* event_ts_pool = nullptr;
-static int allocated_ts_events = 0;
-// Optimization to skip MoveEvents when possible.
-static std::atomic<bool> has_ts_events(false);
-
-int g_slice_length;
-
-static s64 global_timer;
-static s64 idled_cycles;
-static s64 last_global_time_ticks;
-static s64 last_global_time_us;
-
-static s64 down_count = 0; ///< A decreasing counter of remaining cycles before the next event,
-                           /// decreased by the cpu run loop
-
-static std::recursive_mutex external_event_section;
-
-// Warning: not included in save state.
-using AdvanceCallback = void(int cycles_executed);
-static AdvanceCallback* advance_callback = nullptr;
-static std::vector<MHzChangeCallback> mhz_change_callbacks;
-
-static void FireMhzChange() {
-    for (auto callback : mhz_change_callbacks)
-        callback();
-}
-
-void SetClockFrequencyMHz(int cpu_mhz) {
-    // When the mhz changes, we keep track of what "time" it was before hand.
-    // This way, time always moves forward, even if mhz is changed.
-    last_global_time_us = GetGlobalTimeUs();
-    last_global_time_ticks = GetTicks();
-
-    g_clock_rate_arm11 = cpu_mhz * 1000000;
-    // TODO: Rescale times of scheduled events?
-
-    FireMhzChange();
-}
-
-int GetClockFrequencyMHz() {
-    return g_clock_rate_arm11 / 1000000;
+// Sort by time, unless the times are the same, in which case sort by the order added to the queue
+static bool operator>(const Event& left, const Event& right) {
+    return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
 }
 
-u64 GetGlobalTimeUs() {
-    s64 ticks_since_last = GetTicks() - last_global_time_ticks;
-    int freq = GetClockFrequencyMHz();
-    s64 us_since_last = ticks_since_last / freq;
-    return last_global_time_us + us_since_last;
+static bool operator<(const Event& left, const Event& right) {
+    return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
 }
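The two comparators above give events a strict lexicographic order: earlier time first, and for equal times the smaller fifo_order (i.e. the event scheduled first). A minimal standalone sketch of that std::tie idiom, with local aliases standing in for the common_types.h definitions of s64/u64:

    #include <cassert>
    #include <cstdint>
    #include <tuple>

    using s64 = std::int64_t;
    using u64 = std::uint64_t;

    struct Ev {
        s64 time;
        u64 fifo_order;
    };

    // Same shape as the Event comparator in the diff: compare (time, fifo_order) pairs.
    static bool operator>(const Ev& left, const Ev& right) {
        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
    }

    int main() {
        Ev a{100, 0}, b{100, 1}, c{99, 7};
        assert(b > a); // equal times fall back to scheduling order
        assert(a > c); // an earlier time always wins, regardless of fifo_order
    }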
 
-static Event* GetNewEvent() {
-    if (!event_pool)
-        return new Event;
-
-    Event* event = event_pool;
-    event_pool = event->next;
-    return event;
-}
+// unordered_map stores each element separately as a linked list node so pointers to elements
+// remain stable regardless of rehashes/resizing.
+static std::unordered_map<std::string, EventType> event_types;
 
-static Event* GetNewTsEvent() {
-    allocated_ts_events++;
+// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
+// We don't use std::priority_queue because we need to be able to serialize, unserialize and
+// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accommodated
+// by the standard adaptor class.
+static std::vector<Event> event_queue;
+static u64 event_fifo_id;
+// The queue for storing events scheduled from other threads; they are kept here until the
+// emu thread moves them into event_queue.
+static Common::MPSCQueue<Event, false> ts_queue;
 
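Because event_queue is a plain std::vector, the heap property is maintained by hand with the <algorithm> heap functions; with std::greater as the comparator, front() is always the soonest event (a min-heap). A standalone sketch of that pattern, using ints in place of Event:

    #include <algorithm>
    #include <cassert>
    #include <functional>
    #include <vector>

    int main() {
        std::vector<int> q;

        // Insert: append, then sift the new element into heap position.
        for (int t : {30, 10, 20}) {
            q.push_back(t);
            std::push_heap(q.begin(), q.end(), std::greater<>());
        }
        assert(q.front() == 10); // min-heap: the earliest "time" is at the front

        // Pop: move the front element to the back, then shrink the vector.
        std::pop_heap(q.begin(), q.end(), std::greater<>());
        assert(q.back() == 10);
        q.pop_back();
        assert(q.front() == 20);
    }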
-    if (!event_ts_pool)
-        return new Event;
+static constexpr int MAX_SLICE_LENGTH = 20000;
 
-    Event* event = event_ts_pool;
-    event_ts_pool = event->next;
-    return event;
-}
-
-static void FreeEvent(Event* event) {
-    event->next = event_pool;
-    event_pool = event;
-}
+static s64 idled_cycles;
 
-static void FreeTsEvent(Event* event) {
-    event->next = event_ts_pool;
-    event_ts_pool = event;
-    allocated_ts_events--;
-}
+// Are we in a function that has been called from Advance()?
+// If events are scheduled from a function that gets called from Advance(),
+// don't change slice_length and downcount.
+static bool is_global_timer_sane;
 
-int RegisterEvent(const char* name, TimedCallback callback) {
-    event_types.emplace_back(callback, name);
-    return (int)event_types.size() - 1;
-}
+static EventType* ev_lost = nullptr;
 
-static void AntiCrashCallback(u64 userdata, int cycles_late) {
-    LOG_CRITICAL(Core_Timing, "Savestate broken: an unregistered event was called.");
-}
+static void EmptyTimedCallback(u64 userdata, s64 cycles_late) {}
 
-void RestoreRegisterEvent(int event_type, const char* name, TimedCallback callback) {
-    if (event_type >= (int)event_types.size())
-        event_types.resize(event_type + 1, EventType(AntiCrashCallback, "INVALID EVENT"));
+EventType* RegisterEvent(const std::string& name, TimedCallback callback) {
+    // Check for an existing event type with the same name. We want event type names to remain
+    // unique so that we can use them for serialization.
+    ASSERT_MSG(event_types.find(name) == event_types.end(),
+               "CoreTiming Event \"%s\" is already registered. Events should only be registered "
+               "during Init to avoid breaking save states.",
+               name.c_str());
 
-    event_types[event_type] = EventType(callback, name);
+    auto info = event_types.emplace(name, EventType{callback, nullptr});
+    EventType* event_type = &info.first->second;
+    event_type->name = &info.first->first;
+    return event_type;
 }
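RegisterEvent relies on two unordered_map guarantees: emplace returns a pair of (iterator to the entry, bool inserted), and pointers to an entry's key and value stay valid across rehashes, so the EventType can point back at its own key for its name. A standalone sketch of the same idiom:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    struct Info {
        const std::string* name = nullptr;
    };

    int main() {
        std::unordered_map<std::string, Info> types;

        auto result = types.emplace("timer_tick", Info{});
        assert(result.second); // true: a fresh entry was inserted
        Info* info = &result.first->second;
        info->name = &result.first->first; // point back at the stored key

        types.reserve(1024); // force a rehash
        assert(*info->name == "timer_tick"); // pointers to elements survive rehashing
    }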
 
 void UnregisterAllEvents() {
-    if (first)
-        LOG_ERROR(Core_Timing, "Cannot unregister events with events pending");
+    ASSERT_MSG(event_queue.empty(), "Cannot unregister events with events pending");
     event_types.clear();
 }
 
 void Init() {
-    down_count = INITIAL_SLICE_LENGTH;
-    g_slice_length = INITIAL_SLICE_LENGTH;
+    downcount = MAX_SLICE_LENGTH;
+    slice_length = MAX_SLICE_LENGTH;
     global_timer = 0;
     idled_cycles = 0;
-    last_global_time_ticks = 0;
-    last_global_time_us = 0;
-    has_ts_events = 0;
-    mhz_change_callbacks.clear();
-
-    first = nullptr;
-    ts_first = nullptr;
-    ts_last = nullptr;
-
-    event_pool = nullptr;
-    event_ts_pool = nullptr;
-    allocated_ts_events = 0;
 
-    advance_callback = nullptr;
+    // The time between CoreTiming being initialized and the first call to Advance() is considered
+    // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
+    // executing the first cycle of each slice to prepare the slice length and downcount for
+    // that slice.
+    is_global_timer_sane = true;
+
+    event_fifo_id = 0;
+    ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
 }
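Under the new API an event is registered once (normally during Init) and then scheduled through the returned handle. A hypothetical usage sketch, assuming only the functions visible in this diff; VBlankCallback and the "VBlank" name are illustrative, not part of the change:

    // Hypothetical driver code built on the CoreTiming API shown in this diff.
    #include "core/core_timing.h"

    static CoreTiming::EventType* vblank_event = nullptr; // illustrative

    // Matches the callback shape of EmptyTimedCallback above.
    static void VBlankCallback(u64 userdata, s64 cycles_late) {
        // Handle the event; cycles_late reports how far past its timestamp it fired.
    }

    void SetupTiming() {
        CoreTiming::Init();
        // Register once, up front; re-registering the same name trips the ASSERT_MSG.
        vblank_event = CoreTiming::RegisterEvent("VBlank", VBlankCallback);
        CoreTiming::ScheduleEvent(10000, vblank_event, /*userdata=*/0);
    }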
 
 void Shutdown() {
     MoveEvents();
     ClearPendingEvents();
     UnregisterAllEvents();
-
-    while (event_pool) {
-        Event* event = event_pool;
-        event_pool = event->next;
-        delete event;
-    }
-
-    std::lock_guard<std::recursive_mutex> lock(external_event_section);
-    while (event_ts_pool) {
-        Event* event = event_ts_pool;
-        event_ts_pool = event->next;
-        delete event;
-    }
 }
 
-void AddTicks(u64 ticks) {
-    down_count -= ticks;
-    if (down_count < 0) {
-        Advance();
+// This should only be called from the CPU thread. If you are calling
+// it from any other thread, you are doing something evil
+u64 GetTicks() {
+    u64 ticks = static_cast<u64>(global_timer);
+    if (!is_global_timer_sane) {
+        ticks += slice_length - downcount;
     }
+    return ticks;
 }
 
-u64 GetTicks() {
-    return (u64)global_timer + g_slice_length - down_count;
+void AddTicks(u64 ticks) {
+    downcount -= ticks;
 }
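GetTicks and AddTicks work because global_timer only advances at slice boundaries; mid-slice, the cycles already executed are exactly slice_length - downcount. A standalone sketch of that bookkeeping, with locals standing in for the statics:

    #include <cassert>
    #include <cstdint>

    int main() {
        std::int64_t global_timer = 0; // advanced only at slice boundaries (Advance())
        int slice_length = 20000;      // MAX_SLICE_LENGTH in the diff
        int downcount = slice_length;  // decremented as the CPU runs

        downcount -= 1500; // the CPU core reports 1500 executed cycles (AddTicks(1500))

        // Mid-slice, the current time includes the cycles consumed so far:
        std::int64_t ticks = global_timer + (slice_length - downcount);
        assert(ticks == 1500);

        // At the slice boundary, the executed cycles are folded into global_timer:
        global_timer += slice_length - downcount;
        assert(global_timer == 1500);
    }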
 
 u64 GetIdleTicks() {
-    return (u64)idled_cycles;
-}
-
-// This is to be called when outside threads, such as the graphics thread, wants to
-// schedule things to be executed on the main thread.
-void ScheduleEvent_Threadsafe(s64 cycles_into_future, int event_type, u64 userdata) {
-    std::lock_guard<std::recursive_mutex> lock(external_event_section);
-    Event* new_event = GetNewTsEvent();
-    new_event->time = GetTicks() + cycles_into_future;
-    new_event->type = event_type;
-    new_event->next = nullptr;
-    new_event->userdata = userdata;
-    if (!ts_first)
-        ts_first = new_event;
-    if (ts_last)
-        ts_last->next = new_event;
-    ts_last = new_event;
-
-    has_ts_events = true;
-}
-
-// Same as ScheduleEvent_Threadsafe(0, ...) EXCEPT if we are already on the CPU thread
-// in which case the event will get handled immediately, before returning.
-void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata) {
-    if (false) // Core::IsCPUThread())
-    {
-        std::lock_guard<std::recursive_mutex> lock(external_event_section);
-        event_types[event_type].callback(userdata, 0);
-    } else
-        ScheduleEvent_Threadsafe(0, event_type, userdata);
+    return static_cast<u64>(idled_cycles);
 }
 
 void ClearPendingEvents() {
-    while (first) {
-        Event* event = first->next;
-        FreeEvent(first);
-        first = event;
-    }
-}
-
-static void AddEventToQueue(Event* new_event) {
-    Event* prev_event = nullptr;
-    Event** next_event = &first;
-    for (;;) {
-        Event*& next = *next_event;
-        if (!next || new_event->time < next->time) {
-            new_event->next = next;
-            next = new_event;
-            break;
-        }
-        prev_event = next;
-        next_event = &prev_event->next;
-    }
-}
-
-void ScheduleEvent(s64 cycles_into_future, int event_type, u64 userdata) {
-    Event* new_event = GetNewEvent();
-    new_event->userdata = userdata;
-    new_event->type = event_type;
-    new_event->time = GetTicks() + cycles_into_future;
-    AddEventToQueue(new_event);
-}
-
-s64 UnscheduleEvent(int event_type, u64 userdata) {
-    s64 result = 0;
-    if (!first)
-        return result;
-    while (first) {
-        if (first->type == event_type && first->userdata == userdata) {
-            result = first->time - GetTicks();
-
-            Event* next = first->next;
-            FreeEvent(first);
-            first = next;
-        } else {
-            break;
-        }
-    }
-    if (!first)
-        return result;
-
-    Event* prev_event = first;
-    Event* ptr = prev_event->next;
-
-    while (ptr) {
-        if (ptr->type == event_type && ptr->userdata == userdata) {
-            result = ptr->time - GetTicks();
-
-            prev_event->next = ptr->next;
-            FreeEvent(ptr);
-            ptr = prev_event->next;
-        } else {
-            prev_event = ptr;
-            ptr = ptr->next;
-        }
-    }
-
-    return result;
-}
-
-s64 UnscheduleThreadsafeEvent(int event_type, u64 userdata) {
-    s64 result = 0;
-    std::lock_guard<std::recursive_mutex> lock(external_event_section);
-    if (!ts_first)
-        return result;
-
-    while (ts_first) {
-        if (ts_first->type == event_type && ts_first->userdata == userdata) {
-            result = ts_first->time - GetTicks();
-
-            Event* next = ts_first->next;
-            FreeTsEvent(ts_first);
-            ts_first = next;
-        } else {
-            break;
-        }
-    }
-
-    if (!ts_first) {
-        ts_last = nullptr;
-        return result;
-    }
-
-    Event* prev_event = ts_first;
-    Event* next = prev_event->next;
-    while (next) {
-        if (next->type == event_type && next->userdata == userdata) {
-            result = next->time - GetTicks();
-
-            prev_event->next = next->next;
-            if (next == ts_last)
-                ts_last = prev_event;
-            FreeTsEvent(next);
-            next = prev_event->next;
-        } else {
-            prev_event = next;
-            next = next->next;
-        }
-    }
-
-    return result;
+    event_queue.clear();
 }
 
+void ScheduleEvent(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
+    ASSERT(event_type != nullptr);
+    s64 timeout = GetTicks() + cycles_into_future;
+
+    // If this event needs to be scheduled before the next advance(), force one early
+    if (!is_global_timer_sane)
+        ForceExceptionCheck(cycles_into_future);
+
+    event_queue.emplace_back(Event{timeout, event_fifo_id++, userdata, event_type});
+    std::push_heap(event_queue.begin(), event_queue.end(), std::greater<Event>());
 }
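A consequence of stamping each event with event_fifo_id++ is that events scheduled for the same tick pop in scheduling order. A standalone sketch demonstrating that with the same heap discipline:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <tuple>
    #include <vector>

    struct Ev {
        std::int64_t time;
        std::uint64_t fifo_order;
        char tag;
    };

    static bool operator>(const Ev& left, const Ev& right) {
        return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
    }

    int main() {
        std::vector<Ev> q;
        std::uint64_t fifo_id = 0;

        // Two events for tick 50 (scheduled 'a' then 'b'), plus an earlier one at tick 10.
        for (Ev e : {Ev{50, 0, 'a'}, Ev{50, 0, 'b'}, Ev{10, 0, 'c'}}) {
            e.fifo_order = fifo_id++;
            q.push_back(e);
            std::push_heap(q.begin(), q.end(), std::greater<>());
        }

        char order[3];
        for (char& slot : order) {
            std::pop_heap(q.begin(), q.end(), std::greater<>());
            slot = q.back().tag;
            q.pop_back();
        }
        assert(order[0] == 'c' && order[1] == 'a' && order[2] == 'b');
    }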
 
-// Warning: not included in save state.
-void RegisterAdvanceCallback(AdvanceCallback* callback) {
-    advance_callback = callback;
-}
-
-void RegisterMHzChangeCallback(MHzChangeCallback callback) {
-    mhz_change_callbacks.push_back(callback);
-}
-
-bool IsScheduled(int event_type) {
-    if (!first)
-        return false;
-    Event* event = first;
-    while (event) {
-        if (event->type == event_type)
-            return true;
-        event = event->next;
-    }
-    return false;
-}
-
-void RemoveEvent(int event_type) {
-    if (!first)
-        return;
-    while (first) {
-        if (first->type == event_type) {
-            Event* next = first->next;
-            FreeEvent(first);
-            first = next;
-        } else {
-            break;
-        }
-    }
-    if (!first)
-        return;
-    Event* prev = first;
-    Event* next = prev->next;
-    while (next) {
-        if (next->type == event_type) {
-            prev->next = next->next;
-            FreeEvent(next);
-            next = prev->next;
-        } else {
-            prev = next;
-            next = next->next;
-        }
-    }
-}
-
-void RemoveThreadsafeEvent(int event_type) {
-    std::lock_guard<std::recursive_mutex> lock(external_event_section);
-    if (!ts_first)
-        return;
-
-    while (ts_first) {
-        if (ts_first->type == event_type) {
-            Event* next = ts_first->next;
-            FreeTsEvent(ts_first);
-            ts_first = next;
-        } else {
-            break;
-        }
-    }
-
-    if (!ts_first) {
-        ts_last = nullptr;
-        return;
-    }
-
-    Event* prev = ts_first;
-    Event* next = prev->next;
-    while (next) {
-        if (next->type == event_type) {
-            prev->next = next->next;
-            if (next == ts_last)
-                ts_last = prev;
-            FreeTsEvent(next);
-            next = prev->next;
-        } else {
-            prev = next;
-            next = next->next;
-        }
-    }
-}
+void ScheduleEventThreadsafe(s64 cycles_into_future, const EventType* event_type, u64 userdata) {
+    ts_queue.Push(Event{global_timer + cycles_into_future, 0, userdata, event_type});
+}
+
+void UnscheduleEvent(const EventType* event_type, u64 userdata) {
+    auto itr = std::remove_if(event_queue.begin(), event_queue.end(), [&](const Event& e) {
+        return e.type == event_type && e.userdata == userdata;
+    });
+
+    // Removing random items breaks the invariant so we have to re-establish it.
+    if (itr != event_queue.end()) {
+        event_queue.erase(itr, event_queue.end());
+        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<Event>());
+    }
+}
+
+void RemoveEvent(const EventType* event_type) {
+    auto itr = std::remove_if(event_queue.begin(), event_queue.end(),
+                              [&](const Event& e) { return e.type == event_type; });
+
+    // Removing random items breaks the invariant so we have to re-establish it.
+    if (itr != event_queue.end()) {
+        event_queue.erase(itr, event_queue.end());
+        std::make_heap(event_queue.begin(), event_queue.end(), std::greater<Event>());
+    }
 }
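std::remove_if only partitions the vector, shuffling the kept elements forward and leaving the tail in an unspecified state, so the erase plus std::make_heap pair above is what actually drops the events and repairs the heap invariant. A standalone sketch of this erase-from-a-heap pattern:

    #include <algorithm>
    #include <cassert>
    #include <functional>
    #include <vector>

    int main() {
        std::vector<int> q = {10, 20, 30, 20, 40};
        std::make_heap(q.begin(), q.end(), std::greater<>());

        // Drop every element matching the predicate (here: both 20s).
        auto itr = std::remove_if(q.begin(), q.end(), [](int t) { return t == 20; });
        if (itr != q.end()) {
            q.erase(itr, q.end());
            // Arbitrary erasure broke the heap ordering; rebuild it wholesale.
            std::make_heap(q.begin(), q.end(), std::greater<>());
        }

        assert(q.size() == 3);
        assert(q.front() == 10); // still a valid min-heap
    }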
 
-void RemoveAllEvents(int event_type) {
-    RemoveThreadsafeEvent(event_type);
+void RemoveNormalAndThreadsafeEvent(const EventType* event_type) {
+    MoveEvents();
     RemoveEvent(event_type);
 }
 
-// This raise only the events required while the fifo is processing data
-void ProcessFifoWaitEvents() {
-    while (first) {
-        if (first->time <= (s64)GetTicks()) {
-            Event* evt = first;
-            first = first->next;
-            event_types[evt->type].callback(evt->userdata, (int)(GetTicks() - evt->time));
-            FreeEvent(evt);
-        } else {
-            break;
-        }
-    }
-}
+void ForceExceptionCheck(s64 cycles) {
+    cycles = std::max<s64>(0, cycles);
+    if (downcount > cycles) {
+        // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an
+        // int here. Account for cycles already executed by adjusting slice_length.
+        slice_length -= downcount - static_cast<int>(cycles);
+        downcount = static_cast<int>(cycles);
+    }
+}
 
 void MoveEvents() {
-    has_ts_events = false;
-
-    std::lock_guard<std::recursive_mutex> lock(external_event_section);
-    // Move events from async queue into main queue
-    while (ts_first) {
-        Event* next = ts_first->next;
-        AddEventToQueue(ts_first);
-        ts_first = next;
-    }
-    ts_last = nullptr;
-
-    // Move free events to threadsafe pool
-    while (allocated_ts_events > 0 && event_pool) {
-        Event* event = event_pool;
-        event_pool = event->next;
-        event->next = event_ts_pool;
-        event_ts_pool = event;
-        allocated_ts_events--;
-    }
+    for (Event ev; ts_queue.Pop(ev);) {
+        ev.fifo_order = event_fifo_id++;
+        event_queue.emplace_back(std::move(ev));
+        std::push_heap(event_queue.begin(), event_queue.end(), std::greater<Event>());
+    }
 }
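MoveEvents is the single consumer of the multi-producer queue: any thread may Push, only the emu thread Pops, and each drained event gets a fresh fifo_order before going onto the heap. A sketch of the same drain loop against a stand-in queue (a mutex-guarded std::queue, since Common::MPSCQueue is Citra-internal):

    #include <cassert>
    #include <cstdint>
    #include <mutex>
    #include <queue>
    #include <vector>

    // Stand-in for Common::MPSCQueue: any thread may Push, one thread Pops.
    template <typename T>
    class NaiveMPSCQueue {
    public:
        void Push(T value) {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(value));
        }
        bool Pop(T& out) {
            std::lock_guard<std::mutex> lock(mutex);
            if (queue.empty())
                return false;
            out = std::move(queue.front());
            queue.pop();
            return true;
        }

    private:
        std::mutex mutex;
        std::queue<T> queue;
    };

    struct Ev {
        std::int64_t time;
        std::uint64_t fifo_order;
    };

    int main() {
        NaiveMPSCQueue<Ev> ts_queue;
        std::vector<Ev> event_queue;
        std::uint64_t fifo_id = 0;

        ts_queue.Push(Ev{100, 0});
        ts_queue.Push(Ev{50, 0});

        // Same shape as MoveEvents(): drain until Pop reports the queue empty.
        for (Ev ev; ts_queue.Pop(ev);) {
            ev.fifo_order = fifo_id++; // FIFO ids are assigned on the consumer side
            event_queue.push_back(ev);
        }
        assert(event_queue.size() == 2);
    }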
 
-void ForceCheck() {
-    s64 cycles_executed = g_slice_length - down_count;
-    global_timer += cycles_executed;
-    // This will cause us to check for new events immediately.
-    down_count = 0;
-    // But let's not eat a bunch more time in Advance() because of this.
-    g_slice_length = 0;
-}
-
 void Advance() {
-    s64 cycles_executed = g_slice_length - down_count;
+    MoveEvents();
+
+    int cycles_executed = slice_length - downcount;
     global_timer += cycles_executed;
-    down_count = g_slice_length;
-
-    if (has_ts_events)
-        MoveEvents();
-    ProcessFifoWaitEvents();
-
-    if (!first) {
-        if (g_slice_length < 10000) {
-            g_slice_length += 10000;
-            down_count += g_slice_length;
-        }
-    } else {
-        // Note that events can eat cycles as well.
-        int target = (int)(first->time - global_timer);
-        if (target > MAX_SLICE_LENGTH)
-            target = MAX_SLICE_LENGTH;
-
-        const int diff = target - g_slice_length;
-        g_slice_length += diff;
-        down_count += diff;
-    }
-    if (advance_callback)
-        advance_callback(static_cast<int>(cycles_executed));
-}
-
-void LogPendingEvents() {
-    Event* event = first;
-    while (event) {
-        // LOG_TRACE(Core_Timing, "PENDING: Now: %lld Pending: %lld Type: %d", globalTimer,
-        //           next->time, next->type);
-        event = event->next;
-    }
-}
-
-void Idle(int max_idle) {
-    s64 cycles_down = down_count;
-    if (max_idle != 0 && cycles_down > max_idle)
-        cycles_down = max_idle;
-
-    if (first && cycles_down > 0) {
-        s64 cycles_executed = g_slice_length - down_count;
-        s64 cycles_next_event = first->time - global_timer;
-
-        if (cycles_next_event < cycles_executed + cycles_down) {
-            cycles_down = cycles_next_event - cycles_executed;
-            // Now, now... no time machines, please.
-            if (cycles_down < 0)
-                cycles_down = 0;
-        }
-    }
-
-    LOG_TRACE(Core_Timing, "Idle for %" PRId64 " cycles! (%f ms)", cycles_down,
-              cycles_down / (float)(g_clock_rate_arm11 * 0.001f));
-
-    idled_cycles += cycles_down;
-    down_count -= cycles_down;
-    if (down_count == 0)
-        down_count = -1;
+    slice_length = MAX_SLICE_LENGTH;
+
+    is_global_timer_sane = true;
+
+    while (!event_queue.empty() && event_queue.front().time <= global_timer) {
+        Event evt = std::move(event_queue.front());
+        std::pop_heap(event_queue.begin(), event_queue.end(), std::greater<Event>());
+        event_queue.pop_back();
+        evt.type->callback(evt.userdata, global_timer - evt.time);
+    }
+
+    is_global_timer_sane = false;
+
+    // Still events left (scheduled in the future)
+    if (!event_queue.empty()) {
+        slice_length = static_cast<int>(
+            std::min<s64>(event_queue.front().time - global_timer, MAX_SLICE_LENGTH));
+    }
+
+    downcount = slice_length;
+}
+
+void Idle() {
+    idled_cycles += downcount;
+    downcount = 0;
 }
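The contract after this change: the dispatcher calls Advance() at every slice boundary, runs at most GetDowncount() cycles of guest code, reports them via AddTicks(), and calls Idle() when the guest has nothing to do so the rest of the slice is skipped instead of executed. A hypothetical dispatcher loop under that contract; ExecuteCpuCycles and RunLoop are illustrative, not part of the diff:

    // Hypothetical dispatcher built on the CoreTiming API shown in this diff.
    #include "core/core_timing.h"

    // Illustrative stand-in for the CPU core: runs up to 'cycles' and returns how many it used.
    int ExecuteCpuCycles(int cycles);

    void RunLoop(bool& running) {
        while (running) {
            // Fire due events and set up slice_length/downcount for the next slice.
            CoreTiming::Advance();

            // Execute guest code until this slice's downcount is exhausted.
            while (CoreTiming::GetDowncount() > 0) {
                int executed = ExecuteCpuCycles(CoreTiming::GetDowncount());
                CoreTiming::AddTicks(static_cast<u64>(executed));
            }
        }
    }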
 
-std::string GetScheduledEventsSummary() {
-    Event* event = first;
-    std::string text = "Scheduled events\n";
-    text.reserve(1000);
-    while (event) {
-        unsigned int t = event->type;
-        if (t >= event_types.size())
-            LOG_ERROR(Core_Timing, "Invalid event type"); // %i", t);
-        const char* name = event_types[event->type].name;
-        if (!name)
-            name = "[unknown]";
-        text += Common::StringFromFormat("%s : %i %08x%08x\n", name, (int)event->time,
-                                         (u32)(event->userdata >> 32), (u32)(event->userdata));
-        event = event->next;
-    }
-    return text;
+u64 GetGlobalTimeUs() {
+    return GetTicks() * 1000000 / BASE_CLOCK_RATE;
+}
+
+int GetDowncount() {
+    return downcount;
 }
 
-} // namespace
+} // namespace CoreTiming
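GetGlobalTimeUs multiplies before dividing to preserve integer precision. Assuming BASE_CLOCK_RATE is the 3DS ARM11 clock rate declared in Citra's headers (268,111,856 Hz; the constant itself lives outside this diff), one emulated second converts as:

    ticks = 268111856
    us    = ticks * 1000000 / 268111856 = 1000000

Dividing first (ticks / BASE_CLOCK_RATE * 1000000) would truncate everything below a whole second to zero, so the multiply-first order keeps the result accurate to within one microsecond.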