Diffstat (limited to 'src/tests')
 -rw-r--r--   src/tests/CMakeLists.txt        |   1
 -rw-r--r--   src/tests/common/fibers.cpp     | 358
 -rw-r--r--   src/tests/core/core_timing.cpp  | 182
3 files changed, 444 insertions, 97 deletions
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index c7038b217..47ef30aa9 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,6 +1,7 @@
| 1 | add_executable(tests | 1 | add_executable(tests |
| 2 | common/bit_field.cpp | 2 | common/bit_field.cpp |
| 3 | common/bit_utils.cpp | 3 | common/bit_utils.cpp |
| 4 | common/fibers.cpp | ||
| 4 | common/multi_level_queue.cpp | 5 | common/multi_level_queue.cpp |
| 5 | common/param_package.cpp | 6 | common/param_package.cpp |
| 6 | common/ring_buffer.cpp | 7 | common/ring_buffer.cpp |
diff --git a/src/tests/common/fibers.cpp b/src/tests/common/fibers.cpp
new file mode 100644
index 000000000..4fd92428f
--- /dev/null
+++ b/src/tests/common/fibers.cpp
@@ -0,0 +1,358 @@
| 1 | // Copyright 2020 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <atomic> | ||
| 6 | #include <cstdlib> | ||
| 7 | #include <functional> | ||
| 8 | #include <memory> | ||
| 9 | #include <thread> | ||
| 10 | #include <unordered_map> | ||
| 11 | #include <vector> | ||
| 12 | |||
| 13 | #include <catch2/catch.hpp> | ||
| 14 | #include <math.h> | ||
| 15 | #include "common/common_types.h" | ||
| 16 | #include "common/fiber.h" | ||
| 17 | #include "common/spin_lock.h" | ||
| 18 | |||
| 19 | namespace Common { | ||
| 20 | |||
| 21 | class TestControl1 { | ||
| 22 | public: | ||
| 23 | TestControl1() = default; | ||
| 24 | |||
| 25 | void DoWork(); | ||
| 26 | |||
| 27 | void ExecuteThread(u32 id); | ||
| 28 | |||
| 29 | std::unordered_map<std::thread::id, u32> ids; | ||
| 30 | std::vector<std::shared_ptr<Common::Fiber>> thread_fibers; | ||
| 31 | std::vector<std::shared_ptr<Common::Fiber>> work_fibers; | ||
| 32 | std::vector<u32> items; | ||
| 33 | std::vector<u32> results; | ||
| 34 | }; | ||
| 35 | |||
| 36 | static void WorkControl1(void* control) { | ||
| 37 | auto* test_control = static_cast<TestControl1*>(control); | ||
| 38 | test_control->DoWork(); | ||
| 39 | } | ||
| 40 | |||
| 41 | void TestControl1::DoWork() { | ||
| 42 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 43 | u32 id = ids[this_id]; | ||
| 44 | u32 value = items[id]; | ||
| 45 | for (u32 i = 0; i < id; i++) { | ||
| 46 | value++; | ||
| 47 | } | ||
| 48 | results[id] = value; | ||
| 49 | Fiber::YieldTo(work_fibers[id], thread_fibers[id]); | ||
| 50 | } | ||
| 51 | |||
| 52 | void TestControl1::ExecuteThread(u32 id) { | ||
| 53 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 54 | ids[this_id] = id; | ||
| 55 | auto thread_fiber = Fiber::ThreadToFiber(); | ||
| 56 | thread_fibers[id] = thread_fiber; | ||
| 57 | work_fibers[id] = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl1}, this); | ||
| 58 | items[id] = rand() % 256; | ||
| 59 | Fiber::YieldTo(thread_fibers[id], work_fibers[id]); | ||
| 60 | thread_fibers[id]->Exit(); | ||
| 61 | } | ||
| 62 | |||
| 63 | static void ThreadStart1(u32 id, TestControl1& test_control) { | ||
| 64 | test_control.ExecuteThread(id); | ||
| 65 | } | ||
| 66 | |||
| 67 | /** This test checks the fiber setup configuration and validates that the | ||
| 68 | * fibers do all of the work required. | ||
| 69 | */ | ||
| 70 | TEST_CASE("Fibers::Setup", "[common]") { | ||
| 71 | constexpr std::size_t num_threads = 7; | ||
| 72 | TestControl1 test_control{}; | ||
| 73 | test_control.thread_fibers.resize(num_threads); | ||
| 74 | test_control.work_fibers.resize(num_threads); | ||
| 75 | test_control.items.resize(num_threads, 0); | ||
| 76 | test_control.results.resize(num_threads, 0); | ||
| 77 | std::vector<std::thread> threads; | ||
| 78 | for (u32 i = 0; i < num_threads; i++) { | ||
| 79 | threads.emplace_back(ThreadStart1, i, std::ref(test_control)); | ||
| 80 | } | ||
| 81 | for (u32 i = 0; i < num_threads; i++) { | ||
| 82 | threads[i].join(); | ||
| 83 | } | ||
| 84 | for (u32 i = 0; i < num_threads; i++) { | ||
| 85 | REQUIRE(test_control.items[i] + i == test_control.results[i]); | ||
| 86 | } | ||
| 87 | } | ||
| 88 | |||
| 89 | class TestControl2 { | ||
| 90 | public: | ||
| 91 | TestControl2() = default; | ||
| 92 | |||
| 93 | void DoWork1() { | ||
| 94 | trap2 = false; | ||
| 95 | while (trap.load()) | ||
| 96 | ; | ||
| 97 | for (u32 i = 0; i < 12000; i++) { | ||
| 98 | value1 += i; | ||
| 99 | } | ||
| 100 | Fiber::YieldTo(fiber1, fiber3); | ||
| 101 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 102 | u32 id = ids[this_id]; | ||
| 103 | assert1 = id == 1; | ||
| 104 | value2 += 5000; | ||
| 105 | Fiber::YieldTo(fiber1, thread_fibers[id]); | ||
| 106 | } | ||
| 107 | |||
| 108 | void DoWork2() { | ||
| 109 | while (trap2.load()) | ||
| 110 | ; | ||
| 111 | value2 = 2000; | ||
| 112 | trap = false; | ||
| 113 | Fiber::YieldTo(fiber2, fiber1); | ||
| 114 | assert3 = false; | ||
| 115 | } | ||
| 116 | |||
| 117 | void DoWork3() { | ||
| 118 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 119 | u32 id = ids[this_id]; | ||
| 120 | assert2 = id == 0; | ||
| 121 | value1 += 1000; | ||
| 122 | Fiber::YieldTo(fiber3, thread_fibers[id]); | ||
| 123 | } | ||
| 124 | |||
| 125 | void ExecuteThread(u32 id); | ||
| 126 | |||
| 127 | void CallFiber1() { | ||
| 128 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 129 | u32 id = ids[this_id]; | ||
| 130 | Fiber::YieldTo(thread_fibers[id], fiber1); | ||
| 131 | } | ||
| 132 | |||
| 133 | void CallFiber2() { | ||
| 134 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 135 | u32 id = ids[this_id]; | ||
| 136 | Fiber::YieldTo(thread_fibers[id], fiber2); | ||
| 137 | } | ||
| 138 | |||
| 139 | void Exit(); | ||
| 140 | |||
| 141 | bool assert1{}; | ||
| 142 | bool assert2{}; | ||
| 143 | bool assert3{true}; | ||
| 144 | u32 value1{}; | ||
| 145 | u32 value2{}; | ||
| 146 | std::atomic<bool> trap{true}; | ||
| 147 | std::atomic<bool> trap2{true}; | ||
| 148 | std::unordered_map<std::thread::id, u32> ids; | ||
| 149 | std::vector<std::shared_ptr<Common::Fiber>> thread_fibers; | ||
| 150 | std::shared_ptr<Common::Fiber> fiber1; | ||
| 151 | std::shared_ptr<Common::Fiber> fiber2; | ||
| 152 | std::shared_ptr<Common::Fiber> fiber3; | ||
| 153 | }; | ||
| 154 | |||
| 155 | static void WorkControl2_1(void* control) { | ||
| 156 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 157 | test_control->DoWork1(); | ||
| 158 | } | ||
| 159 | |||
| 160 | static void WorkControl2_2(void* control) { | ||
| 161 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 162 | test_control->DoWork2(); | ||
| 163 | } | ||
| 164 | |||
| 165 | static void WorkControl2_3(void* control) { | ||
| 166 | auto* test_control = static_cast<TestControl2*>(control); | ||
| 167 | test_control->DoWork3(); | ||
| 168 | } | ||
| 169 | |||
| 170 | void TestControl2::ExecuteThread(u32 id) { | ||
| 171 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 172 | ids[this_id] = id; | ||
| 173 | auto thread_fiber = Fiber::ThreadToFiber(); | ||
| 174 | thread_fibers[id] = thread_fiber; | ||
| 175 | } | ||
| 176 | |||
| 177 | void TestControl2::Exit() { | ||
| 178 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 179 | u32 id = ids[this_id]; | ||
| 180 | thread_fibers[id]->Exit(); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void ThreadStart2_1(u32 id, TestControl2& test_control) { | ||
| 184 | test_control.ExecuteThread(id); | ||
| 185 | test_control.CallFiber1(); | ||
| 186 | test_control.Exit(); | ||
| 187 | } | ||
| 188 | |||
| 189 | static void ThreadStart2_2(u32 id, TestControl2& test_control) { | ||
| 190 | test_control.ExecuteThread(id); | ||
| 191 | test_control.CallFiber2(); | ||
| 192 | test_control.Exit(); | ||
| 193 | } | ||
| 194 | |||
| 195 | /** This test checks the fiber thread exchange configuration and validates | ||
| 196 | * that a fiber has been successfully transferred from one thread to another and that the | ||
| 197 | * TLS region of the thread is preserved while switching fibers. | ||
| 198 | */ | ||
| 199 | TEST_CASE("Fibers::InterExchange", "[common]") { | ||
| 200 | TestControl2 test_control{}; | ||
| 201 | test_control.thread_fibers.resize(2); | ||
| 202 | test_control.fiber1 = | ||
| 203 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_1}, &test_control); | ||
| 204 | test_control.fiber2 = | ||
| 205 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_2}, &test_control); | ||
| 206 | test_control.fiber3 = | ||
| 207 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl2_3}, &test_control); | ||
| 208 | std::thread thread1(ThreadStart2_1, 0, std::ref(test_control)); | ||
| 209 | std::thread thread2(ThreadStart2_2, 1, std::ref(test_control)); | ||
| 210 | thread1.join(); | ||
| 211 | thread2.join(); | ||
| 212 | REQUIRE(test_control.assert1); | ||
| 213 | REQUIRE(test_control.assert2); | ||
| 214 | REQUIRE(test_control.assert3); | ||
| 215 | REQUIRE(test_control.value2 == 7000); | ||
| 216 | u32 cal_value = 0; | ||
| 217 | for (u32 i = 0; i < 12000; i++) { | ||
| 218 | cal_value += i; | ||
| 219 | } | ||
| 220 | cal_value += 1000; | ||
| 221 | REQUIRE(test_control.value1 == cal_value); | ||
| 222 | } | ||
| 223 | |||
| 224 | class TestControl3 { | ||
| 225 | public: | ||
| 226 | TestControl3() = default; | ||
| 227 | |||
| 228 | void DoWork1() { | ||
| 229 | value1 += 1; | ||
| 230 | Fiber::YieldTo(fiber1, fiber2); | ||
| 231 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 232 | u32 id = ids[this_id]; | ||
| 233 | value3 += 1; | ||
| 234 | Fiber::YieldTo(fiber1, thread_fibers[id]); | ||
| 235 | } | ||
| 236 | |||
| 237 | void DoWork2() { | ||
| 238 | value2 += 1; | ||
| 239 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 240 | u32 id = ids[this_id]; | ||
| 241 | Fiber::YieldTo(fiber2, thread_fibers[id]); | ||
| 242 | } | ||
| 243 | |||
| 244 | void ExecuteThread(u32 id); | ||
| 245 | |||
| 246 | void CallFiber1() { | ||
| 247 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 248 | u32 id = ids[this_id]; | ||
| 249 | Fiber::YieldTo(thread_fibers[id], fiber1); | ||
| 250 | } | ||
| 251 | |||
| 252 | void Exit(); | ||
| 253 | |||
| 254 | u32 value1{}; | ||
| 255 | u32 value2{}; | ||
| 256 | u32 value3{}; | ||
| 257 | std::unordered_map<std::thread::id, u32> ids; | ||
| 258 | std::vector<std::shared_ptr<Common::Fiber>> thread_fibers; | ||
| 259 | std::shared_ptr<Common::Fiber> fiber1; | ||
| 260 | std::shared_ptr<Common::Fiber> fiber2; | ||
| 261 | }; | ||
| 262 | |||
| 263 | static void WorkControl3_1(void* control) { | ||
| 264 | auto* test_control = static_cast<TestControl3*>(control); | ||
| 265 | test_control->DoWork1(); | ||
| 266 | } | ||
| 267 | |||
| 268 | static void WorkControl3_2(void* control) { | ||
| 269 | auto* test_control = static_cast<TestControl3*>(control); | ||
| 270 | test_control->DoWork2(); | ||
| 271 | } | ||
| 272 | |||
| 273 | void TestControl3::ExecuteThread(u32 id) { | ||
| 274 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 275 | ids[this_id] = id; | ||
| 276 | auto thread_fiber = Fiber::ThreadToFiber(); | ||
| 277 | thread_fibers[id] = thread_fiber; | ||
| 278 | } | ||
| 279 | |||
| 280 | void TestControl3::Exit() { | ||
| 281 | std::thread::id this_id = std::this_thread::get_id(); | ||
| 282 | u32 id = ids[this_id]; | ||
| 283 | thread_fibers[id]->Exit(); | ||
| 284 | } | ||
| 285 | |||
| 286 | static void ThreadStart3(u32 id, TestControl3& test_control) { | ||
| 287 | test_control.ExecuteThread(id); | ||
| 288 | test_control.CallFiber1(); | ||
| 289 | test_control.Exit(); | ||
| 290 | } | ||
| 291 | |||
| 292 | /** This test checks two threads racing to start the same fiber. | ||
| 293 | * It checks that execution occurred in an ordered manner and that at no time | ||
| 294 | * were there two contexts running at the same time. | ||
| 295 | */ | ||
| 296 | TEST_CASE("Fibers::StartRace", "[common]") { | ||
| 297 | TestControl3 test_control{}; | ||
| 298 | test_control.thread_fibers.resize(2); | ||
| 299 | test_control.fiber1 = | ||
| 300 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_1}, &test_control); | ||
| 301 | test_control.fiber2 = | ||
| 302 | std::make_shared<Fiber>(std::function<void(void*)>{WorkControl3_2}, &test_control); | ||
| 303 | std::thread thread1(ThreadStart3, 0, std::ref(test_control)); | ||
| 304 | std::thread thread2(ThreadStart3, 1, std::ref(test_control)); | ||
| 305 | thread1.join(); | ||
| 306 | thread2.join(); | ||
| 307 | REQUIRE(test_control.value1 == 1); | ||
| 308 | REQUIRE(test_control.value2 == 1); | ||
| 309 | REQUIRE(test_control.value3 == 1); | ||
| 310 | } | ||
| 311 | |||
| 312 | class TestControl4; | ||
| 313 | |||
| 314 | static void WorkControl4(void* control); | ||
| 315 | |||
| 316 | class TestControl4 { | ||
| 317 | public: | ||
| 318 | TestControl4() { | ||
| 319 | fiber1 = std::make_shared<Fiber>(std::function<void(void*)>{WorkControl4}, this); | ||
| 320 | goal_reached = false; | ||
| 321 | rewinded = false; | ||
| 322 | } | ||
| 323 | |||
| 324 | void Execute() { | ||
| 325 | thread_fiber = Fiber::ThreadToFiber(); | ||
| 326 | Fiber::YieldTo(thread_fiber, fiber1); | ||
| 327 | thread_fiber->Exit(); | ||
| 328 | } | ||
| 329 | |||
| 330 | void DoWork() { | ||
| 331 | fiber1->SetRewindPoint(std::function<void(void*)>{WorkControl4}, this); | ||
| 332 | if (rewinded) { | ||
| 333 | goal_reached = true; | ||
| 334 | Fiber::YieldTo(fiber1, thread_fiber); | ||
| 335 | } | ||
| 336 | rewinded = true; | ||
| 337 | fiber1->Rewind(); | ||
| 338 | } | ||
| 339 | |||
| 340 | std::shared_ptr<Common::Fiber> fiber1; | ||
| 341 | std::shared_ptr<Common::Fiber> thread_fiber; | ||
| 342 | bool goal_reached; | ||
| 343 | bool rewinded; | ||
| 344 | }; | ||
| 345 | |||
| 346 | static void WorkControl4(void* control) { | ||
| 347 | auto* test_control = static_cast<TestControl4*>(control); | ||
| 348 | test_control->DoWork(); | ||
| 349 | } | ||
| 350 | |||
| 351 | TEST_CASE("Fibers::Rewind", "[common]") { | ||
| 352 | TestControl4 test_control{}; | ||
| 353 | test_control.Execute(); | ||
| 354 | REQUIRE(test_control.goal_reached); | ||
| 355 | REQUIRE(test_control.rewinded); | ||
| 356 | } | ||
| 357 | |||
| 358 | } // namespace Common | ||
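All four test cases above repeat the same handshake: a host thread adopts itself as a fiber, yields into a work fiber, and the work fiber yields back before the thread-fiber exits. Below is a minimal single-thread sketch of that round trip, assuming only the Common::Fiber interface exercised in this file (ThreadToFiber, YieldTo, Exit); the helper names (RunOnce, Work, host_fiber, work_fiber) are illustrative.

    #include <functional>
    #include <memory>
    #include "common/fiber.h"

    // Sketch: condenses the per-thread setup the tests above perform.
    static std::shared_ptr<Common::Fiber> host_fiber; // the current thread, viewed as a fiber
    static std::shared_ptr<Common::Fiber> work_fiber; // the unit of work to run

    static void Work(void* /*arg*/) {
        // ... do the actual work here ...
        Common::Fiber::YieldTo(work_fiber, host_fiber); // hand control back to the thread
    }

    static void RunOnce() {
        host_fiber = Common::Fiber::ThreadToFiber(); // adopt the current thread as a fiber
        work_fiber = std::make_shared<Common::Fiber>(std::function<void(void*)>{Work}, nullptr);
        Common::Fiber::YieldTo(host_fiber, work_fiber); // run Work() on the work fiber
        host_fiber->Exit(); // tear the thread-fiber down once the work has returned
    }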
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index ff2d11cc8..e66db1940 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -18,29 +18,26 @@ namespace {
| 18 | // Numbers are chosen randomly to make sure the correct one is given. | 18 | // Numbers are chosen randomly to make sure the correct one is given. |
| 19 | constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}}; | 19 | constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}}; |
| 20 | constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals | 20 | constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals |
| 21 | constexpr std::array<u64, 5> calls_order{{2, 0, 1, 4, 3}}; | ||
| 22 | std::array<s64, 5> delays{}; | ||
| 21 | 23 | ||
| 22 | std::bitset<CB_IDS.size()> callbacks_ran_flags; | 24 | std::bitset<CB_IDS.size()> callbacks_ran_flags; |
| 23 | u64 expected_callback = 0; | 25 | u64 expected_callback = 0; |
| 24 | s64 lateness = 0; | ||
| 25 | 26 | ||
| 26 | template <unsigned int IDX> | 27 | template <unsigned int IDX> |
| 27 | void CallbackTemplate(u64 userdata, s64 cycles_late) { | 28 | void HostCallbackTemplate(u64 userdata, s64 nanoseconds_late) { |
| 28 | static_assert(IDX < CB_IDS.size(), "IDX out of range"); | 29 | static_assert(IDX < CB_IDS.size(), "IDX out of range"); |
| 29 | callbacks_ran_flags.set(IDX); | 30 | callbacks_ran_flags.set(IDX); |
| 30 | REQUIRE(CB_IDS[IDX] == userdata); | 31 | REQUIRE(CB_IDS[IDX] == userdata); |
| 31 | REQUIRE(CB_IDS[IDX] == expected_callback); | 32 | REQUIRE(CB_IDS[IDX] == CB_IDS[calls_order[expected_callback]]); |
| 32 | REQUIRE(lateness == cycles_late); | 33 | delays[IDX] = nanoseconds_late; |
| 33 | } | 34 | ++expected_callback; |
| 34 | |||
| 35 | u64 callbacks_done = 0; | ||
| 36 | |||
| 37 | void EmptyCallback(u64 userdata, s64 cycles_late) { | ||
| 38 | ++callbacks_done; | ||
| 39 | } | 35 | } |
| 40 | 36 | ||
| 41 | struct ScopeInit final { | 37 | struct ScopeInit final { |
| 42 | ScopeInit() { | 38 | ScopeInit() { |
| 43 | core_timing.Initialize(); | 39 | core_timing.SetMulticore(true); |
| 40 | core_timing.Initialize([]() {}); | ||
| 44 | } | 41 | } |
| 45 | ~ScopeInit() { | 42 | ~ScopeInit() { |
| 46 | core_timing.Shutdown(); | 43 | core_timing.Shutdown(); |
@@ -49,110 +46,101 @@ struct ScopeInit final {
| 49 | Core::Timing::CoreTiming core_timing; | 46 | Core::Timing::CoreTiming core_timing; |
| 50 | }; | 47 | }; |
| 51 | 48 | ||
| 52 | void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0, | 49 | #pragma optimize("", off) |
| 53 | int expected_lateness = 0, int cpu_downcount = 0) { | ||
| 54 | callbacks_ran_flags = 0; | ||
| 55 | expected_callback = CB_IDS[idx]; | ||
| 56 | lateness = expected_lateness; | ||
| 57 | |||
| 58 | // Pretend we executed X cycles of instructions. | ||
| 59 | core_timing.SwitchContext(context); | ||
| 60 | core_timing.AddTicks(core_timing.GetDowncount() - cpu_downcount); | ||
| 61 | core_timing.Advance(); | ||
| 62 | core_timing.SwitchContext((context + 1) % 4); | ||
| 63 | 50 | ||
| 64 | REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags); | 51 | u64 TestTimerSpeed(Core::Timing::CoreTiming& core_timing) { |
| 52 | u64 start = core_timing.GetGlobalTimeNs().count(); | ||
| 53 | u64 placebo = 0; | ||
| 54 | for (std::size_t i = 0; i < 1000; i++) { | ||
| 55 | placebo += core_timing.GetGlobalTimeNs().count(); | ||
| 56 | } | ||
| 57 | u64 end = core_timing.GetGlobalTimeNs().count(); | ||
| 58 | return (end - start); | ||
| 65 | } | 59 | } |
| 60 | |||
| 61 | #pragma optimize("", on) | ||
| 62 | |||
| 66 | } // Anonymous namespace | 63 | } // Anonymous namespace |
| 67 | 64 | ||
| 68 | TEST_CASE("CoreTiming[BasicOrder]", "[core]") { | 65 | TEST_CASE("CoreTiming[BasicOrder]", "[core]") { |
| 69 | ScopeInit guard; | 66 | ScopeInit guard; |
| 70 | auto& core_timing = guard.core_timing; | 67 | auto& core_timing = guard.core_timing; |
| 68 | std::vector<std::shared_ptr<Core::Timing::EventType>> events{ | ||
| 69 | Core::Timing::CreateEvent("callbackA", HostCallbackTemplate<0>), | ||
| 70 | Core::Timing::CreateEvent("callbackB", HostCallbackTemplate<1>), | ||
| 71 | Core::Timing::CreateEvent("callbackC", HostCallbackTemplate<2>), | ||
| 72 | Core::Timing::CreateEvent("callbackD", HostCallbackTemplate<3>), | ||
| 73 | Core::Timing::CreateEvent("callbackE", HostCallbackTemplate<4>), | ||
| 74 | }; | ||
| 75 | |||
| 76 | expected_callback = 0; | ||
| 77 | |||
| 78 | core_timing.SyncPause(true); | ||
| 79 | |||
| 80 | u64 one_micro = 1000U; | ||
| 81 | for (std::size_t i = 0; i < events.size(); i++) { | ||
| 82 | u64 order = calls_order[i]; | ||
| 83 | core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]); | ||
| 84 | } | ||
| 85 | /// Test that pausing kept the scheduled events from firing. | ||
| 86 | REQUIRE(callbacks_ran_flags.none()); | ||
| 71 | 87 | ||
| 72 | std::shared_ptr<Core::Timing::EventType> cb_a = | 88 | core_timing.Pause(false); // No need to sync |
| 73 | Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>); | ||
| 74 | std::shared_ptr<Core::Timing::EventType> cb_b = | ||
| 75 | Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>); | ||
| 76 | std::shared_ptr<Core::Timing::EventType> cb_c = | ||
| 77 | Core::Timing::CreateEvent("callbackC", CallbackTemplate<2>); | ||
| 78 | std::shared_ptr<Core::Timing::EventType> cb_d = | ||
| 79 | Core::Timing::CreateEvent("callbackD", CallbackTemplate<3>); | ||
| 80 | std::shared_ptr<Core::Timing::EventType> cb_e = | ||
| 81 | Core::Timing::CreateEvent("callbackE", CallbackTemplate<4>); | ||
| 82 | |||
| 83 | // Enter slice 0 | ||
| 84 | core_timing.ResetRun(); | ||
| 85 | |||
| 86 | // D -> B -> C -> A -> E | ||
| 87 | core_timing.SwitchContext(0); | ||
| 88 | core_timing.ScheduleEvent(1000, cb_a, CB_IDS[0]); | ||
| 89 | REQUIRE(1000 == core_timing.GetDowncount()); | ||
| 90 | core_timing.ScheduleEvent(500, cb_b, CB_IDS[1]); | ||
| 91 | REQUIRE(500 == core_timing.GetDowncount()); | ||
| 92 | core_timing.ScheduleEvent(800, cb_c, CB_IDS[2]); | ||
| 93 | REQUIRE(500 == core_timing.GetDowncount()); | ||
| 94 | core_timing.ScheduleEvent(100, cb_d, CB_IDS[3]); | ||
| 95 | REQUIRE(100 == core_timing.GetDowncount()); | ||
| 96 | core_timing.ScheduleEvent(1200, cb_e, CB_IDS[4]); | ||
| 97 | REQUIRE(100 == core_timing.GetDowncount()); | ||
| 98 | |||
| 99 | AdvanceAndCheck(core_timing, 3, 0); | ||
| 100 | AdvanceAndCheck(core_timing, 1, 1); | ||
| 101 | AdvanceAndCheck(core_timing, 2, 2); | ||
| 102 | AdvanceAndCheck(core_timing, 0, 3); | ||
| 103 | AdvanceAndCheck(core_timing, 4, 0); | ||
| 104 | } | ||
| 105 | |||
| 106 | TEST_CASE("CoreTiming[FairSharing]", "[core]") { | ||
| 107 | 89 | ||
| 108 | ScopeInit guard; | 90 | while (core_timing.HasPendingEvents()) |
| 109 | auto& core_timing = guard.core_timing; | 91 | ; |
| 110 | 92 | ||
| 111 | std::shared_ptr<Core::Timing::EventType> empty_callback = | 93 | REQUIRE(callbacks_ran_flags.all()); |
| 112 | Core::Timing::CreateEvent("empty_callback", EmptyCallback); | ||
| 113 | 94 | ||
| 114 | callbacks_done = 0; | 95 | for (std::size_t i = 0; i < delays.size(); i++) { |
| 115 | u64 MAX_CALLBACKS = 10; | 96 | const double delay = static_cast<double>(delays[i]); |
| 116 | for (std::size_t i = 0; i < 10; i++) { | 97 | const double micro = delay / 1000.0f; |
| 117 | core_timing.ScheduleEvent(i * 3333U, empty_callback, 0); | 98 | const double mili = micro / 1000.0f; |
| 99 | printf("HostTimer Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili); | ||
| 118 | } | 100 | } |
| 119 | |||
| 120 | const s64 advances = MAX_SLICE_LENGTH / 10; | ||
| 121 | core_timing.ResetRun(); | ||
| 122 | u64 current_time = core_timing.GetTicks(); | ||
| 123 | bool keep_running{}; | ||
| 124 | do { | ||
| 125 | keep_running = false; | ||
| 126 | for (u32 active_core = 0; active_core < 4; ++active_core) { | ||
| 127 | core_timing.SwitchContext(active_core); | ||
| 128 | if (core_timing.CanCurrentContextRun()) { | ||
| 129 | core_timing.AddTicks(std::min<s64>(advances, core_timing.GetDowncount())); | ||
| 130 | core_timing.Advance(); | ||
| 131 | } | ||
| 132 | keep_running |= core_timing.CanCurrentContextRun(); | ||
| 133 | } | ||
| 134 | } while (keep_running); | ||
| 135 | u64 current_time_2 = core_timing.GetTicks(); | ||
| 136 | |||
| 137 | REQUIRE(MAX_CALLBACKS == callbacks_done); | ||
| 138 | REQUIRE(current_time_2 == current_time + MAX_SLICE_LENGTH * 4); | ||
| 139 | } | 101 | } |
| 140 | 102 | ||
| 141 | TEST_CASE("Core::Timing[PredictableLateness]", "[core]") { | 103 | TEST_CASE("CoreTiming[BasicOrderNoPausing]", "[core]") { |
| 142 | ScopeInit guard; | 104 | ScopeInit guard; |
| 143 | auto& core_timing = guard.core_timing; | 105 | auto& core_timing = guard.core_timing; |
| 106 | std::vector<std::shared_ptr<Core::Timing::EventType>> events{ | ||
| 107 | Core::Timing::CreateEvent("callbackA", HostCallbackTemplate<0>), | ||
| 108 | Core::Timing::CreateEvent("callbackB", HostCallbackTemplate<1>), | ||
| 109 | Core::Timing::CreateEvent("callbackC", HostCallbackTemplate<2>), | ||
| 110 | Core::Timing::CreateEvent("callbackD", HostCallbackTemplate<3>), | ||
| 111 | Core::Timing::CreateEvent("callbackE", HostCallbackTemplate<4>), | ||
| 112 | }; | ||
| 113 | |||
| 114 | core_timing.SyncPause(true); | ||
| 115 | core_timing.SyncPause(false); | ||
| 116 | |||
| 117 | expected_callback = 0; | ||
| 118 | |||
| 119 | u64 start = core_timing.GetGlobalTimeNs().count(); | ||
| 120 | u64 one_micro = 1000U; | ||
| 121 | for (std::size_t i = 0; i < events.size(); i++) { | ||
| 122 | u64 order = calls_order[i]; | ||
| 123 | core_timing.ScheduleEvent(i * one_micro + 100U, events[order], CB_IDS[order]); | ||
| 124 | } | ||
| 125 | u64 end = core_timing.GetGlobalTimeNs().count(); | ||
| 126 | const double scheduling_time = static_cast<double>(end - start); | ||
| 127 | const double timer_time = static_cast<double>(TestTimerSpeed(core_timing)); | ||
| 144 | 128 | ||
| 145 | std::shared_ptr<Core::Timing::EventType> cb_a = | 129 | while (core_timing.HasPendingEvents()) |
| 146 | Core::Timing::CreateEvent("callbackA", CallbackTemplate<0>); | 130 | ; |
| 147 | std::shared_ptr<Core::Timing::EventType> cb_b = | ||
| 148 | Core::Timing::CreateEvent("callbackB", CallbackTemplate<1>); | ||
| 149 | 131 | ||
| 150 | // Enter slice 0 | 132 | REQUIRE(callbacks_ran_flags.all()); |
| 151 | core_timing.ResetRun(); | ||
| 152 | 133 | ||
| 153 | core_timing.ScheduleEvent(100, cb_a, CB_IDS[0]); | 134 | for (std::size_t i = 0; i < delays.size(); i++) { |
| 154 | core_timing.ScheduleEvent(200, cb_b, CB_IDS[1]); | 135 | const double delay = static_cast<double>(delays[i]); |
| 136 | const double micro = delay / 1000.0f; | ||
| 137 | const double mili = micro / 1000.0f; | ||
| 138 | printf("HostTimer No Pausing Delay[%zu]: %.3f %.6f\n", i, micro, mili); | ||
| 139 | } | ||
| 155 | 140 | ||
| 156 | AdvanceAndCheck(core_timing, 0, 0, 10, -10); // (100 - 10) | 141 | const double micro = scheduling_time / 1000.0f; |
| 157 | AdvanceAndCheck(core_timing, 1, 1, 50, -50); | 142 | const double mili = micro / 1000.0f; |
| 143 | printf("HostTimer No Pausing Scheduling Time: %.3f %.6f\n", micro, mili); | ||
| 144 | printf("HostTimer No Pausing Timer Time: %.3f %.6f\n", timer_time / 1000.f, | ||
| 145 | timer_time / 1000000.f); | ||
| 158 | } | 146 | } |
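Both core timing test cases follow the same host-timing flow: pause the timer, schedule events with nanosecond delays, release the timer, and spin until the event queue drains. The following is a minimal sketch of that flow, assuming the CoreTiming interface visible in this diff (SetMulticore, Initialize, CreateEvent, SyncPause, ScheduleEvent, HasPendingEvents, Shutdown); the header path, helper names (RunOnce, OnTimer), and the delay/userdata values are illustrative.

    #include <atomic>
    #include "common/common_types.h" // u64, s64
    #include "core/core_timing.h"    // header path assumed from the repository layout

    // Sketch: schedule one event one microsecond ahead and wait for it to fire.
    static std::atomic<bool> fired{false};

    static void OnTimer(u64 userdata, s64 nanoseconds_late) {
        // userdata is echoed back; nanoseconds_late reports how far past the deadline we ran.
        fired = true;
    }

    static void RunOnce() {
        Core::Timing::CoreTiming core_timing;
        core_timing.SetMulticore(true);
        core_timing.Initialize([]() {}); // empty "wake the CPU" callback, as in ScopeInit
        auto event = Core::Timing::CreateEvent("example", OnTimer);

        core_timing.SyncPause(true);                          // hold the timer so nothing fires early
        core_timing.ScheduleEvent(1000U /* ns */, event, 42); // one microsecond into the future
        core_timing.SyncPause(false);                         // release the timer

        while (core_timing.HasPendingEvents()) {
            // spin until the host timer thread has dispatched the event, as the tests above do
        }
        core_timing.Shutdown();
    }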