path: root/src/core/hle/kernel/scheduler.cpp
author     bunnei  2020-10-20 19:07:39 -0700
committer  GitHub  2020-10-20 19:07:39 -0700
commit     3d592972dc3fd61cc88771b889eff237e4e03e0f (patch)
tree       0dbc65ac86e609ae22087c7be9d4759ac6b73004 /src/core/hle/kernel/scheduler.cpp
parent     kernel: Fix build with recent compiler flag changes (diff)
Revert "core: Fix clang build"
Diffstat (limited to 'src/core/hle/kernel/scheduler.cpp')
-rw-r--r--  src/core/hle/kernel/scheduler.cpp  72
1 file changed, 21 insertions, 51 deletions
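Note: the diff below drops the index-sanitizing casts that "core: Fix clang build" had introduced, so signed core IDs (s32) are again used directly as array subscripts. A minimal standalone sketch of the two indexing styles (the Pick/PickSanitized helpers and the stand-in Thread type are hypothetical, not from the yuzu sources) shows why the casts existed: subscripting a std::array with a signed value implicitly converts it to std::size_t, which clang diagnoses under -Wsign-conversion.

#include <array>
#include <cstddef>
#include <cstdint>

using s32 = std::int32_t;
using u32 = std::uint32_t;
using Thread = int; // stand-in for the kernel Thread class

constexpr std::size_t NUM_CPU_CORES = 4;

// Post-revert style: the signed core ID is range-checked, then used directly.
// The implicit s32 -> std::size_t conversion in the subscript is what clang
// warns about under -Wsign-conversion.
Thread* Pick(std::array<Thread*, NUM_CPU_CORES>& top_threads, s32 core_id) {
    return core_id >= 0 ? top_threads[core_id] : nullptr;
}

// Reverted "clang fix" style: cast the already-validated ID to unsigned so
// the subscript performs no sign conversion.
Thread* PickSanitized(std::array<Thread*, NUM_CPU_CORES>& top_threads, s32 core_id) {
    return core_id >= 0 ? top_threads[static_cast<u32>(core_id)] : nullptr;
}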
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index 4a9a762f3..6b7db5372 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -89,11 +89,9 @@ u32 GlobalScheduler::SelectThreads() {
             while (iter != suggested_queue[core_id].end()) {
                 suggested = *iter;
                 iter++;
-                const s32 suggested_core_id = suggested->GetProcessorID();
-                Thread* top_thread = suggested_core_id >= 0
-                                         ? top_threads[static_cast<u32>(suggested_core_id)]
-                                         : nullptr;
-
+                s32 suggested_core_id = suggested->GetProcessorID();
+                Thread* top_thread =
+                    suggested_core_id >= 0 ? top_threads[suggested_core_id] : nullptr;
                 if (top_thread != suggested) {
                     if (top_thread != nullptr &&
                         top_thread->GetPriority() < THREADPRIO_MAX_CORE_MIGRATION) {
@@ -104,19 +102,16 @@ u32 GlobalScheduler::SelectThreads() {
                     TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id), suggested);
                     break;
                 }
-
                 suggested = nullptr;
                 migration_candidates[num_candidates++] = suggested_core_id;
             }
-
             // Step 3: Select a suggested thread from another core
             if (suggested == nullptr) {
                 for (std::size_t i = 0; i < num_candidates; i++) {
-                    const auto candidate_core = static_cast<u32>(migration_candidates[i]);
+                    s32 candidate_core = migration_candidates[i];
                     suggested = top_threads[candidate_core];
                     auto it = scheduled_queue[candidate_core].begin();
-                    ++it;
-
+                    it++;
                     Thread* next = it != scheduled_queue[candidate_core].end() ? *it : nullptr;
                     if (next != nullptr) {
                         TransferToCore(suggested->GetPriority(), static_cast<s32>(core_id),
@@ -133,8 +128,7 @@ u32 GlobalScheduler::SelectThreads() {
 
         idle_cores &= ~(1U << core_id);
     }
-
-    u32 cores_needing_context_switch = 0;
+    u32 cores_needing_context_switch{};
     for (u32 core = 0; core < Core::Hardware::NUM_CPU_CORES; core++) {
         Scheduler& sched = kernel.Scheduler(core);
         ASSERT(top_threads[core] == nullptr ||
@@ -192,16 +186,13 @@ bool GlobalScheduler::YieldThreadAndBalanceLoad(Thread* yielding_thread) {
     for (auto& thread : suggested_queue[core_id]) {
         const s32 source_core = thread->GetProcessorID();
         if (source_core >= 0) {
-            const auto sanitized_source_core = static_cast<u32>(source_core);
-
-            if (current_threads[sanitized_source_core] != nullptr) {
-                if (thread == current_threads[sanitized_source_core] ||
-                    current_threads[sanitized_source_core]->GetPriority() < min_regular_priority) {
+            if (current_threads[source_core] != nullptr) {
+                if (thread == current_threads[source_core] ||
+                    current_threads[source_core]->GetPriority() < min_regular_priority) {
                     continue;
                 }
             }
         }
-
         if (next_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks() ||
             next_thread->GetPriority() < thread->GetPriority()) {
             if (thread->GetPriority() <= priority) {
@@ -249,25 +240,17 @@ bool GlobalScheduler::YieldThreadAndWaitForLoadBalancing(Thread* yielding_thread
         for (std::size_t i = 0; i < current_threads.size(); i++) {
             current_threads[i] = scheduled_queue[i].empty() ? nullptr : scheduled_queue[i].front();
         }
-
         for (auto& thread : suggested_queue[core_id]) {
             const s32 source_core = thread->GetProcessorID();
-            if (source_core < 0) {
-                continue;
-            }
-
-            const auto sanitized_source_core = static_cast<u32>(source_core);
-            if (thread == current_threads[sanitized_source_core]) {
+            if (source_core < 0 || thread == current_threads[source_core]) {
                 continue;
             }
-
-            if (current_threads[sanitized_source_core] == nullptr ||
-                current_threads[sanitized_source_core]->GetPriority() >= min_regular_priority) {
+            if (current_threads[source_core] == nullptr ||
+                current_threads[source_core]->GetPriority() >= min_regular_priority) {
                 winner = thread;
             }
             break;
         }
-
         if (winner != nullptr) {
             if (winner != yielding_thread) {
                 TransferToCore(winner->GetPriority(), static_cast<s32>(core_id), winner);
@@ -309,22 +292,17 @@ void GlobalScheduler::PreemptThreads() {
                 if (thread->GetPriority() != priority) {
                     continue;
                 }
-
                 if (source_core >= 0) {
-                    const auto sanitized_source_core = static_cast<u32>(source_core);
-                    Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
+                    Thread* next_thread = scheduled_queue[source_core].empty()
                                               ? nullptr
-                                              : scheduled_queue[sanitized_source_core].front();
-
+                                              : scheduled_queue[source_core].front();
                     if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                         break;
                     }
-
                     if (next_thread == thread) {
                         continue;
                     }
                 }
-
                 if (current_thread != nullptr &&
                     current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                     winner = thread;
@@ -344,22 +322,17 @@ void GlobalScheduler::PreemptThreads() {
                 if (thread->GetPriority() < priority) {
                     continue;
                 }
-
                 if (source_core >= 0) {
-                    const auto sanitized_source_core = static_cast<u32>(source_core);
-                    Thread* next_thread = scheduled_queue[sanitized_source_core].empty()
+                    Thread* next_thread = scheduled_queue[source_core].empty()
                                               ? nullptr
-                                              : scheduled_queue[sanitized_source_core].front();
-
+                                              : scheduled_queue[source_core].front();
                     if (next_thread != nullptr && next_thread->GetPriority() < 2) {
                         break;
                     }
-
                     if (next_thread == thread) {
                         continue;
                     }
                 }
-
                 if (current_thread != nullptr &&
                     current_thread->GetLastRunningTicks() >= thread->GetLastRunningTicks()) {
                     winner = thread;
@@ -379,11 +352,11 @@ void GlobalScheduler::PreemptThreads() {
 
 void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
                                                  Core::EmuThreadHandle global_thread) {
-    const u32 current_core = global_thread.host_handle;
+    u32 current_core = global_thread.host_handle;
     bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
                                (current_core < Core::Hardware::NUM_CPU_CORES);
     while (cores_pending_reschedule != 0) {
-        const u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
+        u32 core = Common::CountTrailingZeroes32(cores_pending_reschedule);
         ASSERT(core < Core::Hardware::NUM_CPU_CORES);
         if (!must_context_switch || core != current_core) {
             auto& phys_core = kernel.PhysicalCore(core);
@@ -393,7 +366,6 @@ void GlobalScheduler::EnableInterruptAndSchedule(u32 cores_pending_reschedule,
         }
         cores_pending_reschedule &= ~(1U << core);
     }
-
     if (must_context_switch) {
         auto& core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
@@ -831,11 +803,9 @@ void Scheduler::Initialize() {
     std::string name = "Idle Thread Id:" + std::to_string(core_id);
     std::function<void(void*)> init_func = Core::CpuManager::GetIdleThreadStartFunc();
     void* init_func_parameter = system.GetCpuManager().GetStartFuncParamater();
-    const auto type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
-    auto thread_res =
-        Thread::Create(system, type, std::move(name), 0, 64, 0, static_cast<s32>(core_id), 0,
-                       nullptr, std::move(init_func), init_func_parameter);
-
+    ThreadType type = static_cast<ThreadType>(THREADTYPE_KERNEL | THREADTYPE_HLE | THREADTYPE_IDLE);
+    auto thread_res = Thread::Create(system, type, name, 0, 64, 0, static_cast<u32>(core_id), 0,
+                                     nullptr, std::move(init_func), init_func_parameter);
     idle_thread = std::move(thread_res).Unwrap();
 }
 
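For reference, both SelectThreads and EnableInterruptAndSchedule above walk a 32-bit core mask by repeatedly taking the lowest set bit and then clearing it. A self-contained sketch of that loop, substituting C++20's std::countr_zero for yuzu's internal Common::CountTrailingZeroes32 (an assumption; the project helper itself is not shown on this page):

#include <bit>
#include <cstdint>

// Visit every set bit of the mask, lowest first; each bit index stands for a
// core that still needs servicing (e.g. a pending reschedule).
template <typename Fn>
void ForEachSetCore(std::uint32_t mask, Fn&& on_core) {
    while (mask != 0) {
        // Index of the lowest set bit = the next core to handle.
        const auto core = static_cast<std::uint32_t>(std::countr_zero(mask));
        on_core(core);
        // Clear that bit so the loop terminates once all cores are handled.
        mask &= ~(1U << core);
    }
}

// Usage: cores 1 and 3 pending -> calls on_core(1), then on_core(3).
// ForEachSetCore(0b1010u, [](std::uint32_t core) { /* reschedule core */ });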