| field | value |
|---|---|
| author | 2014-05-15 18:27:08 -0400 |
| committer | 2014-05-15 18:27:08 -0400 |
| commit | 940330c6e12b3eefb9fb035f75f4b090c969cb75 (patch) |
| tree | 1beb3f6e0c32b513b4a80bccb5609ca018605064 /src/core/hle/kernel/thread.cpp |
| parent | changed "UID" to "Handle" to be a little more consistent with CTR naming (diff) |
completely gutted/refactored threading code to be simpler
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
| -rw-r--r-- | src/core/hle/kernel/thread.cpp | 844 |
1 file changed, 228 insertions(+), 616 deletions(-)
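Most of the 616 deleted lines below are a hand-rolled `ThreadQueueList` (per-priority arrays with manual `malloc`/`realloc` rebalancing) that the commit swaps for the shared `Common::ThreadQueueList<Handle>` from `common/thread_queue_list.h`. The scheduler only leans on a small slice of its interface: `prepare`, `push_front`/`push_back`, `pop_first`, `pop_first_better`, and `remove`. Here is a minimal toy stand-in to illustrate those semantics — an illustration only, not the actual `Common` implementation; `ToyThreadQueueList` and its `std::map` layout are invented for this sketch:

```cpp
#include <cstdio>
#include <deque>
#include <map>

// Toy stand-in for Common::ThreadQueueList<Handle>. Lower number = better
// priority, matching THREADPRIO_HIGHEST == 0 in the new code. Pops return 0
// when nothing qualifies, which is why the scheduler treats 0 as "no thread".
template <typename IdType>
struct ToyThreadQueueList {
    std::map<unsigned, std::deque<IdType>> queues; // keyed by priority, ascending

    void prepare(unsigned priority) { queues[priority]; } // ensure bucket exists

    void push_front(unsigned priority, IdType id) { queues[priority].push_front(id); }
    void push_back(unsigned priority, IdType id) { queues[priority].push_back(id); }

    // Pop from the best non-empty bucket, regardless of priority.
    IdType pop_first() { return pop_first_better(~0u); }

    // Pop only from buckets strictly better than 'priority' (used when the
    // current thread is still running and should only yield to better ones).
    IdType pop_first_better(unsigned priority) {
        for (auto& entry : queues) {
            if (entry.first >= priority)
                break; // std::map iterates priorities in ascending order
            if (!entry.second.empty()) {
                IdType id = entry.second.front();
                entry.second.pop_front();
                return id;
            }
        }
        return 0;
    }

    void remove(unsigned priority, IdType id) {
        auto& q = queues[priority];
        for (auto it = q.begin(); it != q.end(); ++it) {
            if (*it == id) {
                q.erase(it);
                return;
            }
        }
    }
};

int main() {
    ToyThreadQueueList<int> ready;
    ready.prepare(16);
    ready.push_back(16, /*handle*/ 2);
    ready.push_back(0,  /*handle*/ 3);

    std::printf("%d\n", ready.pop_first());          // 3: priority 0 wins
    std::printf("%d\n", ready.pop_first_better(16)); // 0: nothing strictly better left
    std::printf("%d\n", ready.pop_first());          // 2: best remaining
    return 0;
}
```

The strictly-better cutoff in `pop_first_better` is what lets `__KernelNextThread` in the diff leave the current thread running unless a higher-priority thread is ready.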
| diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp | |||
| index b6d02aa12..833a1b4ba 100644 | |||
| --- a/src/core/hle/kernel/thread.cpp | |||
| +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <string> | 10 | #include <string> |
| 11 | 11 | ||
| 12 | #include "common/common.h" | 12 | #include "common/common.h" |
| 13 | #include "common/thread_queue_list.h" | ||
| 13 | 14 | ||
| 14 | #include "core/core.h" | 15 | #include "core/core.h" |
| 15 | #include "core/mem_map.h" | 16 | #include "core/mem_map.h" |
| @@ -18,698 +19,309 @@ | |||
| 18 | #include "core/hle/kernel/kernel.h" | 19 | #include "core/hle/kernel/kernel.h" |
| 19 | #include "core/hle/kernel/thread.h" | 20 | #include "core/hle/kernel/thread.h" |
| 20 | 21 | ||
| 21 | struct ThreadQueueList { | 22 | // Enums |
| 22 | // Number of queues (number of priority levels starting at 0.) | ||
| 23 | static const int NUM_QUEUES = 128; | ||
| 24 | // Initial number of threads a single queue can handle. | ||
| 25 | static const int INITIAL_CAPACITY = 32; | ||
| 26 | |||
| 27 | struct Queue { | ||
| 28 | // Next ever-been-used queue (worse priority.) | ||
| 29 | Queue *next; | ||
| 30 | // First valid item in data. | ||
| 31 | int first; | ||
| 32 | // One after last valid item in data. | ||
| 33 | int end; | ||
| 34 | // A too-large array with room on the front and end. | ||
| 35 | UID *data; | ||
| 36 | // Size of data array. | ||
| 37 | int capacity; | ||
| 38 | }; | ||
| 39 | |||
| 40 | ThreadQueueList() { | ||
| 41 | memset(queues, 0, sizeof(queues)); | ||
| 42 | first = invalid(); | ||
| 43 | } | ||
| 44 | |||
| 45 | ~ThreadQueueList() { | ||
| 46 | for (int i = 0; i < NUM_QUEUES; ++i) { | ||
| 47 | if (queues[i].data != NULL) { | ||
| 48 | free(queues[i].data); | ||
| 49 | } | ||
| 50 | } | ||
| 51 | } | ||
| 52 | |||
| 53 | // Only for debugging, returns priority level. | ||
| 54 | int contains(const UID uid) { | ||
| 55 | for (int i = 0; i < NUM_QUEUES; ++i) { | ||
| 56 | if (queues[i].data == NULL) { | ||
| 57 | continue; | ||
| 58 | } | ||
| 59 | Queue *cur = &queues[i]; | ||
| 60 | for (int j = cur->first; j < cur->end; ++j) { | ||
| 61 | if (cur->data[j] == uid) { | ||
| 62 | return i; | ||
| 63 | } | ||
| 64 | } | ||
| 65 | } | ||
| 66 | return -1; | ||
| 67 | } | ||
| 68 | |||
| 69 | inline UID pop_first() { | ||
| 70 | Queue *cur = first; | ||
| 71 | while (cur != invalid()) { | ||
| 72 | if (cur->end - cur->first > 0) { | ||
| 73 | return cur->data[cur->first++]; | ||
| 74 | } | ||
| 75 | cur = cur->next; | ||
| 76 | } | ||
| 77 | |||
| 78 | _dbg_assert_msg_(KERNEL, false, "ThreadQueueList should not be empty."); | ||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | inline UID pop_first_better(u32 priority) { | ||
| 83 | Queue *cur = first; | ||
| 84 | Queue *stop = &queues[priority]; | ||
| 85 | while (cur < stop) { | ||
| 86 | if (cur->end - cur->first > 0) { | ||
| 87 | return cur->data[cur->first++]; | ||
| 88 | } | ||
| 89 | cur = cur->next; | ||
| 90 | } | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | inline void push_front(u32 priority, const UID thread_id) { | ||
| 95 | Queue *cur = &queues[priority]; | ||
| 96 | cur->data[--cur->first] = thread_id; | ||
| 97 | if (cur->first == 0) { | ||
| 98 | rebalance(priority); | ||
| 99 | } | ||
| 100 | } | ||
| 101 | |||
| 102 | inline void push_back(u32 priority, const UID thread_id) | ||
| 103 | { | ||
| 104 | Queue *cur = &queues[priority]; | ||
| 105 | cur->data[cur->end++] = thread_id; | ||
| 106 | if (cur->end == cur->capacity) { | ||
| 107 | rebalance(priority); | ||
| 108 | } | ||
| 109 | } | ||
| 110 | |||
| 111 | inline void remove(u32 priority, const UID thread_id) { | ||
| 112 | Queue *cur = &queues[priority]; | ||
| 113 | _dbg_assert_msg_(KERNEL, cur->next != NULL, "ThreadQueueList::Queue should already be linked up."); | ||
| 114 | |||
| 115 | for (int i = cur->first; i < cur->end; ++i) { | ||
| 116 | if (cur->data[i] == thread_id) { | ||
| 117 | int remaining = --cur->end - i; | ||
| 118 | if (remaining > 0) { | ||
| 119 | memmove(&cur->data[i], &cur->data[i + 1], remaining * sizeof(UID)); | ||
| 120 | } | ||
| 121 | return; | ||
| 122 | } | ||
| 123 | } | ||
| 124 | |||
| 125 | // Wasn't there. | ||
| 126 | } | ||
| 127 | |||
| 128 | inline void rotate(u32 priority) { | ||
| 129 | Queue *cur = &queues[priority]; | ||
| 130 | _dbg_assert_msg_(KERNEL, cur->next != NULL, "ThreadQueueList::Queue should already be linked up."); | ||
| 131 | |||
| 132 | if (cur->end - cur->first > 1) { | ||
| 133 | cur->data[cur->end++] = cur->data[cur->first++]; | ||
| 134 | if (cur->end == cur->capacity) { | ||
| 135 | rebalance(priority); | ||
| 136 | } | ||
| 137 | } | ||
| 138 | } | ||
| 139 | |||
| 140 | inline void clear() { | ||
| 141 | for (int i = 0; i < NUM_QUEUES; ++i) { | ||
| 142 | if (queues[i].data != NULL) { | ||
| 143 | free(queues[i].data); | ||
| 144 | } | ||
| 145 | } | ||
| 146 | memset(queues, 0, sizeof(queues)); | ||
| 147 | first = invalid(); | ||
| 148 | } | ||
| 149 | |||
| 150 | inline bool empty(u32 priority) const { | ||
| 151 | const Queue *cur = &queues[priority]; | ||
| 152 | return cur->first == cur->end; | ||
| 153 | } | ||
| 154 | |||
| 155 | inline void prepare(u32 priority) { | ||
| 156 | Queue *cur = &queues[priority]; | ||
| 157 | if (cur->next == NULL) { | ||
| 158 | link(priority, INITIAL_CAPACITY); | ||
| 159 | } | ||
| 160 | } | ||
| 161 | |||
| 162 | private: | ||
| 163 | Queue *invalid() const { | ||
| 164 | return (Queue *)-1; | ||
| 165 | } | ||
| 166 | |||
| 167 | void link(u32 priority, int size) { | ||
| 168 | _dbg_assert_msg_(KERNEL, queues[priority].data == NULL, "ThreadQueueList::Queue should only be initialized once."); | ||
| 169 | |||
| 170 | if (size <= INITIAL_CAPACITY) { | ||
| 171 | size = INITIAL_CAPACITY; | ||
| 172 | } else { | ||
| 173 | int goal = size; | ||
| 174 | size = INITIAL_CAPACITY; | ||
| 175 | while (size < goal) | ||
| 176 | size *= 2; | ||
| 177 | } | ||
| 178 | Queue *cur = &queues[priority]; | ||
| 179 | cur->data = (UID*)malloc(sizeof(UID)* size); | ||
| 180 | cur->capacity = size; | ||
| 181 | cur->first = size / 2; | ||
| 182 | cur->end = size / 2; | ||
| 183 | |||
| 184 | for (int i = (int)priority - 1; i >= 0; --i) { | ||
| 185 | if (queues[i].next != NULL) { | ||
| 186 | cur->next = queues[i].next; | ||
| 187 | queues[i].next = cur; | ||
| 188 | return; | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | cur->next = first; | ||
| 193 | first = cur; | ||
| 194 | } | ||
| 195 | |||
| 196 | void rebalance(u32 priority) { | ||
| 197 | Queue *cur = &queues[priority]; | ||
| 198 | int size = cur->end - cur->first; | ||
| 199 | if (size >= cur->capacity - 2) { | ||
| 200 | UID* new_data = (UID*)realloc(cur->data, cur->capacity * 2 * sizeof(UID)); | ||
| 201 | if (new_data != NULL) { | ||
| 202 | cur->capacity *= 2; | ||
| 203 | cur->data = new_data; | ||
| 204 | } | ||
| 205 | } | ||
| 206 | 23 | ||
| 207 | int newFirst = (cur->capacity - size) / 2; | 24 | enum ThreadPriority { |
| 208 | if (newFirst != cur->first) { | 25 | THREADPRIO_HIGHEST = 0, |
| 209 | memmove(&cur->data[newFirst], &cur->data[cur->first], size * sizeof(UID)); | 26 | THREADPRIO_DEFAULT = 16, |
| 210 | cur->first = newFirst; | 27 | THREADPRIO_LOWEST = 31, |
| 211 | cur->end = newFirst + size; | 28 | }; |
| 212 | } | ||
| 213 | } | ||
| 214 | 29 | ||
| 215 | // The first queue that's ever been used. | 30 | enum ThreadStatus { |
| 216 | Queue* first; | 31 | THREADSTATUS_RUNNING = 1, |
| 217 | // The priority level queues of thread ids. | 32 | THREADSTATUS_READY = 2, |
| 218 | Queue queues[NUM_QUEUES]; | 33 | THREADSTATUS_WAIT = 4, |
| 34 | THREADSTATUS_SUSPEND = 8, | ||
| 35 | THREADSTATUS_DORMANT = 16, | ||
| 36 | THREADSTATUS_DEAD = 32, | ||
| 37 | THREADSTATUS_WAITSUSPEND = THREADSTATUS_WAIT | THREADSTATUS_SUSPEND | ||
| 219 | }; | 38 | }; |
| 220 | 39 | ||
| 221 | // Supposed to represent a real CTR struct... but not sure of the correct fields yet. | 40 | enum WaitType { |
| 222 | struct NativeThread { | 41 | WAITTYPE_NONE, |
| 223 | //u32 Pointer to vtable | 42 | WAITTYPE_SLEEP, |
| 224 | //u32 Reference count | 43 | WAITTYPE_SEMA, |
| 225 | //KProcess* Process the thread belongs to (virtual address) | 44 | WAITTYPE_EVENTFLAG, |
| 226 | //u32 Thread id | 45 | WAITTYPE_THREADEND, |
| 227 | //u32* ptr = *(KThread+0x8C) - 0xB0 | 46 | WAITTYPE_VBLANK, |
| 228 | //u32* End-address of the page for this thread allocated in the 0xFF4XX000 region. Thus, | 47 | WAITTYPE_MUTEX, |
| 229 | // if the beginning of this mapped page is 0xFF401000, this ptr would be 0xFF402000. | 48 | WAITTYPE_SYNCH, |
| 230 | //KThread* Previous ? (virtual address) | ||
| 231 | //KThread* Next ? (virtual address) | ||
| 232 | |||
| 233 | u32_le native_size; | ||
| 234 | char name[KERNELOBJECT_MAX_NAME_LENGTH + 1]; | ||
| 235 | |||
| 236 | // Threading stuff | ||
| 237 | u32_le status; | ||
| 238 | u32_le entry_point; | ||
| 239 | u32_le initial_stack; | ||
| 240 | u32_le stack_top; | ||
| 241 | u32_le stack_size; | ||
| 242 | 49 | ||
| 243 | u32_le arg; | 50 | NUM_WAITTYPES |
| 244 | u32_le processor_id; | ||
| 245 | |||
| 246 | s32_le initial_priority; | ||
| 247 | s32_le current_priority; | ||
| 248 | }; | 51 | }; |
| 249 | 52 | ||
| 250 | struct ThreadWaitInfo { | 53 | typedef s32 Handle; |
| 251 | u32 wait_value; | ||
| 252 | u32 timeout_ptr; | ||
| 253 | }; | ||
| 254 | 54 | ||
| 255 | class Thread : public KernelObject { | 55 | class Thread : public KernelObject { |
| 256 | public: | 56 | public: |
| 257 | /*const char *GetName() { return nt.name; }*/ | 57 | |
| 58 | const char *GetName() { return name; } | ||
| 258 | const char *GetTypeName() { return "Thread"; } | 59 | const char *GetTypeName() { return "Thread"; } |
| 259 | //void GetQuickInfo(char *ptr, int size) | 60 | |
| 260 | //{ | ||
| 261 | // sprintf(ptr, "pc= %08x sp= %08x %s %s %s %s %s %s (wt=%i wid=%i wv= %08x )", | ||
| 262 | // context.pc, context.r[13], // 13 is stack pointer | ||
| 263 | // (nt.status & THREADSTATUS_RUNNING) ? "RUN" : "", | ||
| 264 | // (nt.status & THREADSTATUS_READY) ? "READY" : "", | ||
| 265 | // (nt.status & THREADSTATUS_WAIT) ? "WAIT" : "", | ||
| 266 | // (nt.status & THREADSTATUS_SUSPEND) ? "SUSPEND" : "", | ||
| 267 | // (nt.status & THREADSTATUS_DORMANT) ? "DORMANT" : "", | ||
| 268 | // (nt.status & THREADSTATUS_DEAD) ? "DEAD" : "", | ||
| 269 | // nt.waitType, | ||
| 270 | // nt.waitID, | ||
| 271 | // waitInfo.waitValue); | ||
| 272 | //} | ||
| 273 | |||
| 274 | //static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_THID; } | ||
| 275 | static KernelIDType GetStaticIDType() { return KERNEL_ID_TYPE_THREAD; } | 61 | static KernelIDType GetStaticIDType() { return KERNEL_ID_TYPE_THREAD; } |
| 276 | KernelIDType GetIDType() const { return KERNEL_ID_TYPE_THREAD; } | 62 | KernelIDType GetIDType() const { return KERNEL_ID_TYPE_THREAD; } |
| 277 | 63 | ||
| 278 | bool SetupStack(u32 stack_top, int stack_size) { | 64 | inline bool IsRunning() const { return (status & THREADSTATUS_RUNNING) != 0; } |
| 279 | current_stack.start = stack_top; | 65 | inline bool IsStopped() const { return (status & THREADSTATUS_DORMANT) != 0; } |
| 280 | nt.initial_stack = current_stack.start; | 66 | inline bool IsReady() const { return (status & THREADSTATUS_READY) != 0; } |
| 281 | nt.stack_size = stack_size; | 67 | inline bool IsWaiting() const { return (status & THREADSTATUS_WAIT) != 0; } |
| 282 | return true; | 68 | inline bool IsSuspended() const { return (status & THREADSTATUS_SUSPEND) != 0; } |
| 283 | } | ||
| 284 | |||
| 285 | //bool FillStack() { | ||
| 286 | // // Fill the stack. | ||
| 287 | // if ((nt.attr & PSP_THREAD_ATTR_NO_FILLSTACK) == 0) { | ||
| 288 | // Memory::Memset(current_stack.start, 0xFF, nt.stack_size); | ||
| 289 | // } | ||
| 290 | // context.r[MIPS_REG_SP] = current_stack.start + nt.stack_size; | ||
| 291 | // current_stack.end = context.r[MIPS_REG_SP]; | ||
| 292 | // // The k0 section is 256 bytes at the top of the stack. | ||
| 293 | // context.r[MIPS_REG_SP] -= 256; | ||
| 294 | // context.r[MIPS_REG_K0] = context.r[MIPS_REG_SP]; | ||
| 295 | // u32 k0 = context.r[MIPS_REG_K0]; | ||
| 296 | // Memory::Memset(k0, 0, 0x100); | ||
| 297 | // Memory::Write_U32(GetUID(), k0 + 0xc0); | ||
| 298 | // Memory::Write_U32(nt.initialStack, k0 + 0xc8); | ||
| 299 | // Memory::Write_U32(0xffffffff, k0 + 0xf8); | ||
| 300 | // Memory::Write_U32(0xffffffff, k0 + 0xfc); | ||
| 301 | // // After k0 comes the arguments, which is done by sceKernelStartThread(). | ||
| 302 | |||
| 303 | // Memory::Write_U32(GetUID(), nt.initialStack); | ||
| 304 | // return true; | ||
| 305 | //} | ||
| 306 | |||
| 307 | //void FreeStack() { | ||
| 308 | // if (current_stack.start != 0) { | ||
| 309 | // DEBUG_LOG(KERNEL, "Freeing thread stack %s", nt.name); | ||
| 310 | |||
| 311 | // if ((nt.attr & PSP_THREAD_ATTR_CLEAR_STACK) != 0 && nt.initialStack != 0) { | ||
| 312 | // Memory::Memset(nt.initialStack, 0, nt.stack_size); | ||
| 313 | // } | ||
| 314 | |||
| 315 | // if (nt.attr & PSP_THREAD_ATTR_KERNEL) { | ||
| 316 | // kernelMemory.Free(current_stack.start); | ||
| 317 | // } | ||
| 318 | // else { | ||
| 319 | // userMemory.Free(current_stack.start); | ||
| 320 | // } | ||
| 321 | // current_stack.start = 0; | ||
| 322 | // } | ||
| 323 | //} | ||
| 324 | |||
| 325 | //bool PushExtendedStack(u32 size) { | ||
| 326 | // u32 stack = userMemory.Alloc(size, true, (std::string("extended/") + nt.name).c_str()); | ||
| 327 | // if (stack == (u32)-1) | ||
| 328 | // return false; | ||
| 329 | |||
| 330 | // pushed_stacks.push_back(current_stack); | ||
| 331 | // current_stack.start = stack; | ||
| 332 | // current_stack.end = stack + size; | ||
| 333 | // nt.initialStack = current_stack.start; | ||
| 334 | // nt.stack_size = current_stack.end - current_stack.start; | ||
| 335 | |||
| 336 | // // We still drop the thread_id at the bottom and fill it, but there's no k0. | ||
| 337 | // Memory::Memset(current_stack.start, 0xFF, nt.stack_size); | ||
| 338 | // Memory::Write_U32(GetUID(), nt.initialStack); | ||
| 339 | // return true; | ||
| 340 | //} | ||
| 341 | |||
| 342 | //bool PopExtendedStack() { | ||
| 343 | // if (pushed_stacks.size() == 0) { | ||
| 344 | // return false; | ||
| 345 | // } | ||
| 346 | // userMemory.Free(current_stack.start); | ||
| 347 | // current_stack = pushed_stacks.back(); | ||
| 348 | // pushed_stacks.pop_back(); | ||
| 349 | // nt.initialStack = current_stack.start; | ||
| 350 | // nt.stack_size = current_stack.end - current_stack.start; | ||
| 351 | // return true; | ||
| 352 | //} | ||
| 353 | |||
| 354 | Thread() { | ||
| 355 | current_stack.start = 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | // Can't use a destructor since savestates will call that too. | ||
| 359 | //void Cleanup() { | ||
| 360 | // // Callbacks are automatically deleted when their owning thread is deleted. | ||
| 361 | // for (auto it = callbacks.begin(), end = callbacks.end(); it != end; ++it) | ||
| 362 | // g_kernel_objects.Destroy<Callback>(*it); | ||
| 363 | |||
| 364 | // if (pushed_stacks.size() != 0) | ||
| 365 | // { | ||
| 366 | // WARN_LOG(KERNEL, "Thread ended within an extended stack"); | ||
| 367 | // for (size_t i = 0; i < pushed_stacks.size(); ++i) | ||
| 368 | // userMemory.Free(pushed_stacks[i].start); | ||
| 369 | // } | ||
| 370 | // FreeStack(); | ||
| 371 | //} | ||
| 372 | |||
| 373 | void setReturnValue(u32 retval); | ||
| 374 | void setReturnValue(u64 retval); | ||
| 375 | void resumeFromWait(); | ||
| 376 | //bool isWaitingFor(WaitType type, int id); | ||
| 377 | //int getWaitID(WaitType type); | ||
| 378 | ThreadWaitInfo getWaitInfo(); | ||
| 379 | |||
| 380 | // Utils | ||
| 381 | inline bool IsRunning() const { return (nt.status & THREADSTATUS_RUNNING) != 0; } | ||
| 382 | inline bool IsStopped() const { return (nt.status & THREADSTATUS_DORMANT) != 0; } | ||
| 383 | inline bool IsReady() const { return (nt.status & THREADSTATUS_READY) != 0; } | ||
| 384 | inline bool IsWaiting() const { return (nt.status & THREADSTATUS_WAIT) != 0; } | ||
| 385 | inline bool IsSuspended() const { return (nt.status & THREADSTATUS_SUSPEND) != 0; } | ||
| 386 | |||
| 387 | NativeThread nt; | ||
| 388 | |||
| 389 | ThreadWaitInfo waitInfo; | ||
| 390 | UID moduleId; | ||
| 391 | |||
| 392 | //bool isProcessingCallbacks; | ||
| 393 | //u32 currentMipscallId; | ||
| 394 | //UID currentCallbackId; | ||
| 395 | 69 | ||
| 396 | ThreadContext context; | 70 | ThreadContext context; |
| 397 | 71 | ||
| 398 | std::vector<UID> callbacks; | 72 | u32 status; |
| 73 | u32 entry_point; | ||
| 74 | u32 stack_top; | ||
| 75 | u32 stack_size; | ||
| 399 | 76 | ||
| 400 | std::list<u32> pending_calls; | 77 | s32 initial_priority; |
| 78 | s32 current_priority; | ||
| 401 | 79 | ||
| 402 | struct StackInfo { | 80 | s32 processor_id; |
| 403 | u32 start; | ||
| 404 | u32 end; | ||
| 405 | }; | ||
| 406 | // This is a stack of... stacks, since sceKernelExtendThreadStack() can recurse. | ||
| 407 | // These are stacks that aren't "active" right now, but will pop off once the func returns. | ||
| 408 | std::vector<StackInfo> pushed_stacks; | ||
| 409 | 81 | ||
| 410 | StackInfo current_stack; | 82 | WaitType wait_type; |
| 411 | 83 | ||
| 412 | // For thread end. | 84 | char name[KERNELOBJECT_MAX_NAME_LENGTH+1]; |
| 413 | std::vector<UID> waiting_threads; | ||
| 414 | // Key is the callback id it was for, or if no callback, the thread id. | ||
| 415 | std::map<UID, u64> paused_waits; | ||
| 416 | }; | 85 | }; |
| 417 | 86 | ||
| 418 | void ThreadContext::reset() { | ||
| 419 | for (int i = 0; i < 16; i++) { | ||
| 420 | reg[i] = 0; | ||
| 421 | } | ||
| 422 | cpsr = 0; | ||
| 423 | } | ||
| 424 | |||
| 425 | // Lists all thread ids that aren't deleted/etc. | 87 | // Lists all thread ids that aren't deleted/etc. |
| 426 | std::vector<UID> g_thread_queue; | 88 | std::vector<Handle> g_thread_queue; |
| 427 | 89 | ||
| 428 | // Lists only ready thread ids | 90 | // Lists only ready thread ids. |
| 429 | ThreadQueueList g_thread_ready_queue; | 91 | Common::ThreadQueueList<Handle> g_thread_ready_queue; |
| 430 | 92 | ||
| 431 | UID g_current_thread = 0; | 93 | Handle g_current_thread_handle; |
| 432 | Thread* g_current_thread_ptr = NULL; | ||
| 433 | const char* g_hle_current_thread_name = NULL; | ||
| 434 | 94 | ||
| 435 | /// Creates a new thread | 95 | Thread* g_current_thread; |
| 436 | Thread* __KernelCreateThread(UID& id, UID module_id, const char* name, u32 priority, | ||
| 437 | u32 entry_point, u32 arg, u32 stack_top, u32 processor_id, int stack_size) { | ||
| 438 | 96 | ||
| 439 | Thread *t = new Thread; | ||
| 440 | id = g_kernel_objects.Create(t); | ||
| 441 | |||
| 442 | g_thread_queue.push_back(id); | ||
| 443 | g_thread_ready_queue.prepare(priority); | ||
| 444 | |||
| 445 | memset(&t->nt, 0xCD, sizeof(t->nt)); | ||
| 446 | 97 | ||
| 447 | t->nt.entry_point = entry_point; | 98 | inline Thread *__GetCurrentThread() { |
| 448 | t->nt.native_size = sizeof(t->nt); | 99 | return g_current_thread; |
| 449 | t->nt.initial_priority = t->nt.current_priority = priority; | 100 | } |
| 450 | t->nt.status = THREADSTATUS_DORMANT; | ||
| 451 | t->nt.initial_stack = t->nt.stack_top = stack_top; | ||
| 452 | t->nt.stack_size = stack_size; | ||
| 453 | t->nt.processor_id = processor_id; | ||
| 454 | 101 | ||
| 455 | strncpy(t->nt.name, name, KERNELOBJECT_MAX_NAME_LENGTH); | 102 | inline void __SetCurrentThread(Thread *t) { |
| 456 | t->nt.name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0'; | 103 | g_current_thread = t; |
| 104 | g_current_thread_handle = t->GetHandle(); | ||
| 105 | } | ||
| 457 | 106 | ||
| 458 | t->nt.stack_size = stack_size; | 107 | //////////////////////////////////////////////////////////////////////////////////////////////////// |
| 459 | t->SetupStack(stack_top, stack_size); | ||
| 460 | 108 | ||
| 461 | return t; | 109 | /// Saves the current CPU context |
| 110 | void __KernelSaveContext(ThreadContext &ctx) { | ||
| 111 | ctx.cpu_registers[0] = Core::g_app_core->GetReg(0); | ||
| 112 | ctx.cpu_registers[1] = Core::g_app_core->GetReg(1); | ||
| 113 | ctx.cpu_registers[2] = Core::g_app_core->GetReg(2); | ||
| 114 | ctx.cpu_registers[3] = Core::g_app_core->GetReg(3); | ||
| 115 | ctx.cpu_registers[4] = Core::g_app_core->GetReg(4); | ||
| 116 | ctx.cpu_registers[5] = Core::g_app_core->GetReg(5); | ||
| 117 | ctx.cpu_registers[6] = Core::g_app_core->GetReg(6); | ||
| 118 | ctx.cpu_registers[7] = Core::g_app_core->GetReg(7); | ||
| 119 | ctx.cpu_registers[8] = Core::g_app_core->GetReg(8); | ||
| 120 | ctx.cpu_registers[9] = Core::g_app_core->GetReg(9); | ||
| 121 | ctx.cpu_registers[10] = Core::g_app_core->GetReg(10); | ||
| 122 | ctx.cpu_registers[11] = Core::g_app_core->GetReg(11); | ||
| 123 | ctx.cpu_registers[12] = Core::g_app_core->GetReg(12); | ||
| 124 | ctx.sp = Core::g_app_core->GetReg(13); | ||
| 125 | ctx.lr = Core::g_app_core->GetReg(14); | ||
| 126 | ctx.pc = Core::g_app_core->GetPC(); | ||
| 127 | ctx.cpsr = Core::g_app_core->GetCPSR(); | ||
| 462 | } | 128 | } |
| 463 | 129 | ||
| 464 | UID __KernelCreateThread(UID module_id, const char* name, u32 priority, u32 entry_point, u32 arg, | 130 | /// Loads a CPU context |
| 465 | u32 stack_top, u32 processor_id, int stack_size) { | 131 | void __KernelLoadContext(const ThreadContext &ctx) { |
| 466 | UID id; | 132 | Core::g_app_core->SetReg(0, ctx.cpu_registers[0]); |
| 467 | __KernelCreateThread(id, module_id, name, priority, entry_point, arg, stack_top, processor_id, | 133 | Core::g_app_core->SetReg(1, ctx.cpu_registers[1]); |
| 468 | stack_size); | 134 | Core::g_app_core->SetReg(2, ctx.cpu_registers[2]); |
| 469 | 135 | Core::g_app_core->SetReg(3, ctx.cpu_registers[3]); | |
| 470 | HLE::EatCycles(32000); | 136 | Core::g_app_core->SetReg(4, ctx.cpu_registers[4]); |
| 471 | HLE::ReSchedule("thread created"); | 137 | Core::g_app_core->SetReg(5, ctx.cpu_registers[5]); |
| 472 | 138 | Core::g_app_core->SetReg(6, ctx.cpu_registers[6]); | |
| 473 | return id; | 139 | Core::g_app_core->SetReg(7, ctx.cpu_registers[7]); |
| 140 | Core::g_app_core->SetReg(8, ctx.cpu_registers[8]); | ||
| 141 | Core::g_app_core->SetReg(9, ctx.cpu_registers[9]); | ||
| 142 | Core::g_app_core->SetReg(10, ctx.cpu_registers[10]); | ||
| 143 | Core::g_app_core->SetReg(11, ctx.cpu_registers[11]); | ||
| 144 | Core::g_app_core->SetReg(12, ctx.cpu_registers[12]); | ||
| 145 | Core::g_app_core->SetReg(13, ctx.sp); | ||
| 146 | Core::g_app_core->SetReg(14, ctx.lr); | ||
| 147 | //Core::g_app_core->SetReg(15, ctx.pc); | ||
| 148 | |||
| 149 | Core::g_app_core->SetPC(ctx.pc); | ||
| 150 | Core::g_app_core->SetCPSR(ctx.cpsr); | ||
| 474 | } | 151 | } |
| 475 | 152 | ||
| 476 | /// Resets the specified thread back to initial calling state | 153 | /// Resets a thread |
| 477 | void __KernelResetThread(Thread *t, int lowest_priority) { | 154 | void __KernelResetThread(Thread *t, s32 lowest_priority) { |
| 478 | t->context.reset(); | 155 | memset(&t->context, 0, sizeof(ThreadContext)); |
| 479 | t->context.pc = t->nt.entry_point; | ||
| 480 | t->context.reg[13] = t->nt.initial_stack; | ||
| 481 | 156 | ||
| 482 | // If the thread would be better than lowestPriority, reset to its initial. Yes, kinda odd... | 157 | t->context.pc = t->entry_point; |
| 483 | if (t->nt.current_priority < lowest_priority) { | 158 | t->context.sp = t->stack_top; |
| 484 | t->nt.current_priority = t->nt.initial_priority; | 159 | |
| 160 | if (t->current_priority < lowest_priority) { | ||
| 161 | t->current_priority = t->initial_priority; | ||
| 485 | } | 162 | } |
| 486 | 163 | ||
| 487 | memset(&t->waitInfo, 0, sizeof(t->waitInfo)); | 164 | t->wait_type = WAITTYPE_NONE; |
| 488 | } | ||
| 489 | |||
| 490 | /// Returns the current executing thread | ||
| 491 | inline Thread *__GetCurrentThread() { | ||
| 492 | return g_current_thread_ptr; | ||
| 493 | } | 165 | } |
| 494 | 166 | ||
| 495 | /// Sets the current executing thread | 167 | /// Creates a new thread |
| 496 | inline void __SetCurrentThread(Thread *thread, UID thread_id, const char *name) { | 168 | Thread *__KernelCreateThread(Handle &handle, const char *name, u32 entry_point, s32 priority, s32 processor_id, u32 stack_top, int stack_size=0x4000) { |
| 497 | g_current_thread = thread_id; | 169 | static u32 _handle_count = 1; |
| 498 | g_current_thread_ptr = thread; | 170 | |
| 499 | g_hle_current_thread_name = name; | 171 | Thread *t = new Thread; |
| 172 | |||
| 173 | handle = (_handle_count++); | ||
| 174 | |||
| 175 | g_thread_queue.push_back(handle); | ||
| 176 | g_thread_ready_queue.prepare(priority); | ||
| 177 | |||
| 178 | t->status = THREADSTATUS_DORMANT; | ||
| 179 | t->entry_point = entry_point; | ||
| 180 | t->stack_top = stack_top; | ||
| 181 | t->stack_size = stack_size; | ||
| 182 | t->initial_priority = t->current_priority = priority; | ||
| 183 | t->processor_id = processor_id; | ||
| 184 | t->wait_type = WAITTYPE_NONE; | ||
| 185 | |||
| 186 | strncpy(t->name, name, KERNELOBJECT_MAX_NAME_LENGTH); | ||
| 187 | t->name[KERNELOBJECT_MAX_NAME_LENGTH] = '\0'; | ||
| 188 | |||
| 189 | return t; | ||
| 500 | } | 190 | } |
| 501 | 191 | ||
| 502 | // TODO: Use __KernelChangeThreadState instead? It has other affects... | 192 | /// Change a thread to "ready" state |
| 503 | void __KernelChangeReadyState(Thread *thread, UID thread_id, bool ready) { | 193 | void __KernelChangeReadyState(Thread *t, bool ready) { |
| 504 | // Passing the id as a parameter is just an optimization, if it's wrong it will cause havoc. | 194 | Handle handle = t->GetHandle(); |
| 505 | _dbg_assert_msg_(KERNEL, thread->GetUID() == thread_id, "Incorrect thread_id"); | 195 | if (t->IsReady()) { |
| 506 | int prio = thread->nt.current_priority; | 196 | if (!ready) { |
| 507 | 197 | g_thread_ready_queue.remove(t->current_priority, handle); | |
| 508 | if (thread->IsReady()) { | 198 | } |
| 509 | if (!ready) | 199 | } else if (ready) { |
| 510 | g_thread_ready_queue.remove(prio, thread_id); | 200 | if (t->IsRunning()) { |
| 511 | } else if (ready) { | 201 | g_thread_ready_queue.push_front(t->current_priority, handle); |
| 512 | if (thread->IsRunning()) { | ||
| 513 | g_thread_ready_queue.push_front(prio, thread_id); | ||
| 514 | } else { | 202 | } else { |
| 515 | g_thread_ready_queue.push_back(prio, thread_id); | 203 | g_thread_ready_queue.push_back(t->current_priority, handle); |
| 516 | } | 204 | } |
| 517 | thread->nt.status = THREADSTATUS_READY; | 205 | t->status = THREADSTATUS_READY; |
| 518 | } | 206 | } |
| 519 | } | 207 | } |
| 520 | 208 | ||
| 521 | void __KernelChangeReadyState(UID thread_id, bool ready) { | 209 | /// Changes a threads state |
| 522 | u32 error; | 210 | void __KernelChangeThreadState(Thread *t, ThreadStatus new_status) { |
| 523 | Thread *thread = g_kernel_objects.Get<Thread>(thread_id, error); | 211 | if (!t || t->status == new_status) { |
| 524 | if (thread) { | 212 | return; |
| 525 | __KernelChangeReadyState(thread, thread_id, ready); | 213 | } |
| 526 | } else { | 214 | __KernelChangeReadyState(t, (new_status & THREADSTATUS_READY) != 0); |
| 527 | WARN_LOG(KERNEL, "Trying to change the ready state of an unknown thread?"); | 215 | t->status = new_status; |
| 216 | |||
| 217 | if (new_status == THREADSTATUS_WAIT) { | ||
| 218 | if (t->wait_type == WAITTYPE_NONE) { | ||
| 219 | printf("ERROR: Waittype none not allowed here\n"); | ||
| 220 | } | ||
| 528 | } | 221 | } |
| 529 | } | 222 | } |
| 530 | 223 | ||
| 531 | /// Returns NULL if the current thread is fine. | 224 | /// Switches CPU context to that of the specified thread |
| 532 | Thread* __KernelNextThread() { | 225 | void __KernelSwitchContext(Thread* t, const char *reason) { |
| 533 | UID best_thread; | ||
| 534 | |||
| 535 | // If the current thread is running, it's a valid candidate. | ||
| 536 | Thread *cur = __GetCurrentThread(); | 226 | Thread *cur = __GetCurrentThread(); |
| 537 | if (cur && cur->IsRunning()) { | 227 | |
| 538 | best_thread = g_thread_ready_queue.pop_first_better(cur->nt.current_priority); | 228 | // Save context for current thread |
| 539 | if (best_thread != 0) { | 229 | if (cur) { |
| 540 | __KernelChangeReadyState(cur, g_current_thread, true); | 230 | __KernelSaveContext(cur->context); |
| 231 | |||
| 232 | if (cur->IsRunning()) { | ||
| 233 | __KernelChangeReadyState(cur, true); | ||
| 541 | } | 234 | } |
| 542 | } else { | ||
| 543 | best_thread = g_thread_ready_queue.pop_first(); | ||
| 544 | } | 235 | } |
| 545 | // Assume g_thread_ready_queue has not become corrupt. | 236 | // Load context of new thread |
| 546 | if (best_thread != 0) { | 237 | if (t) { |
| 547 | return g_kernel_objects.GetFast<Thread>(best_thread); | 238 | __SetCurrentThread(t); |
| 239 | __KernelChangeReadyState(t, false); | ||
| 240 | t->status = (t->status | THREADSTATUS_RUNNING) & ~THREADSTATUS_READY; | ||
| 241 | t->wait_type = WAITTYPE_NONE; | ||
| 242 | __KernelLoadContext(t->context); | ||
| 548 | } else { | 243 | } else { |
| 549 | return NULL; | 244 | __SetCurrentThread(NULL); |
| 550 | } | 245 | } |
| 551 | } | 246 | } |
| 552 | 247 | ||
| 553 | /// Saves the current CPU context | 248 | /// Gets the next thread that is ready to be run by priority |
| 554 | void __KernelSaveContext(ThreadContext *ctx) { | 249 | Thread *__KernelNextThread() { |
| 555 | ctx->reg[0] = Core::g_app_core->GetReg(0); | 250 | Handle next; |
| 556 | ctx->reg[1] = Core::g_app_core->GetReg(1); | ||
| 557 | ctx->reg[2] = Core::g_app_core->GetReg(2); | ||
| 558 | ctx->reg[3] = Core::g_app_core->GetReg(3); | ||
| 559 | ctx->reg[4] = Core::g_app_core->GetReg(4); | ||
| 560 | ctx->reg[5] = Core::g_app_core->GetReg(5); | ||
| 561 | ctx->reg[6] = Core::g_app_core->GetReg(6); | ||
| 562 | ctx->reg[7] = Core::g_app_core->GetReg(7); | ||
| 563 | ctx->reg[8] = Core::g_app_core->GetReg(8); | ||
| 564 | ctx->reg[9] = Core::g_app_core->GetReg(9); | ||
| 565 | ctx->reg[10] = Core::g_app_core->GetReg(10); | ||
| 566 | ctx->reg[11] = Core::g_app_core->GetReg(11); | ||
| 567 | ctx->reg[12] = Core::g_app_core->GetReg(12); | ||
| 568 | ctx->reg[13] = Core::g_app_core->GetReg(13); | ||
| 569 | ctx->reg[14] = Core::g_app_core->GetReg(14); | ||
| 570 | ctx->reg[15] = Core::g_app_core->GetReg(15); | ||
| 571 | ctx->pc = Core::g_app_core->GetPC(); | ||
| 572 | ctx->cpsr = Core::g_app_core->GetCPSR(); | ||
| 573 | } | ||
| 574 | |||
| 575 | /// Loads a CPU context | ||
| 576 | void __KernelLoadContext(ThreadContext *ctx) { | ||
| 577 | Core::g_app_core->SetReg(0, ctx->reg[0]); | ||
| 578 | Core::g_app_core->SetReg(1, ctx->reg[1]); | ||
| 579 | Core::g_app_core->SetReg(2, ctx->reg[2]); | ||
| 580 | Core::g_app_core->SetReg(3, ctx->reg[3]); | ||
| 581 | Core::g_app_core->SetReg(4, ctx->reg[4]); | ||
| 582 | Core::g_app_core->SetReg(5, ctx->reg[5]); | ||
| 583 | Core::g_app_core->SetReg(6, ctx->reg[6]); | ||
| 584 | Core::g_app_core->SetReg(7, ctx->reg[7]); | ||
| 585 | Core::g_app_core->SetReg(8, ctx->reg[8]); | ||
| 586 | Core::g_app_core->SetReg(9, ctx->reg[9]); | ||
| 587 | Core::g_app_core->SetReg(10, ctx->reg[10]); | ||
| 588 | Core::g_app_core->SetReg(11, ctx->reg[11]); | ||
| 589 | Core::g_app_core->SetReg(12, ctx->reg[12]); | ||
| 590 | Core::g_app_core->SetReg(13, ctx->reg[13]); | ||
| 591 | Core::g_app_core->SetReg(14, ctx->reg[14]); | ||
| 592 | Core::g_app_core->SetReg(15, ctx->reg[15]); | ||
| 593 | Core::g_app_core->SetPC(ctx->pc); | ||
| 594 | Core::g_app_core->SetCPSR(ctx->cpsr); | ||
| 595 | } | ||
| 596 | |||
| 597 | /// Switches thread context | ||
| 598 | void __KernelSwitchContext(Thread *target, const char *reason) { | ||
| 599 | u32 old_pc = 0; | ||
| 600 | UID old_uid = 0; | ||
| 601 | const char *old_name = g_hle_current_thread_name != NULL ? g_hle_current_thread_name : "(none)"; | ||
| 602 | Thread *cur = __GetCurrentThread(); | 251 | Thread *cur = __GetCurrentThread(); |
| 603 | 252 | ||
| 604 | if (cur) { // It might just have been deleted. | 253 | if (cur && cur->IsRunning()) { |
| 605 | __KernelSaveContext(&cur->context); | 254 | next = g_thread_ready_queue.pop_first_better(cur->current_priority); |
| 606 | old_pc = Core::g_app_core->GetPC(); | 255 | } else { |
| 607 | old_uid = cur->GetUID(); | 256 | next = g_thread_ready_queue.pop_first(); |
| 608 | |||
| 609 | // Normally this is taken care of in __KernelNextThread(). | ||
| 610 | if (cur->IsRunning()) | ||
| 611 | __KernelChangeReadyState(cur, old_uid, true); | ||
| 612 | } | 257 | } |
| 613 | if (target) { | 258 | if (next < 0) { |
| 614 | __SetCurrentThread(target, target->GetUID(), target->nt.name); | 259 | return NULL; |
| 615 | __KernelChangeReadyState(target, g_current_thread, false); | ||
| 616 | |||
| 617 | target->nt.status = (target->nt.status | THREADSTATUS_RUNNING) & ~THREADSTATUS_READY; | ||
| 618 | |||
| 619 | __KernelLoadContext(&target->context); | ||
| 620 | } else { | ||
| 621 | __SetCurrentThread(NULL, 0, NULL); | ||
| 622 | } | 260 | } |
| 261 | return g_kernel_objects.GetFast<Thread>(next); | ||
| 623 | } | 262 | } |
| 624 | 263 | ||
| 625 | bool __KernelSwitchToThread(UID thread_id, const char *reason) { | 264 | /// Calls a thread by marking it as "ready" (note: will not actually execute until current thread yields) |
| 626 | if (!reason) { | 265 | void __KernelCallThread(Thread *t) { |
| 627 | reason = "switch to thread"; | 266 | // Stop waiting |
| 267 | if (t->wait_type != WAITTYPE_NONE) { | ||
| 268 | t->wait_type = WAITTYPE_NONE; | ||
| 628 | } | 269 | } |
| 629 | if (g_current_thread == thread_id) { | 270 | __KernelChangeThreadState(t, THREADSTATUS_READY); |
| 630 | return false; | ||
| 631 | } | ||
| 632 | u32 error; | ||
| 633 | Thread *t = g_kernel_objects.Get<Thread>(thread_id, error); | ||
| 634 | if (!t) { | ||
| 635 | ERROR_LOG(KERNEL, "__KernelSwitchToThread: %x doesn't exist", thread_id); | ||
| 636 | HLE::ReSchedule("switch to deleted thread"); | ||
| 637 | } else if (t->IsReady() || t->IsRunning()) { | ||
| 638 | Thread *current = __GetCurrentThread(); | ||
| 639 | if (current && current->IsRunning()) { | ||
| 640 | __KernelChangeReadyState(current, g_current_thread, true); | ||
| 641 | } | ||
| 642 | __KernelSwitchContext(t, reason); | ||
| 643 | return true; | ||
| 644 | } else { | ||
| 645 | HLE::ReSchedule("switch to waiting thread"); | ||
| 646 | } | ||
| 647 | return false; | ||
| 648 | } | 271 | } |
| 649 | 272 | ||
| 650 | /// Sets up the root (primary) thread of execution | 273 | /// Sets up the primary application thread |
| 651 | UID __KernelSetupRootThread(UID module_id, int arg, int prio, int stack_size) { | 274 | Handle __KernelSetupMainThread(s32 priority, int stack_size) { |
| 652 | UID id; | 275 | Handle handle; |
| 653 | |||
| 654 | Thread *thread = __KernelCreateThread(id, module_id, "root", prio, Core::g_app_core->GetPC(), | ||
| 655 | arg, Memory::SCRATCHPAD_VADDR_END, 0xFFFFFFFE, stack_size=stack_size); | ||
| 656 | |||
| 657 | if (thread->current_stack.start == 0) { | ||
| 658 | ERROR_LOG(KERNEL, "Unable to allocate stack for root thread."); | ||
| 659 | } | ||
| 660 | __KernelResetThread(thread, 0); | ||
| 661 | |||
| 662 | Thread *prev_thread = __GetCurrentThread(); | ||
| 663 | if (prev_thread && prev_thread->IsRunning()) | ||
| 664 | __KernelChangeReadyState(g_current_thread, true); | ||
| 665 | __SetCurrentThread(thread, id, "root"); | ||
| 666 | thread->nt.status = THREADSTATUS_RUNNING; // do not schedule | ||
| 667 | |||
| 668 | strcpy(thread->nt.name, "root"); | ||
| 669 | |||
| 670 | __KernelLoadContext(&thread->context); | ||
| 671 | 276 | ||
| 672 | // NOTE(bunnei): Not sure this is really correct, ignore args for now... | 277 | // Initialize new "main" thread |
| 673 | //Core::g_app_core->SetReg(0, args); | 278 | Thread *t = __KernelCreateThread(handle, "main", Core::g_app_core->GetPC(), priority, |
| 674 | //Core::g_app_core->SetReg(13, (args + 0xf) & ~0xf); // Setup SP - probably not correct | 279 | 0xFFFFFFFE, Memory::SCRATCHPAD_VADDR_END, stack_size); |
| 675 | //u32 location = Core::g_app_core->GetReg(13); // SP | ||
| 676 | //Core::g_app_core->SetReg(1, location); | ||
| 677 | 280 | ||
| 678 | //if (argp) | 281 | __KernelResetThread(t, 0); |
| 679 | // Memory::Memcpy(location, argp, args); | 282 | |
| 680 | //// Let's assume same as starting a new thread, 64 bytes for safety/kernel. | 283 | // If running another thread already, set it to "ready" state |
| 681 | //Core::g_app_core->SetReg(13, Core::g_app_core->GetReg(13) - 64); | ||
| 682 | |||
| 683 | return id; | ||
| 684 | } | ||
| 685 | |||
| 686 | int __KernelRotateThreadReadyQueue(int priority) { | ||
| 687 | Thread *cur = __GetCurrentThread(); | 284 | Thread *cur = __GetCurrentThread(); |
| 688 | 285 | if (cur && cur->IsRunning()) { | |
| 689 | // 0 is special, it means "my current priority." | 286 | __KernelChangeReadyState(cur, true); |
| 690 | if (priority == 0) { | ||
| 691 | priority = cur->nt.current_priority; | ||
| 692 | } | 287 | } |
| 693 | //if (priority <= 0x07 || priority > 0x77) | 288 | |
| 694 | // return SCE_KERNEL_ERROR_ILLEGAL_PRIORITY; | 289 | // Run new "main" thread |
| 290 | __SetCurrentThread(t); | ||
| 291 | t->status = THREADSTATUS_RUNNING; | ||
| 292 | __KernelLoadContext(t->context); | ||
| 695 | 293 | ||
| 696 | if (!g_thread_ready_queue.empty(priority)) { | 294 | return handle; |
| 697 | // In other words, yield to everyone else. | 295 | } |
| 698 | if (cur->nt.current_priority == priority) { | ||
| 699 | g_thread_ready_queue.push_back(priority, g_current_thread); | ||
| 700 | cur->nt.status = (cur->nt.status & ~THREADSTATUS_RUNNING) | THREADSTATUS_READY; | ||
| 701 | 296 | ||
| 702 | // Yield the next thread of this priority to all other threads of same priority. | 297 | /// Resumes a thread from waiting by marking it as "ready" |
| 703 | } else { | 298 | void __KernelResumeThreadFromWait(Handle handle) { |
| 704 | g_thread_ready_queue.rotate(priority); | 299 | u32 error; |
| 300 | Thread *t = g_kernel_objects.Get<Thread>(handle, error); | ||
| 301 | if (t) { | ||
| 302 | t->status &= ~THREADSTATUS_WAIT; | ||
| 303 | if (!(t->status & (THREADSTATUS_WAITSUSPEND | THREADSTATUS_DORMANT | THREADSTATUS_DEAD))) { | ||
| 304 | __KernelChangeReadyState(t, true); | ||
| 705 | } | 305 | } |
| 706 | } | 306 | } |
| 707 | HLE::EatCycles(250); | 307 | } |
| 708 | HLE::ReSchedule("rotatethreadreadyqueue"); | ||
| 709 | 308 | ||
| 710 | return 0; | 309 | /// Puts a thread in the wait state for the given type/reason |
| 310 | void __KernelWaitCurThread(WaitType wait_type, const char *reason) { | ||
| 311 | Thread *t = __GetCurrentThread(); | ||
| 312 | t->wait_type = wait_type; | ||
| 313 | __KernelChangeThreadState(t, ThreadStatus(THREADSTATUS_WAIT | (t->status & THREADSTATUS_SUSPEND))); | ||
| 711 | } | 314 | } |
| 712 | 315 | ||
| 316 | /// Reschedules to the next available thread (call after current thread is suspended) | ||
| 317 | void __KernelReschedule(const char *reason) { | ||
| 318 | Thread *next = __KernelNextThread(); | ||
| 319 | if (next > 0) { | ||
| 320 | __KernelSwitchContext(next, reason); | ||
| 321 | } | ||
| 322 | } | ||
| 323 | |||
| 324 | |||
| 713 | void __KernelThreadingInit() { | 325 | void __KernelThreadingInit() { |
| 714 | } | 326 | } |
| 715 | 327 | ||