summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/thread.cpp
diff options
context:
space:
mode:
author: bunnei, 2016-05-12 21:51:35 -0400
committer: bunnei, 2016-05-12 21:51:35 -0400
commit0d8bd3ba369a4a6264ba99a66dbc17e1e14e1440 (patch)
treec7341fcf893002e87fbe18b0a04b221aff46bf13 /src/core/hle/kernel/thread.cpp
parentMerge pull request #1783 from JayFoxRox/cleanup-shadersetup (diff)
parent: Kernel/Threads: Dynamically allocate the TLS region for threads in the BASE region of the linear heap. (diff)
downloadyuzu-0d8bd3ba369a4a6264ba99a66dbc17e1e14e1440.tar.gz
yuzu-0d8bd3ba369a4a6264ba99a66dbc17e1e14e1440.tar.xz
yuzu-0d8bd3ba369a4a6264ba99a66dbc17e1e14e1440.zip
Merge pull request #1695 from Subv/tls_alloc
Kernel/Threads: Dynamically allocate the TLS region for threads.
Diffstat (limited to 'src/core/hle/kernel/thread.cpp')
-rw-r--r--src/core/hle/kernel/thread.cpp84
1 file changed, 67 insertions, 17 deletions
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 6dc95d0f1..68f026918 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -117,9 +117,10 @@ void Thread::Stop() {
117 } 117 }
118 wait_objects.clear(); 118 wait_objects.clear();
119 119
120 Kernel::g_current_process->used_tls_slots[tls_index] = false; 120 // Mark the TLS slot in the thread's page as free.
121 g_current_process->misc_memory_used -= Memory::TLS_ENTRY_SIZE; 121 u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
122 g_current_process->memory_region->used -= Memory::TLS_ENTRY_SIZE; 122 u32 tls_slot = ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
123 Kernel::g_current_process->tls_slots[tls_page].reset(tls_slot);
123 124
124 HLE::Reschedule(__func__); 125 HLE::Reschedule(__func__);
125} 126}
@@ -366,6 +367,31 @@ static void DebugThreadQueue() {
366 } 367 }
367} 368}
368 369
370/**
371 * Finds a free location for the TLS section of a thread.
372 * @param tls_slots The TLS page array of the thread's owner process.
373 * Returns a tuple of (page, slot, alloc_needed) where:
374 * page: The index of the first allocated TLS page that has free slots.
375 * slot: The index of the first free slot in the indicated page.
376 * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
377 */
378std::tuple<u32, u32, bool> GetFreeThreadLocalSlot(std::vector<std::bitset<8>>& tls_slots) {
379 // Iterate over all the allocated pages, and try to find one where not all slots are used.
380 for (unsigned page = 0; page < tls_slots.size(); ++page) {
381 const auto& page_tls_slots = tls_slots[page];
382 if (!page_tls_slots.all()) {
383 // We found a page with at least one free slot, find which slot it is
384 for (unsigned slot = 0; slot < page_tls_slots.size(); ++slot) {
385 if (!page_tls_slots.test(slot)) {
386 return std::make_tuple(page, slot, false);
387 }
388 }
389 }
390 }
391
392 return std::make_tuple(0, 0, true);
393}
394
369ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, s32 priority, 395ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point, s32 priority,
370 u32 arg, s32 processor_id, VAddr stack_top) { 396 u32 arg, s32 processor_id, VAddr stack_top) {
371 if (priority < THREADPRIO_HIGHEST || priority > THREADPRIO_LOWEST) { 397 if (priority < THREADPRIO_HIGHEST || priority > THREADPRIO_LOWEST) {
@@ -403,22 +429,50 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
403 thread->name = std::move(name); 429 thread->name = std::move(name);
404 thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom(); 430 thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom();
405 thread->owner_process = g_current_process; 431 thread->owner_process = g_current_process;
406 thread->tls_index = -1;
407 thread->waitsynch_waited = false; 432 thread->waitsynch_waited = false;
408 433
409 // Find the next available TLS index, and mark it as used 434 // Find the next available TLS index, and mark it as used
410 auto& used_tls_slots = Kernel::g_current_process->used_tls_slots; 435 auto& tls_slots = Kernel::g_current_process->tls_slots;
411 for (unsigned int i = 0; i < used_tls_slots.size(); ++i) { 436 bool needs_allocation = true;
412 if (used_tls_slots[i] == false) { 437 u32 available_page; // Which allocated page has free space
413 thread->tls_index = i; 438 u32 available_slot; // Which slot within the page is free
414 used_tls_slots[i] = true; 439
415 break; 440 std::tie(available_page, available_slot, needs_allocation) = GetFreeThreadLocalSlot(tls_slots);
441
442 if (needs_allocation) {
443 // There are no already-allocated pages with free slots, lets allocate a new one.
444 // TLS pages are allocated from the BASE region in the linear heap.
445 MemoryRegionInfo* memory_region = GetMemoryRegion(MemoryRegion::BASE);
446 auto& linheap_memory = memory_region->linear_heap_memory;
447
448 if (linheap_memory->size() + Memory::PAGE_SIZE > memory_region->size) {
449 LOG_ERROR(Kernel_SVC, "Not enough space in region to allocate a new TLS page for thread");
450 return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel, ErrorSummary::OutOfResource, ErrorLevel::Permanent);
416 } 451 }
452
453 u32 offset = linheap_memory->size();
454
455 // Allocate some memory from the end of the linear heap for this region.
456 linheap_memory->insert(linheap_memory->end(), Memory::PAGE_SIZE, 0);
457 memory_region->used += Memory::PAGE_SIZE;
458 Kernel::g_current_process->linear_heap_used += Memory::PAGE_SIZE;
459
460 tls_slots.emplace_back(0); // The page is completely available at the start
461 available_page = tls_slots.size() - 1;
462 available_slot = 0; // Use the first slot in the new page
463
464 auto& vm_manager = Kernel::g_current_process->vm_manager;
465 vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
466
467 // Map the page to the current process' address space.
468 // TODO(Subv): Find the correct MemoryState for this region.
469 vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
470 linheap_memory, offset, Memory::PAGE_SIZE, MemoryState::Private);
417 } 471 }
418 472
419 ASSERT_MSG(thread->tls_index != -1, "Out of TLS space"); 473 // Mark the slot as used
420 g_current_process->misc_memory_used += Memory::TLS_ENTRY_SIZE; 474 tls_slots[available_page].set(available_slot);
421 g_current_process->memory_region->used += Memory::TLS_ENTRY_SIZE; 475 thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE;
422 476
423 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 477 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
424 // to initialize the context 478 // to initialize the context
@@ -509,10 +563,6 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
509 context.cpu_registers[1] = output; 563 context.cpu_registers[1] = output;
510} 564}
511 565
512VAddr Thread::GetTLSAddress() const {
513 return Memory::TLS_AREA_VADDR + tls_index * Memory::TLS_ENTRY_SIZE;
514}
515
516//////////////////////////////////////////////////////////////////////////////////////////////////// 566////////////////////////////////////////////////////////////////////////////////////////////////////
517 567
518void ThreadingInit() { 568void ThreadingInit() {