summary refs log tree commitdiff
diff options
context:
space:
mode:
-rw-r--r--src/core/hle/kernel/process.cpp58
-rw-r--r--src/core/hle/kernel/process.h28
-rw-r--r--src/core/hle/kernel/thread.cpp58
-rw-r--r--src/core/hle/kernel/thread.h13
4 files changed, 89 insertions, 68 deletions
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 914bbe0a1..0c8ea94fc 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -128,6 +128,64 @@ void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) {
128 Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, *this); 128 Kernel::SetupMainThread(kernel, entry_point, main_thread_priority, *this);
129} 129}
130 130
131/**
132 * Finds a free location for the TLS section of a thread.
133 * @param tls_slots The TLS page array of the thread's owner process.
134 * @return A tuple of (page, slot, alloc_needed) where:
135 * page: The index of the first allocated TLS page that has free slots.
136 * slot: The index of the first free slot in the indicated page.
137 * alloc_needed: Whether there's a need to allocate a new TLS page (all pages are full).
138 */
139static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot(
140 const std::vector<std::bitset<8>>& tls_slots) {
141 // Iterate over all the allocated pages, and try to find one where not all slots are used.
142 for (std::size_t page = 0; page < tls_slots.size(); ++page) {
143 const auto& page_tls_slots = tls_slots[page];
144 if (!page_tls_slots.all()) {
145 // We found a page with at least one free slot, find which slot it is
146 for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
147 if (!page_tls_slots.test(slot)) {
148 return std::make_tuple(page, slot, false);
149 }
150 }
151 }
152 }
153
154 return std::make_tuple(0, 0, true);
155}
156
/**
 * Marks the next available TLS slot in this process as used and returns its address.
 * If all currently allocated TLS pages are full, a new page is backed by the given
 * thread's TLS memory and mapped into the TLS area before a slot is reserved.
 * @param thread The thread requesting the slot; its TLS memory backs any newly mapped page.
 * @return The virtual address of the reserved TLS slot within the TLS area.
 */
157VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) {
158 auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots);
159
160 if (needs_allocation) {
161 tls_slots.emplace_back(0); // The page is completely available at the start
162 available_page = tls_slots.size() - 1;
163 available_slot = 0; // Use the first slot in the new page
164
165 // Allocate some memory from the end of the linear heap for this region.
166 auto& tls_memory = thread.GetTLSMemory();
167 tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0);
168
169 vm_manager.RefreshMemoryBlockMappings(tls_memory.get());
170
171 vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
172 tls_memory, 0, Memory::PAGE_SIZE, MemoryState::ThreadLocal);
173 }
174
175 tls_slots[available_page].set(available_slot); // Mark the slot as used
176
// Translate (page, slot) back into a virtual address inside the TLS area.
177 return Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
178 available_slot * Memory::TLS_ENTRY_SIZE;
179}
180
/**
 * Frees a TLS slot previously reserved by MarkNextAvailableTLSSlotAsUsed.
 * @param tls_address Virtual address (within the TLS area) identifying the slot to free.
 */
181void Process::FreeTLSSlot(VAddr tls_address) {
// Recover the (page, slot) pair from the address offset into the TLS area,
// then clear the corresponding bit in that page's usage bitset.
182 const VAddr tls_base = tls_address - Memory::TLS_AREA_VADDR;
183 const VAddr tls_page = tls_base / Memory::PAGE_SIZE;
184 const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
185
186 tls_slots[tls_page].reset(tls_slot);
187}
188
131void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) { 189void Process::LoadModule(SharedPtr<CodeSet> module_, VAddr base_addr) {
132 const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, 190 const auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions,
133 MemoryState memory_state) { 191 MemoryState memory_state) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 81538f70c..84027a31a 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -176,8 +176,25 @@ public:
176 /////////////////////////////////////////////////////////////////////////////////////////////// 176 ///////////////////////////////////////////////////////////////////////////////////////////////
177 // Memory Management 177 // Memory Management
178 178
179 // Marks the next available region as used and returns the address of the slot.
180 VAddr MarkNextAvailableTLSSlotAsUsed(Thread& thread);
181
182 // Frees a used TLS slot identified by the given address
183 void FreeTLSSlot(VAddr tls_address);
184
185 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
186 ResultCode HeapFree(VAddr target, u32 size);
187
188 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
189
190 ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);
191
179 VMManager vm_manager; 192 VMManager vm_manager;
180 193
194private:
195 explicit Process(KernelCore& kernel);
196 ~Process() override;
197
181 // Memory used to back the allocations in the regular heap. A single vector is used to cover 198 // Memory used to back the allocations in the regular heap. A single vector is used to cover
182 // the entire virtual address space extents that bound the allocations, including any holes. 199 // the entire virtual address space extents that bound the allocations, including any holes.
183 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous 200 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
@@ -197,17 +214,6 @@ public:
197 std::vector<std::bitset<8>> tls_slots; 214 std::vector<std::bitset<8>> tls_slots;
198 215
199 std::string name; 216 std::string name;
200
201 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
202 ResultCode HeapFree(VAddr target, u32 size);
203
204 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size);
205
206 ResultCode UnmapMemory(VAddr dst_addr, VAddr src_addr, u64 size);
207
208private:
209 explicit Process(KernelCore& kernel);
210 ~Process() override;
211}; 217};
212 218
213} // namespace Kernel 219} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index c2d7535c9..315f65338 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -65,10 +65,7 @@ void Thread::Stop() {
65 wait_objects.clear(); 65 wait_objects.clear();
66 66
67 // Mark the TLS slot in the thread's page as free. 67 // Mark the TLS slot in the thread's page as free.
68 const u64 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE; 68 owner_process->FreeTLSSlot(tls_address);
69 const u64 tls_slot =
70 ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
71 Core::CurrentProcess()->tls_slots[tls_page].reset(tls_slot);
72} 69}
73 70
74void WaitCurrentThread_Sleep() { 71void WaitCurrentThread_Sleep() {
@@ -178,32 +175,6 @@ void Thread::ResumeFromWait() {
178} 175}
179 176
180/** 177/**
181 * Finds a free location for the TLS section of a thread.
182 * @param tls_slots The TLS page array of the thread's owner process.
183 * Returns a tuple of (page, slot, alloc_needed) where:
184 * page: The index of the first allocated TLS page that has free slots.
185 * slot: The index of the first free slot in the indicated page.
186 * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full).
187 */
188static std::tuple<std::size_t, std::size_t, bool> GetFreeThreadLocalSlot(
189 const std::vector<std::bitset<8>>& tls_slots) {
190 // Iterate over all the allocated pages, and try to find one where not all slots are used.
191 for (std::size_t page = 0; page < tls_slots.size(); ++page) {
192 const auto& page_tls_slots = tls_slots[page];
193 if (!page_tls_slots.all()) {
194 // We found a page with at least one free slot, find which slot it is
195 for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
196 if (!page_tls_slots.test(slot)) {
197 return std::make_tuple(page, slot, false);
198 }
199 }
200 }
201 }
202
203 return std::make_tuple(0, 0, true);
204}
205
206/**
207 * Resets a thread context, making it ready to be scheduled and run by the CPU 178 * Resets a thread context, making it ready to be scheduled and run by the CPU
208 * @param context Thread context to reset 179 * @param context Thread context to reset
209 * @param stack_top Address of the top of the stack 180 * @param stack_top Address of the top of the stack
@@ -264,32 +235,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
264 thread->owner_process = owner_process; 235 thread->owner_process = owner_process;
265 thread->scheduler = Core::System::GetInstance().Scheduler(processor_id); 236 thread->scheduler = Core::System::GetInstance().Scheduler(processor_id);
266 thread->scheduler->AddThread(thread, priority); 237 thread->scheduler->AddThread(thread, priority);
267 238 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
268 // Find the next available TLS index, and mark it as used
269 auto& tls_slots = owner_process->tls_slots;
270
271 auto [available_page, available_slot, needs_allocation] = GetFreeThreadLocalSlot(tls_slots);
272 if (needs_allocation) {
273 tls_slots.emplace_back(0); // The page is completely available at the start
274 available_page = tls_slots.size() - 1;
275 available_slot = 0; // Use the first slot in the new page
276
277 // Allocate some memory from the end of the linear heap for this region.
278 const std::size_t offset = thread->tls_memory->size();
279 thread->tls_memory->insert(thread->tls_memory->end(), Memory::PAGE_SIZE, 0);
280
281 auto& vm_manager = owner_process->vm_manager;
282 vm_manager.RefreshMemoryBlockMappings(thread->tls_memory.get());
283
284 vm_manager.MapMemoryBlock(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
285 thread->tls_memory, 0, Memory::PAGE_SIZE,
286 MemoryState::ThreadLocal);
287 }
288
289 // Mark the slot as used
290 tls_slots[available_page].set(available_slot);
291 thread->tls_address = Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE +
292 available_slot * Memory::TLS_ENTRY_SIZE;
293 239
294 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 240 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
295 // to initialize the context 241 // to initialize the context
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 91e9b79ec..4250144c3 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -62,6 +62,9 @@ enum class ThreadWakeupReason {
62 62
63class Thread final : public WaitObject { 63class Thread final : public WaitObject {
64public: 64public:
65 using TLSMemory = std::vector<u8>;
66 using TLSMemoryPtr = std::shared_ptr<TLSMemory>;
67
65 /** 68 /**
66 * Creates and returns a new thread. The new thread is immediately scheduled 69 * Creates and returns a new thread. The new thread is immediately scheduled
67 * @param kernel The kernel instance this thread will be created under. 70 * @param kernel The kernel instance this thread will be created under.
@@ -134,6 +137,14 @@ public:
134 return thread_id; 137 return thread_id;
135 } 138 }
136 139
140 TLSMemoryPtr& GetTLSMemory() {
141 return tls_memory;
142 }
143
144 const TLSMemoryPtr& GetTLSMemory() const {
145 return tls_memory;
146 }
147
137 /** 148 /**
138 * Resumes a thread from waiting 149 * Resumes a thread from waiting
139 */ 150 */
@@ -269,7 +280,7 @@ private:
269 explicit Thread(KernelCore& kernel); 280 explicit Thread(KernelCore& kernel);
270 ~Thread() override; 281 ~Thread() override;
271 282
272 std::shared_ptr<std::vector<u8>> tls_memory = std::make_shared<std::vector<u8>>(); 283 TLSMemoryPtr tls_memory = std::make_shared<TLSMemory>();
273}; 284};
274 285
275/** 286/**