summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel/process.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/hle/kernel/process.cpp')
-rw-r--r--src/core/hle/kernel/process.cpp135
1 file changed, 90 insertions(+), 45 deletions(-)
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 7cfc513a1..f45ef05f6 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -3,6 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm> 5#include <algorithm>
6#include <bitset>
6#include <memory> 7#include <memory>
7#include <random> 8#include <random>
8#include "common/alignment.h" 9#include "common/alignment.h"
@@ -48,8 +49,58 @@ void SetupMainThread(Process& owner_process, KernelCore& kernel, u32 priority) {
48} 49}
49} // Anonymous namespace 50} // Anonymous namespace
50 51
51SharedPtr<Process> Process::Create(Core::System& system, std::string name, 52// Represents a page used for thread-local storage.
52 Process::ProcessType type) { 53//
54// Each TLS page contains slots that may be used by processes and threads.
55// Every process and thread is created with a slot in some arbitrary page
56// (whichever page happens to have an available slot).
57class TLSPage {
58public:
59 static constexpr std::size_t num_slot_entries = Memory::PAGE_SIZE / Memory::TLS_ENTRY_SIZE;
60
61 explicit TLSPage(VAddr address) : base_address{address} {}
62
63 bool HasAvailableSlots() const {
64 return !is_slot_used.all();
65 }
66
67 VAddr GetBaseAddress() const {
68 return base_address;
69 }
70
71 std::optional<VAddr> ReserveSlot() {
72 for (std::size_t i = 0; i < is_slot_used.size(); i++) {
73 if (is_slot_used[i]) {
74 continue;
75 }
76
77 is_slot_used[i] = true;
78 return base_address + (i * Memory::TLS_ENTRY_SIZE);
79 }
80
81 return std::nullopt;
82 }
83
84 void ReleaseSlot(VAddr address) {
85 // Ensure that all given addresses are consistent with how TLS pages
86 // are intended to be used when releasing slots.
87 ASSERT(IsWithinPage(address));
88 ASSERT((address % Memory::TLS_ENTRY_SIZE) == 0);
89
90 const std::size_t index = (address - base_address) / Memory::TLS_ENTRY_SIZE;
91 is_slot_used[index] = false;
92 }
93
94private:
95 bool IsWithinPage(VAddr address) const {
96 return base_address <= address && address < base_address + Memory::PAGE_SIZE;
97 }
98
99 VAddr base_address;
100 std::bitset<num_slot_entries> is_slot_used;
101};
102
103SharedPtr<Process> Process::Create(Core::System& system, std::string name, ProcessType type) {
53 auto& kernel = system.Kernel(); 104 auto& kernel = system.Kernel();
54 105
55 SharedPtr<Process> process(new Process(system)); 106 SharedPtr<Process> process(new Process(system));
@@ -181,61 +232,55 @@ void Process::PrepareForTermination() {
181} 232}
182 233
183/** 234/**
184 * Finds a free location for the TLS section of a thread. 235 * Attempts to find a TLS page that contains a free slot for
185 * @param tls_slots The TLS page array of the thread's owner process. 236 * use by a thread.
186 * Returns a tuple of (page, slot, alloc_needed) where: 237 *
187 * page: The index of the first allocated TLS page that has free slots. 238 * @returns If a page with an available slot is found, then an iterator
188 * slot: The index of the first free slot in the indicated page. 239 * pointing to the page is returned. Otherwise the end iterator
189 * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full). 240 * is returned instead.
190 */ 241 */
191static std::tuple<std::size_t, std::size_t, bool> FindFreeThreadLocalSlot( 242static auto FindTLSPageWithAvailableSlots(std::vector<TLSPage>& tls_pages) {
192 const std::vector<std::bitset<8>>& tls_slots) { 243 return std::find_if(tls_pages.begin(), tls_pages.end(),
193 // Iterate over all the allocated pages, and try to find one where not all slots are used. 244 [](const auto& page) { return page.HasAvailableSlots(); });
194 for (std::size_t page = 0; page < tls_slots.size(); ++page) {
195 const auto& page_tls_slots = tls_slots[page];
196 if (!page_tls_slots.all()) {
197 // We found a page with at least one free slot, find which slot it is
198 for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) {
199 if (!page_tls_slots.test(slot)) {
200 return std::make_tuple(page, slot, false);
201 }
202 }
203 }
204 }
205
206 return std::make_tuple(0, 0, true);
207} 245}
208 246
209VAddr Process::MarkNextAvailableTLSSlotAsUsed(Thread& thread) { 247VAddr Process::CreateTLSRegion() {
210 auto [available_page, available_slot, needs_allocation] = FindFreeThreadLocalSlot(tls_slots); 248 auto tls_page_iter = FindTLSPageWithAvailableSlots(tls_pages);
211 const VAddr tls_begin = vm_manager.GetTLSIORegionBaseAddress();
212 249
213 if (needs_allocation) { 250 if (tls_page_iter == tls_pages.cend()) {
214 tls_slots.emplace_back(0); // The page is completely available at the start 251 const auto region_address =
215 available_page = tls_slots.size() - 1; 252 vm_manager.FindFreeRegion(vm_manager.GetTLSIORegionBaseAddress(),
216 available_slot = 0; // Use the first slot in the new page 253 vm_manager.GetTLSIORegionEndAddress(), Memory::PAGE_SIZE);
254 ASSERT(region_address.Succeeded());
217 255
218 // Allocate some memory from the end of the linear heap for this region. 256 const auto map_result = vm_manager.MapMemoryBlock(
219 auto& tls_memory = thread.GetTLSMemory(); 257 *region_address, std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE), 0,
220 tls_memory->insert(tls_memory->end(), Memory::PAGE_SIZE, 0); 258 Memory::PAGE_SIZE, MemoryState::ThreadLocal);
259 ASSERT(map_result.Succeeded());
221 260
222 vm_manager.RefreshMemoryBlockMappings(tls_memory.get()); 261 tls_pages.emplace_back(*region_address);
223 262
224 vm_manager.MapMemoryBlock(tls_begin + available_page * Memory::PAGE_SIZE, tls_memory, 0, 263 const auto reserve_result = tls_pages.back().ReserveSlot();
225 Memory::PAGE_SIZE, MemoryState::ThreadLocal); 264 ASSERT(reserve_result.has_value());
226 }
227 265
228 tls_slots[available_page].set(available_slot); 266 return *reserve_result;
267 }
229 268
230 return tls_begin + available_page * Memory::PAGE_SIZE + available_slot * Memory::TLS_ENTRY_SIZE; 269 return *tls_page_iter->ReserveSlot();
231} 270}
232 271
233void Process::FreeTLSSlot(VAddr tls_address) { 272void Process::FreeTLSRegion(VAddr tls_address) {
234 const VAddr tls_base = tls_address - vm_manager.GetTLSIORegionBaseAddress(); 273 const VAddr aligned_address = Common::AlignDown(tls_address, Memory::PAGE_SIZE);
235 const VAddr tls_page = tls_base / Memory::PAGE_SIZE; 274 auto iter =
236 const VAddr tls_slot = (tls_base % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE; 275 std::find_if(tls_pages.begin(), tls_pages.end(), [aligned_address](const auto& page) {
276 return page.GetBaseAddress() == aligned_address;
277 });
278
279 // Something has gone very wrong if we're freeing a region
280 // with no actual page available.
281 ASSERT(iter != tls_pages.cend());
237 282
238 tls_slots[tls_page].reset(tls_slot); 283 iter->ReleaseSlot(tls_address);
239} 284}
240 285
241void Process::LoadModule(CodeSet module_, VAddr base_addr) { 286void Process::LoadModule(CodeSet module_, VAddr base_addr) {