summaryrefslogtreecommitdiff
path: root/src/core/hle/kernel
diff options
context:
space:
mode:
author: bunnei 2021-11-09 22:06:49 -0800
committer: bunnei 2021-12-06 16:39:17 -0800
commit f3d6e31e7805711803d11607fd807f23715d3449 (patch)
tree 2dbf864417a5423c16bd37b445cb5311f2ab8d65 /src/core/hle/kernel
parent hle: kernel: KThread: Migrate to updated KThreadQueue (part 2). (diff)
download: yuzu-f3d6e31e7805711803d11607fd807f23715d3449.tar.gz
          yuzu-f3d6e31e7805711803d11607fd807f23715d3449.tar.xz
          yuzu-f3d6e31e7805711803d11607fd807f23715d3449.zip
hle: kernel: KConditionVariable: Various updates & simplifications.
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r-- src/core/hle/kernel/k_condition_variable.cpp | 184
-rw-r--r-- src/core/hle/kernel/k_condition_variable.h   |   2
2 files changed, 65 insertions, 121 deletions
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 34c1eae65..f343e3c2f 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -121,26 +121,31 @@ ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
121 121
122 // Determine the next tag. 122 // Determine the next tag.
123 u32 next_value{}; 123 u32 next_value{};
124 if (next_owner_thread) { 124 if (next_owner_thread != nullptr) {
125 next_value = next_owner_thread->GetAddressKeyValue(); 125 next_value = next_owner_thread->GetAddressKeyValue();
126 if (num_waiters > 1) { 126 if (num_waiters > 1) {
127 next_value |= Svc::HandleWaitMask; 127 next_value |= Svc::HandleWaitMask;
128 } 128 }
129 129
130 next_owner_thread->EndWait(ResultSuccess); 130 // Write the value to userspace.
131 } 131 ResultCode result{ResultSuccess};
132 132 if (WriteToUser(system, addr, std::addressof(next_value))) [[likely]] {
133 // Write the value to userspace. 133 result = ResultSuccess;
134 if (!WriteToUser(system, addr, std::addressof(next_value))) { 134 } else {
135 if (next_owner_thread) { 135 result = ResultInvalidCurrentMemory;
136 next_owner_thread->SetWaitResult(ResultInvalidCurrentMemory);
137 } 136 }
138 137
139 return ResultInvalidCurrentMemory; 138 // Signal the next owner thread.
139 next_owner_thread->EndWait(result);
140 return result;
141 } else {
142 // Just write the value to userspace.
143 R_UNLESS(WriteToUser(system, addr, std::addressof(next_value)),
144 ResultInvalidCurrentMemory);
145
146 return ResultSuccess;
140 } 147 }
141 } 148 }
142
143 return ResultSuccess;
144} 149}
145 150
146ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) { 151ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
@@ -148,58 +153,45 @@ ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 val
148 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel); 153 ThreadQueueImplForKConditionVariableWaitForAddress wait_queue(kernel);
149 154
150 // Wait for the address. 155 // Wait for the address.
156 KThread* owner_thread{};
151 { 157 {
152 KScopedAutoObject<KThread> owner_thread; 158 KScopedSchedulerLock sl(kernel);
153 ASSERT(owner_thread.IsNull());
154 {
155 KScopedSchedulerLock sl(kernel);
156 cur_thread->SetWaitResult(ResultSuccess);
157 159
158 // Check if the thread should terminate. 160 // Check if the thread should terminate.
159 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested); 161 R_UNLESS(!cur_thread->IsTerminationRequested(), ResultTerminationRequested);
160 162
161 { 163 // Read the tag from userspace.
162 // Read the tag from userspace. 164 u32 test_tag{};
163 u32 test_tag{}; 165 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr), ResultInvalidCurrentMemory);
164 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
165 ResultInvalidCurrentMemory);
166
167 // If the tag isn't the handle (with wait mask), we're done.
168 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), ResultSuccess);
169
170 // Get the lock owner thread.
171 owner_thread =
172 kernel.CurrentProcess()->GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(
173 handle);
174 R_UNLESS(owner_thread.IsNotNull(), ResultInvalidHandle);
175
176 // Update the lock.
177 cur_thread->SetAddressKey(addr, value);
178 owner_thread->AddWaiter(cur_thread);
179
180 // Begin waiting.
181 cur_thread->BeginWait(std::addressof(wait_queue));
182 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
183 cur_thread->SetMutexWaitAddressForDebugging(addr);
184 }
185 }
186 ASSERT(owner_thread.IsNotNull());
187 }
188 166
189 // Remove the thread as a waiter from the lock owner. 167 // If the tag isn't the handle (with wait mask), we're done.
190 { 168 R_SUCCEED_IF(test_tag != (handle | Svc::HandleWaitMask));
191 KScopedSchedulerLock sl(kernel); 169
192 KThread* owner_thread = cur_thread->GetLockOwner(); 170 // Get the lock owner thread.
193 if (owner_thread != nullptr) { 171 owner_thread = kernel.CurrentProcess()
194 owner_thread->RemoveWaiter(cur_thread); 172 ->GetHandleTable()
195 } 173 .GetObjectWithoutPseudoHandle<KThread>(handle)
174 .ReleasePointerUnsafe();
175 R_UNLESS(owner_thread != nullptr, ResultInvalidHandle);
176
177 // Update the lock.
178 cur_thread->SetAddressKey(addr, value);
179 owner_thread->AddWaiter(cur_thread);
180
181 // Begin waiting.
182 cur_thread->BeginWait(std::addressof(wait_queue));
183 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
184 cur_thread->SetMutexWaitAddressForDebugging(addr);
196 } 185 }
197 186
187 // Close our reference to the owner thread, now that the wait is over.
188 owner_thread->Close();
189
198 // Get the wait result. 190 // Get the wait result.
199 return cur_thread->GetWaitResult(); 191 return cur_thread->GetWaitResult();
200} 192}
201 193
202KThread* KConditionVariable::SignalImpl(KThread* thread) { 194void KConditionVariable::SignalImpl(KThread* thread) {
203 // Check pre-conditions. 195 // Check pre-conditions.
204 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 196 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
205 197
@@ -213,14 +205,13 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
213 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 205 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
214 // TODO(bunnei): We should call CanAccessAtomic(..) here. 206 // TODO(bunnei): We should call CanAccessAtomic(..) here.
215 can_access = true; 207 can_access = true;
216 if (can_access) { 208 if (can_access) [[likely]] {
217 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag, 209 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
218 Svc::HandleWaitMask); 210 Svc::HandleWaitMask);
219 } 211 }
220 } 212 }
221 213
222 KThread* thread_to_close = nullptr; 214 if (can_access) [[likely]] {
223 if (can_access) {
224 if (prev_tag == Svc::InvalidHandle) { 215 if (prev_tag == Svc::InvalidHandle) {
225 // If nobody held the lock previously, we're all good. 216 // If nobody held the lock previously, we're all good.
226 thread->EndWait(ResultSuccess); 217 thread->EndWait(ResultSuccess);
@@ -232,10 +223,10 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
232 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask)) 223 static_cast<Handle>(prev_tag & ~Svc::HandleWaitMask))
233 .ReleasePointerUnsafe(); 224 .ReleasePointerUnsafe();
234 225
235 if (owner_thread) { 226 if (owner_thread) [[likely]] {
236 // Add the thread as a waiter on the owner. 227 // Add the thread as a waiter on the owner.
237 owner_thread->AddWaiter(thread); 228 owner_thread->AddWaiter(thread);
238 thread_to_close = owner_thread; 229 owner_thread->Close();
239 } else { 230 } else {
240 // The lock was tagged with a thread that doesn't exist. 231 // The lock was tagged with a thread that doesn't exist.
241 thread->EndWait(ResultInvalidState); 232 thread->EndWait(ResultInvalidState);
@@ -245,20 +236,11 @@ KThread* KConditionVariable::SignalImpl(KThread* thread) {
245 // If the address wasn't accessible, note so. 236 // If the address wasn't accessible, note so.
246 thread->EndWait(ResultInvalidCurrentMemory); 237 thread->EndWait(ResultInvalidCurrentMemory);
247 } 238 }
248
249 return thread_to_close;
250} 239}
251 240
252void KConditionVariable::Signal(u64 cv_key, s32 count) { 241void KConditionVariable::Signal(u64 cv_key, s32 count) {
253 // Prepare for signaling.
254 constexpr int MaxThreads = 16;
255
256 KLinkedList<KThread> thread_list{kernel};
257 std::array<KThread*, MaxThreads> thread_array;
258 s32 num_to_close{};
259
260 // Perform signaling. 242 // Perform signaling.
261 s32 num_waiters{}; 243 int num_waiters = 0;
262 { 244 {
263 KScopedSchedulerLock sl(kernel); 245 KScopedSchedulerLock sl(kernel);
264 246
@@ -267,14 +249,7 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
267 (it->GetConditionVariableKey() == cv_key)) { 249 (it->GetConditionVariableKey() == cv_key)) {
268 KThread* target_thread = std::addressof(*it); 250 KThread* target_thread = std::addressof(*it);
269 251
270 if (KThread* thread = SignalImpl(target_thread); thread != nullptr) { 252 this->SignalImpl(target_thread);
271 if (num_to_close < MaxThreads) {
272 thread_array[num_to_close++] = thread;
273 } else {
274 thread_list.push_back(*thread);
275 }
276 }
277
278 it = thread_tree.erase(it); 253 it = thread_tree.erase(it);
279 target_thread->ClearConditionVariable(); 254 target_thread->ClearConditionVariable();
280 ++num_waiters; 255 ++num_waiters;
@@ -282,33 +257,20 @@ void KConditionVariable::Signal(u64 cv_key, s32 count) {
282 257
283 // If we have no waiters, clear the has waiter flag. 258 // If we have no waiters, clear the has waiter flag.
284 if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) { 259 if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
285 const u32 has_waiter_flag{}; 260 const u32 has_waiter_flag = 0;
286 WriteToUser(system, cv_key, std::addressof(has_waiter_flag)); 261 WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
287 } 262 }
288 } 263 }
289
290 // Close threads in the array.
291 for (auto i = 0; i < num_to_close; ++i) {
292 thread_array[i]->Close();
293 }
294
295 // Close threads in the list.
296 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
297 (*it).Close();
298 }
299} 264}
300 265
301ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) { 266ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
302 // Prepare to wait. 267 // Prepare to wait.
303 KThread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread(); 268 KThread* cur_thread = GetCurrentThreadPointer(kernel);
304 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue( 269 ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(
305 kernel, std::addressof(thread_tree)); 270 kernel, std::addressof(thread_tree));
306 271
307 { 272 {
308 KScopedSchedulerLockAndSleep slp{kernel, cur_thread, timeout}; 273 KScopedSchedulerLockAndSleep slp(kernel, cur_thread, timeout);
309
310 // Set the synced object.
311 cur_thread->SetWaitResult(ResultTimedOut);
312 274
313 // Check that the thread isn't terminating. 275 // Check that the thread isn't terminating.
314 if (cur_thread->IsTerminationRequested()) { 276 if (cur_thread->IsTerminationRequested()) {
@@ -350,38 +312,20 @@ ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout)
350 } 312 }
351 } 313 }
352 314
353 // Update condition variable tracking. 315 // If timeout is zero, time out.
354 { 316 R_UNLESS(timeout != 0, ResultTimedOut);
355 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
356 thread_tree.insert(*cur_thread);
357 }
358 317
359 // If the timeout is non-zero, set the thread as waiting. 318 // Update condition variable tracking.
360 if (timeout != 0) { 319 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
361 cur_thread->BeginWait(std::addressof(wait_queue)); 320 thread_tree.insert(*cur_thread);
362 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
363 cur_thread->SetMutexWaitAddressForDebugging(addr);
364 }
365 }
366
367 // Cancel the timer wait.
368 kernel.TimeManager().UnscheduleTimeEvent(cur_thread);
369
370 // Remove from the condition variable.
371 {
372 KScopedSchedulerLock sl(kernel);
373
374 if (KThread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
375 owner->RemoveWaiter(cur_thread);
376 }
377 321
378 if (cur_thread->IsWaitingForConditionVariable()) { 322 // Begin waiting.
379 thread_tree.erase(thread_tree.iterator_to(*cur_thread)); 323 cur_thread->BeginWait(std::addressof(wait_queue));
380 cur_thread->ClearConditionVariable(); 324 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
381 } 325 cur_thread->SetMutexWaitAddressForDebugging(addr);
382 } 326 }
383 327
384 // Get the result. 328 // Get the wait result.
385 return cur_thread->GetWaitResult(); 329 return cur_thread->GetWaitResult();
386} 330}
387 331
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
index 861dbd420..5e4815d08 100644
--- a/src/core/hle/kernel/k_condition_variable.h
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -34,7 +34,7 @@ public:
34 [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout); 34 [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
35 35
36private: 36private:
37 [[nodiscard]] KThread* SignalImpl(KThread* thread); 37 void SignalImpl(KThread* thread);
38 38
39 ThreadTree thread_tree; 39 ThreadTree thread_tree;
40 40