author    bunnei 2021-01-11 14:36:26 -0800
committer GitHub 2021-01-11 14:36:26 -0800
commit    eb3cb54aa53e23af61afb9b7e35af28c9d37ae2a (patch)
tree      56a80760bd0ba8ecd85dc8d9f09fb9e2068c91d4
parent    Merge pull request #5229 from Morph1984/fullscreen-opt (diff)
parent    hle: kernel: thread: Preserve thread wait reason for debugging only. (diff)
Merge pull request #5266 from bunnei/kernel-synch
Rewrite KSynchronizationObject, KConditionVariable, and KAddressArbiter
-rw-r--r--  src/common/CMakeLists.txt                          |    3
-rw-r--r--  src/common/common_funcs.h                          |    8
-rw-r--r--  src/common/intrusive_red_black_tree.h              |  627
-rw-r--r--  src/common/parent_of_member.h                      |  189
-rw-r--r--  src/common/tree.h                                  |  822
-rw-r--r--  src/core/CMakeLists.txt                            |   16
-rw-r--r--  src/core/arm/arm_interface.h                       |    7
-rw-r--r--  src/core/core_timing.cpp                           |    1
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp            |  317
-rw-r--r--  src/core/hle/kernel/address_arbiter.h              |   91
-rw-r--r--  src/core/hle/kernel/client_port.cpp                |    3
-rw-r--r--  src/core/hle/kernel/client_session.cpp             |   11
-rw-r--r--  src/core/hle/kernel/client_session.h               |    8
-rw-r--r--  src/core/hle/kernel/errors.h                       |    3
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.cpp          |  367
-rw-r--r--  src/core/hle/kernel/k_address_arbiter.h            |   70
-rw-r--r--  src/core/hle/kernel/k_condition_variable.cpp       |  349
-rw-r--r--  src/core/hle/kernel/k_condition_variable.h         |   59
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp                |   37
-rw-r--r--  src/core/hle/kernel/k_scheduler.h                  |    5
-rw-r--r--  src/core/hle/kernel/k_scheduler_lock.h             |    2
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.cpp   |  172
-rw-r--r--  src/core/hle/kernel/k_synchronization_object.h     |   58
-rw-r--r--  src/core/hle/kernel/kernel.cpp                     |   19
-rw-r--r--  src/core/hle/kernel/kernel.h                       |    7
-rw-r--r--  src/core/hle/kernel/memory/memory_layout.h         |   19
-rw-r--r--  src/core/hle/kernel/mutex.cpp                      |  170
-rw-r--r--  src/core/hle/kernel/mutex.h                        |   42
-rw-r--r--  src/core/hle/kernel/object.h                       |    5
-rw-r--r--  src/core/hle/kernel/process.cpp                    |   67
-rw-r--r--  src/core/hle/kernel/process.h                      |   64
-rw-r--r--  src/core/hle/kernel/readable_event.cpp             |   18
-rw-r--r--  src/core/hle/kernel/readable_event.h               |   12
-rw-r--r--  src/core/hle/kernel/server_port.cpp                |   14
-rw-r--r--  src/core/hle/kernel/server_port.h                  |    7
-rw-r--r--  src/core/hle/kernel/server_session.cpp             |   23
-rw-r--r--  src/core/hle/kernel/server_session.h               |   12
-rw-r--r--  src/core/hle/kernel/session.cpp                    |   11
-rw-r--r--  src/core/hle/kernel/session.h                      |    8
-rw-r--r--  src/core/hle/kernel/svc.cpp                        |  397
-rw-r--r--  src/core/hle/kernel/svc_common.h                   |   14
-rw-r--r--  src/core/hle/kernel/svc_results.h                  |   20
-rw-r--r--  src/core/hle/kernel/svc_types.h                    |   12
-rw-r--r--  src/core/hle/kernel/svc_wrap.h                     |   47
-rw-r--r--  src/core/hle/kernel/synchronization.cpp            |  116
-rw-r--r--  src/core/hle/kernel/synchronization.h              |   44
-rw-r--r--  src/core/hle/kernel/synchronization_object.cpp     |   49
-rw-r--r--  src/core/hle/kernel/synchronization_object.h       |   77
-rw-r--r--  src/core/hle/kernel/thread.cpp                     |  328
-rw-r--r--  src/core/hle/kernel/thread.h                       |  497
-rw-r--r--  src/core/hle/kernel/time_manager.cpp               |    9
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp                   |    6
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp       |    4
-rw-r--r--  src/core/hle/service/sm/sm.cpp                     |    3
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp                    |  128
-rw-r--r--  src/yuzu/debugger/wait_tree.h                      |   17
56 files changed, 3583 insertions(+), 1908 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 2c2bd2ee8..5d781cd77 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -123,6 +123,7 @@ add_library(common STATIC
     hash.h
     hex_util.cpp
     hex_util.h
+    intrusive_red_black_tree.h
     logging/backend.cpp
     logging/backend.h
     logging/filter.cpp
@@ -143,6 +144,7 @@ add_library(common STATIC
     page_table.h
     param_package.cpp
     param_package.h
+    parent_of_member.h
     quaternion.h
     ring_buffer.h
     scm_rev.cpp
@@ -167,6 +169,7 @@ add_library(common STATIC
     time_zone.h
     timer.cpp
     timer.h
+    tree.h
     uint128.cpp
     uint128.h
     uuid.cpp
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 367b6bf6e..c90978f9c 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -93,6 +93,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
         return static_cast<T>(key) == 0;                                                          \
     }
 
+/// Evaluates a boolean expression, and returns a result unless that expression is true.
+#define R_UNLESS(expr, res)                                                                        \
+    {                                                                                              \
+        if (!(expr)) {                                                                             \
+            return res;                                                                            \
+        }                                                                                          \
+    }
+
 namespace Common {
 
 [[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {
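
R_UNLESS packages the HLE kernel's recurring "validate, else early-return a result" pattern into a single macro. A minimal sketch of how a caller reads, assuming a ResultCode-style error type (ValidateRegion, ERR_INVALID_SIZE, and its value are invented for illustration, not part of this commit):

using ResultCode = unsigned int;           // stand-in for yuzu's ResultCode type
constexpr ResultCode RESULT_SUCCESS = 0;
constexpr ResultCode ERR_INVALID_SIZE = 1; // placeholder value

ResultCode ValidateRegion(unsigned long long address, unsigned long long size) {
    // Each check returns the supplied result unless its condition holds.
    R_UNLESS(size > 0, ERR_INVALID_SIZE);
    R_UNLESS(address % 4096 == 0, ERR_INVALID_SIZE);
    return RESULT_SUCCESS;
}
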
diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h
new file mode 100644
index 000000000..929b5497e
--- /dev/null
+++ b/src/common/intrusive_red_black_tree.h
@@ -0,0 +1,627 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/parent_of_member.h"
8#include "common/tree.h"
9
10namespace Common {
11
12namespace impl {
13
14class IntrusiveRedBlackTreeImpl;
15
16}
17
18struct IntrusiveRedBlackTreeNode {
19
20private:
21 RB_ENTRY(IntrusiveRedBlackTreeNode) entry{};
22
23 friend class impl::IntrusiveRedBlackTreeImpl;
24
25 template <class, class, class>
26 friend class IntrusiveRedBlackTree;
27
28public:
29 constexpr IntrusiveRedBlackTreeNode() = default;
30};
31
32template <class T, class Traits, class Comparator>
33class IntrusiveRedBlackTree;
34
35namespace impl {
36
37class IntrusiveRedBlackTreeImpl {
38
39private:
40 template <class, class, class>
41 friend class ::Common::IntrusiveRedBlackTree;
42
43private:
44 RB_HEAD(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode);
45 using RootType = IntrusiveRedBlackTreeRoot;
46
47private:
48 IntrusiveRedBlackTreeRoot root;
49
50public:
51 template <bool Const>
52 class Iterator;
53
54 using value_type = IntrusiveRedBlackTreeNode;
55 using size_type = size_t;
56 using difference_type = ptrdiff_t;
57 using pointer = value_type*;
58 using const_pointer = const value_type*;
59 using reference = value_type&;
60 using const_reference = const value_type&;
61 using iterator = Iterator<false>;
62 using const_iterator = Iterator<true>;
63
64 template <bool Const>
65 class Iterator {
66 public:
67 using iterator_category = std::bidirectional_iterator_tag;
68 using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
69 using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
70 using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
71 IntrusiveRedBlackTreeImpl::pointer>;
72 using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
73 IntrusiveRedBlackTreeImpl::reference>;
74
75 private:
76 pointer node;
77
78 public:
79 explicit Iterator(pointer n) : node(n) {}
80
81 bool operator==(const Iterator& rhs) const {
82 return this->node == rhs.node;
83 }
84
85 bool operator!=(const Iterator& rhs) const {
86 return !(*this == rhs);
87 }
88
89 pointer operator->() const {
90 return this->node;
91 }
92
93 reference operator*() const {
94 return *this->node;
95 }
96
97 Iterator& operator++() {
98 this->node = GetNext(this->node);
99 return *this;
100 }
101
102 Iterator& operator--() {
103 this->node = GetPrev(this->node);
104 return *this;
105 }
106
107 Iterator operator++(int) {
108 const Iterator it{*this};
109 ++(*this);
110 return it;
111 }
112
113 Iterator operator--(int) {
114 const Iterator it{*this};
115 --(*this);
116 return it;
117 }
118
119 operator Iterator<true>() const {
120 return Iterator<true>(this->node);
121 }
122 };
123
124protected:
125 // Generate static implementations for non-comparison operations for IntrusiveRedBlackTreeRoot.
126 RB_GENERATE_WITHOUT_COMPARE_STATIC(IntrusiveRedBlackTreeRoot, IntrusiveRedBlackTreeNode, entry);
127
128private:
129 // Define accessors using RB_* functions.
130 constexpr void InitializeImpl() {
131 RB_INIT(&this->root);
132 }
133
134 bool EmptyImpl() const {
135 return RB_EMPTY(&this->root);
136 }
137
138 IntrusiveRedBlackTreeNode* GetMinImpl() const {
139 return RB_MIN(IntrusiveRedBlackTreeRoot,
140 const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
141 }
142
143 IntrusiveRedBlackTreeNode* GetMaxImpl() const {
144 return RB_MAX(IntrusiveRedBlackTreeRoot,
145 const_cast<IntrusiveRedBlackTreeRoot*>(&this->root));
146 }
147
148 IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
149 return RB_REMOVE(IntrusiveRedBlackTreeRoot, &this->root, node);
150 }
151
152public:
153 static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
154 return RB_NEXT(IntrusiveRedBlackTreeRoot, nullptr, node);
155 }
156
157 static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
158 return RB_PREV(IntrusiveRedBlackTreeRoot, nullptr, node);
159 }
160
161 static IntrusiveRedBlackTreeNode const* GetNext(const IntrusiveRedBlackTreeNode* node) {
162 return static_cast<const IntrusiveRedBlackTreeNode*>(
163 GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
164 }
165
166 static IntrusiveRedBlackTreeNode const* GetPrev(const IntrusiveRedBlackTreeNode* node) {
167 return static_cast<const IntrusiveRedBlackTreeNode*>(
168 GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
169 }
170
171public:
172 constexpr IntrusiveRedBlackTreeImpl() : root() {
173 this->InitializeImpl();
174 }
175
176 // Iterator accessors.
177 iterator begin() {
178 return iterator(this->GetMinImpl());
179 }
180
181 const_iterator begin() const {
182 return const_iterator(this->GetMinImpl());
183 }
184
185 iterator end() {
186 return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
187 }
188
189 const_iterator end() const {
190 return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
191 }
192
193 const_iterator cbegin() const {
194 return this->begin();
195 }
196
197 const_iterator cend() const {
198 return this->end();
199 }
200
201 iterator iterator_to(reference ref) {
202 return iterator(&ref);
203 }
204
205 const_iterator iterator_to(const_reference ref) const {
206 return const_iterator(&ref);
207 }
208
209 // Content management.
210 bool empty() const {
211 return this->EmptyImpl();
212 }
213
214 reference back() {
215 return *this->GetMaxImpl();
216 }
217
218 const_reference back() const {
219 return *this->GetMaxImpl();
220 }
221
222 reference front() {
223 return *this->GetMinImpl();
224 }
225
226 const_reference front() const {
227 return *this->GetMinImpl();
228 }
229
230 iterator erase(iterator it) {
231 auto cur = std::addressof(*it);
232 auto next = GetNext(cur);
233 this->RemoveImpl(cur);
234 return iterator(next);
235 }
236};
237
238} // namespace impl
239
240template <typename T>
241concept HasLightCompareType = requires {
242 { std::is_same<typename T::LightCompareType, void>::value }
243 ->std::convertible_to<bool>;
244};
245
246namespace impl {
247
248template <typename T, typename Default>
249consteval auto* GetLightCompareType() {
250 if constexpr (HasLightCompareType<T>) {
251 return static_cast<typename T::LightCompareType*>(nullptr);
252 } else {
253 return static_cast<Default*>(nullptr);
254 }
255}
256
257} // namespace impl
258
259template <typename T, typename Default>
260using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
261
262template <class T, class Traits, class Comparator>
263class IntrusiveRedBlackTree {
264
265public:
266 using ImplType = impl::IntrusiveRedBlackTreeImpl;
267
268private:
269 ImplType impl{};
270
271public:
272 struct IntrusiveRedBlackTreeRootWithCompare : ImplType::IntrusiveRedBlackTreeRoot {};
273
274 template <bool Const>
275 class Iterator;
276
277 using value_type = T;
278 using size_type = size_t;
279 using difference_type = ptrdiff_t;
280 using pointer = T*;
281 using const_pointer = const T*;
282 using reference = T&;
283 using const_reference = const T&;
284 using iterator = Iterator<false>;
285 using const_iterator = Iterator<true>;
286
287 using light_value_type = LightCompareType<Comparator, value_type>;
288 using const_light_pointer = const light_value_type*;
289 using const_light_reference = const light_value_type&;
290
291 template <bool Const>
292 class Iterator {
293 public:
294 friend class IntrusiveRedBlackTree<T, Traits, Comparator>;
295
296 using ImplIterator =
297 std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;
298
299 using iterator_category = std::bidirectional_iterator_tag;
300 using value_type = typename IntrusiveRedBlackTree::value_type;
301 using difference_type = typename IntrusiveRedBlackTree::difference_type;
302 using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
303 IntrusiveRedBlackTree::pointer>;
304 using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
305 IntrusiveRedBlackTree::reference>;
306
307 private:
308 ImplIterator iterator;
309
310 private:
311 explicit Iterator(ImplIterator it) : iterator(it) {}
312
313 explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
314 ImplType::iterator>::type::pointer ptr)
315 : iterator(ptr) {}
316
317 ImplIterator GetImplIterator() const {
318 return this->iterator;
319 }
320
321 public:
322 bool operator==(const Iterator& rhs) const {
323 return this->iterator == rhs.iterator;
324 }
325
326 bool operator!=(const Iterator& rhs) const {
327 return !(*this == rhs);
328 }
329
330 pointer operator->() const {
331 return Traits::GetParent(std::addressof(*this->iterator));
332 }
333
334 reference operator*() const {
335 return *Traits::GetParent(std::addressof(*this->iterator));
336 }
337
338 Iterator& operator++() {
339 ++this->iterator;
340 return *this;
341 }
342
343 Iterator& operator--() {
344 --this->iterator;
345 return *this;
346 }
347
348 Iterator operator++(int) {
349 const Iterator it{*this};
350 ++this->iterator;
351 return it;
352 }
353
354 Iterator operator--(int) {
355 const Iterator it{*this};
356 --this->iterator;
357 return it;
358 }
359
360 operator Iterator<true>() const {
361 return Iterator<true>(this->iterator);
362 }
363 };
364
365private:
366 // Generate static implementations for comparison operations for IntrusiveRedBlackTreeRoot.
367 RB_GENERATE_WITH_COMPARE_STATIC(IntrusiveRedBlackTreeRootWithCompare, IntrusiveRedBlackTreeNode,
368 entry, CompareImpl, LightCompareImpl);
369
370private:
371 static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
372 const IntrusiveRedBlackTreeNode* rhs) {
373 return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
374 }
375
376 static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
377 return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
378 }
379
380 // Define accessors using RB_* functions.
381 IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
382 return RB_INSERT(IntrusiveRedBlackTreeRootWithCompare,
383 static_cast<IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root),
384 node);
385 }
386
387 IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
388 return RB_FIND(
389 IntrusiveRedBlackTreeRootWithCompare,
390 const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
391 static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
392 const_cast<IntrusiveRedBlackTreeNode*>(node));
393 }
394
395 IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
396 return RB_NFIND(
397 IntrusiveRedBlackTreeRootWithCompare,
398 const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
399 static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
400 const_cast<IntrusiveRedBlackTreeNode*>(node));
401 }
402
403 IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
404 return RB_FIND_LIGHT(
405 IntrusiveRedBlackTreeRootWithCompare,
406 const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
407 static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
408 static_cast<const void*>(lelm));
409 }
410
411 IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
412 return RB_NFIND_LIGHT(
413 IntrusiveRedBlackTreeRootWithCompare,
414 const_cast<IntrusiveRedBlackTreeRootWithCompare*>(
415 static_cast<const IntrusiveRedBlackTreeRootWithCompare*>(&this->impl.root)),
416 static_cast<const void*>(lelm));
417 }
418
419public:
420 constexpr IntrusiveRedBlackTree() = default;
421
422 // Iterator accessors.
423 iterator begin() {
424 return iterator(this->impl.begin());
425 }
426
427 const_iterator begin() const {
428 return const_iterator(this->impl.begin());
429 }
430
431 iterator end() {
432 return iterator(this->impl.end());
433 }
434
435 const_iterator end() const {
436 return const_iterator(this->impl.end());
437 }
438
439 const_iterator cbegin() const {
440 return this->begin();
441 }
442
443 const_iterator cend() const {
444 return this->end();
445 }
446
447 iterator iterator_to(reference ref) {
448 return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
449 }
450
451 const_iterator iterator_to(const_reference ref) const {
452 return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
453 }
454
455 // Content management.
456 bool empty() const {
457 return this->impl.empty();
458 }
459
460 reference back() {
461 return *Traits::GetParent(std::addressof(this->impl.back()));
462 }
463
464 const_reference back() const {
465 return *Traits::GetParent(std::addressof(this->impl.back()));
466 }
467
468 reference front() {
469 return *Traits::GetParent(std::addressof(this->impl.front()));
470 }
471
472 const_reference front() const {
473 return *Traits::GetParent(std::addressof(this->impl.front()));
474 }
475
476 iterator erase(iterator it) {
477 return iterator(this->impl.erase(it.GetImplIterator()));
478 }
479
480 iterator insert(reference ref) {
481 ImplType::pointer node = Traits::GetNode(std::addressof(ref));
482 this->InsertImpl(node);
483 return iterator(node);
484 }
485
486 iterator find(const_reference ref) const {
487 return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
488 }
489
490 iterator nfind(const_reference ref) const {
491 return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
492 }
493
494 iterator find_light(const_light_reference ref) const {
495 return iterator(this->FindLightImpl(std::addressof(ref)));
496 }
497
498 iterator nfind_light(const_light_reference ref) const {
499 return iterator(this->NFindLightImpl(std::addressof(ref)));
500 }
501};
502
503template <auto T, class Derived = impl::GetParentType<T>>
504class IntrusiveRedBlackTreeMemberTraits;
505
506template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
507class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
508public:
509 template <class Comparator>
510 using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
511 using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
512
513private:
514 template <class, class, class>
515 friend class IntrusiveRedBlackTree;
516
517 friend class impl::IntrusiveRedBlackTreeImpl;
518
519 static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
520 return std::addressof(parent->*Member);
521 }
522
523 static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
524 return std::addressof(parent->*Member);
525 }
526
527 static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
528 return GetParentPointer<Member, Derived>(node);
529 }
530
531 static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
532 return GetParentPointer<Member, Derived>(node);
533 }
534
535private:
536 static constexpr TYPED_STORAGE(Derived) DerivedStorage = {};
537 static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
538};
539
540template <auto T, class Derived = impl::GetParentType<T>>
541class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;
542
543template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
544class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
545public:
546 template <class Comparator>
547 using TreeType =
548 IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
549 using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
550
551 static constexpr bool IsValid() {
552 TYPED_STORAGE(Derived) DerivedStorage = {};
553 return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
554 }
555
556private:
557 template <class, class, class>
558 friend class IntrusiveRedBlackTree;
559
560 friend class impl::IntrusiveRedBlackTreeImpl;
561
562 static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
563 return std::addressof(parent->*Member);
564 }
565
566 static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
567 return std::addressof(parent->*Member);
568 }
569
570 static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
571 return GetParentPointer<Member, Derived>(node);
572 }
573
574 static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
575 return GetParentPointer<Member, Derived>(node);
576 }
577};
578
579template <class Derived>
580class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
581public:
582 constexpr Derived* GetPrev() {
583 return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
584 }
585 constexpr const Derived* GetPrev() const {
586 return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
587 }
588
589 constexpr Derived* GetNext() {
590 return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
591 }
592 constexpr const Derived* GetNext() const {
593 return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
594 }
595};
596
597template <class Derived>
598class IntrusiveRedBlackTreeBaseTraits {
599public:
600 template <class Comparator>
601 using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
602 using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;
603
604private:
605 template <class, class, class>
606 friend class IntrusiveRedBlackTree;
607
608 friend class impl::IntrusiveRedBlackTreeImpl;
609
610 static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
611 return static_cast<IntrusiveRedBlackTreeNode*>(parent);
612 }
613
614 static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
615 return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
616 }
617
618 static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
619 return static_cast<Derived*>(node);
620 }
621
622 static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
623 return static_cast<const Derived*>(node);
624 }
625};
626
627} // namespace Common
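
The header above is consumed by embedding a node in the element type and naming a comparator. A minimal sketch under invented types (Object, ObjectComparator, and Demo are illustrative, not part of this commit): the LightCompareType alias satisfies the HasLightCompareType concept so find_light() can search with a bare key instead of a fully constructed element, and the DeferredAssert traits variant is used so the layout sanity check stays deferred to an optional IsValid() call.

#include "common/intrusive_red_black_tree.h"

struct Object {
    int key{};
    Common::IntrusiveRedBlackTreeNode node{};
};

struct ObjectComparator {
    using LightCompareType = int; // enables find_light(int)

    static int Compare(const Object& lhs, const Object& rhs) {
        return Compare(lhs.key, rhs);
    }
    static int Compare(int lhs, const Object& rhs) {
        return (lhs < rhs.key) ? -1 : (lhs > rhs.key) ? 1 : 0;
    }
};

using ObjectTraits = Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Object::node>;
using ObjectTree = ObjectTraits::TreeType<ObjectComparator>;

void Demo() {
    Object a{1};
    Object b{2};

    // The tree allocates nothing; it links the nodes living inside a and b,
    // so each element must outlive its membership in the tree.
    ObjectTree tree;
    tree.insert(a);
    tree.insert(b);

    if (auto it = tree.find_light(2); it != tree.end()) {
        // it->key == 2; tree.erase(it) would unlink b without freeing it.
    }
}
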
diff --git a/src/common/parent_of_member.h b/src/common/parent_of_member.h
new file mode 100644
index 000000000..1af31ee44
--- /dev/null
+++ b/src/common/parent_of_member.h
@@ -0,0 +1,189 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <type_traits>
8
9#include "common/assert.h"
10#include "common/common_types.h"
11
12namespace Common {
13
14template <typename T, size_t Size, size_t Align>
15struct TypedStorage {
16 std::aligned_storage_t<Size, Align> storage_;
17};
18
19#define TYPED_STORAGE(...) TypedStorage<__VA_ARGS__, sizeof(__VA_ARGS__), alignof(__VA_ARGS__)>
20
21template <typename T>
22static constexpr T* GetPointer(TYPED_STORAGE(T) & ts) {
23 return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
24}
25
26template <typename T>
27static constexpr const T* GetPointer(const TYPED_STORAGE(T) & ts) {
28 return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
29}
30
31namespace impl {
32
33template <size_t MaxDepth>
34struct OffsetOfUnionHolder {
35 template <typename ParentType, typename MemberType, size_t Offset>
36 union UnionImpl {
37 using PaddingMember = char;
38 static constexpr size_t GetOffset() {
39 return Offset;
40 }
41
42#pragma pack(push, 1)
43 struct {
44 PaddingMember padding[Offset];
45 MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
46 } data;
47#pragma pack(pop)
48 UnionImpl<ParentType, MemberType, Offset + 1> next_union;
49 };
50
51 template <typename ParentType, typename MemberType>
52 union UnionImpl<ParentType, MemberType, 0> {
53 static constexpr size_t GetOffset() {
54 return 0;
55 }
56
57 struct {
58 MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
59 } data;
60 UnionImpl<ParentType, MemberType, 1> next_union;
61 };
62
63 template <typename ParentType, typename MemberType>
64 union UnionImpl<ParentType, MemberType, MaxDepth> {};
65};
66
67template <typename ParentType, typename MemberType>
68struct OffsetOfCalculator {
69 using UnionHolder =
70 typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
71 0>;
72 union Union {
73 char c{};
74 UnionHolder first_union;
75 TYPED_STORAGE(ParentType) parent;
76
77 constexpr Union() : c() {}
78 };
79 static constexpr Union U = {};
80
81 static constexpr const MemberType* GetNextAddress(const MemberType* start,
82 const MemberType* target) {
83 while (start < target) {
84 start++;
85 }
86 return start;
87 }
88
89 static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
90 const MemberType* target) {
91 return (target - start) * sizeof(MemberType);
92 }
93
94 template <typename CurUnion>
95 static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
96 CurUnion& cur_union) {
97 constexpr size_t Offset = CurUnion::GetOffset();
98 const auto target = std::addressof(GetPointer(U.parent)->*member);
99 const auto start = std::addressof(cur_union.data.members[0]);
100 const auto next = GetNextAddress(start, target);
101
102 if (next != target) {
103 if constexpr (Offset < sizeof(MemberType) - 1) {
104 return OffsetOfImpl(member, cur_union.next_union);
105 } else {
106 UNREACHABLE();
107 }
108 }
109
110 return (next - start) * sizeof(MemberType) + Offset;
111 }
112
113 static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
114 return OffsetOfImpl(member, U.first_union);
115 }
116};
117
118template <typename T>
119struct GetMemberPointerTraits;
120
121template <typename P, typename M>
122struct GetMemberPointerTraits<M P::*> {
123 using Parent = P;
124 using Member = M;
125};
126
127template <auto MemberPtr>
128using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;
129
130template <auto MemberPtr>
131using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;
132
133template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
134static inline std::ptrdiff_t OffsetOf = [] {
135 using DeducedParentType = GetParentType<MemberPtr>;
136 using MemberType = GetMemberType<MemberPtr>;
137 static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
138 std::is_same<RealParentType, DeducedParentType>::value);
139
140 return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
141}();
142
143} // namespace impl
144
145template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
146constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
147 std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
148 return *static_cast<RealParentType*>(
149 static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
150}
151
152template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
153constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
154 std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
155 return *static_cast<const RealParentType*>(static_cast<const void*>(
156 static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
157}
158
159template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
160constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
161 return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
162}
163
164template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
165constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
166 return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
167}
168
169template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
170constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
171 return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
172}
173
174template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
175constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
176 return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
177}
178
179template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
180constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
181 return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
182}
183
184template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
185constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
186 return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
187}
188
189} // namespace Common
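
parent_of_member.h supplies a compile-time analogue of the container_of idiom: given a pointer to a member, GetParentPointer recovers the object containing it by subtracting the member's offset, which OffsetOf derives from the member pointer alone. A small sketch with an invented type (Widget and PointerRoundTrip are illustrative, not part of this commit):

#include "common/parent_of_member.h"

struct Widget {
    int id{};
    float weight{};
};

bool PointerRoundTrip() {
    Widget w{42, 1.5f};
    float* member = &w.weight;

    // Recover the Widget that owns this float member.
    Widget* parent = Common::GetParentPointer<&Widget::weight>(member);
    return parent == &w; // true
}
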
diff --git a/src/common/tree.h b/src/common/tree.h
new file mode 100644
index 000000000..a6b636646
--- /dev/null
+++ b/src/common/tree.h
@@ -0,0 +1,822 @@
1/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
2/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
3/* $FreeBSD$ */
4
5/*-
6 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#ifndef _SYS_TREE_H_
31#define _SYS_TREE_H_
32
33/* FreeBSD <sys/cdefs.h> has a lot of defines we don't really want. */
34/* tree.h only actually uses __inline and __unused, so we'll just define those. */
35
36/* #include <sys/cdefs.h> */
37
38#ifndef __inline
39#define __inline inline
40#endif
41
42/*
43 * This file defines data structures for different types of trees:
44 * splay trees and red-black trees.
45 *
46 * A splay tree is a self-organizing data structure. Every operation
47 * on the tree causes a splay to happen. The splay moves the requested
48 * node to the root of the tree and partly rebalances it.
49 *
50 * This has the benefit that request locality causes faster lookups as
51 * the requested nodes move to the top of the tree. On the other hand,
52 * every lookup causes memory writes.
53 *
54 * The Balance Theorem bounds the total access time for m operations
55 * and n inserts on an initially empty tree as O((m + n)lg n). The
56 * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
57 *
58 * A red-black tree is a binary search tree with the node color as an
59 * extra attribute. It fulfills a set of conditions:
60 * - every search path from the root to a leaf consists of the
61 * same number of black nodes,
62 * - each red node (except for the root) has a black parent,
63 * - each leaf node is black.
64 *
65 * Every operation on a red-black tree is bounded as O(lg n).
66 * The maximum height of a red-black tree is 2lg (n+1).
67 */
68
69#define SPLAY_HEAD(name, type) \
70 struct name { \
71 struct type* sph_root; /* root of the tree */ \
72 }
73
74#define SPLAY_INITIALIZER(root) \
75 { NULL }
76
77#define SPLAY_INIT(root) \
78 do { \
79 (root)->sph_root = NULL; \
80 } while (/*CONSTCOND*/ 0)
81
82#define SPLAY_ENTRY(type) \
83 struct { \
84 struct type* spe_left; /* left element */ \
85 struct type* spe_right; /* right element */ \
86 }
87
88#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
89#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
90#define SPLAY_ROOT(head) (head)->sph_root
91#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
92
93/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
94#define SPLAY_ROTATE_RIGHT(head, tmp, field) \
95 do { \
96 SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
97 SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
98 (head)->sph_root = tmp; \
99 } while (/*CONSTCOND*/ 0)
100
101#define SPLAY_ROTATE_LEFT(head, tmp, field) \
102 do { \
103 SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
104 SPLAY_LEFT(tmp, field) = (head)->sph_root; \
105 (head)->sph_root = tmp; \
106 } while (/*CONSTCOND*/ 0)
107
108#define SPLAY_LINKLEFT(head, tmp, field) \
109 do { \
110 SPLAY_LEFT(tmp, field) = (head)->sph_root; \
111 tmp = (head)->sph_root; \
112 (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
113 } while (/*CONSTCOND*/ 0)
114
115#define SPLAY_LINKRIGHT(head, tmp, field) \
116 do { \
117 SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
118 tmp = (head)->sph_root; \
119 (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
120 } while (/*CONSTCOND*/ 0)
121
122#define SPLAY_ASSEMBLE(head, node, left, right, field) \
123 do { \
124 SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
125 SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field); \
126 SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
127 SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
128 } while (/*CONSTCOND*/ 0)
129
130/* Generates prototypes and inline functions */
131
132#define SPLAY_PROTOTYPE(name, type, field, cmp) \
133 void name##_SPLAY(struct name*, struct type*); \
134 void name##_SPLAY_MINMAX(struct name*, int); \
135 struct type* name##_SPLAY_INSERT(struct name*, struct type*); \
136 struct type* name##_SPLAY_REMOVE(struct name*, struct type*); \
137 \
138 /* Finds the node with the same key as elm */ \
139 static __inline struct type* name##_SPLAY_FIND(struct name* head, struct type* elm) { \
140 if (SPLAY_EMPTY(head)) \
141 return (NULL); \
142 name##_SPLAY(head, elm); \
143 if ((cmp)(elm, (head)->sph_root) == 0) \
144 return (head->sph_root); \
145 return (NULL); \
146 } \
147 \
148 static __inline struct type* name##_SPLAY_NEXT(struct name* head, struct type* elm) { \
149 name##_SPLAY(head, elm); \
150 if (SPLAY_RIGHT(elm, field) != NULL) { \
151 elm = SPLAY_RIGHT(elm, field); \
152 while (SPLAY_LEFT(elm, field) != NULL) { \
153 elm = SPLAY_LEFT(elm, field); \
154 } \
155 } else \
156 elm = NULL; \
157 return (elm); \
158 } \
159 \
160 static __inline struct type* name##_SPLAY_MIN_MAX(struct name* head, int val) { \
161 name##_SPLAY_MINMAX(head, val); \
162 return (SPLAY_ROOT(head)); \
163 }
164
165/* Main splay operation.
166 * Moves node close to the key of elm to top
167 */
168#define SPLAY_GENERATE(name, type, field, cmp) \
169 struct type* name##_SPLAY_INSERT(struct name* head, struct type* elm) { \
170 if (SPLAY_EMPTY(head)) { \
171 SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
172 } else { \
173 int __comp; \
174 name##_SPLAY(head, elm); \
175 __comp = (cmp)(elm, (head)->sph_root); \
176 if (__comp < 0) { \
177 SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field); \
178 SPLAY_RIGHT(elm, field) = (head)->sph_root; \
179 SPLAY_LEFT((head)->sph_root, field) = NULL; \
180 } else if (__comp > 0) { \
181 SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field); \
182 SPLAY_LEFT(elm, field) = (head)->sph_root; \
183 SPLAY_RIGHT((head)->sph_root, field) = NULL; \
184 } else \
185 return ((head)->sph_root); \
186 } \
187 (head)->sph_root = (elm); \
188 return (NULL); \
189 } \
190 \
191 struct type* name##_SPLAY_REMOVE(struct name* head, struct type* elm) { \
192 struct type* __tmp; \
193 if (SPLAY_EMPTY(head)) \
194 return (NULL); \
195 name##_SPLAY(head, elm); \
196 if ((cmp)(elm, (head)->sph_root) == 0) { \
197 if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
198 (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
199 } else { \
200 __tmp = SPLAY_RIGHT((head)->sph_root, field); \
201 (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
202 name##_SPLAY(head, elm); \
203 SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
204 } \
205 return (elm); \
206 } \
207 return (NULL); \
208 } \
209 \
210 void name##_SPLAY(struct name* head, struct type* elm) { \
211 struct type __node, *__left, *__right, *__tmp; \
212 int __comp; \
213 \
214 SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
215 __left = __right = &__node; \
216 \
217 while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \
218 if (__comp < 0) { \
219 __tmp = SPLAY_LEFT((head)->sph_root, field); \
220 if (__tmp == NULL) \
221 break; \
222 if ((cmp)(elm, __tmp) < 0) { \
223 SPLAY_ROTATE_RIGHT(head, __tmp, field); \
224 if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
225 break; \
226 } \
227 SPLAY_LINKLEFT(head, __right, field); \
228 } else if (__comp > 0) { \
229 __tmp = SPLAY_RIGHT((head)->sph_root, field); \
230 if (__tmp == NULL) \
231 break; \
232 if ((cmp)(elm, __tmp) > 0) { \
233 SPLAY_ROTATE_LEFT(head, __tmp, field); \
234 if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
235 break; \
236 } \
237 SPLAY_LINKRIGHT(head, __left, field); \
238 } \
239 } \
240 SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
241 } \
242 \
243 /* Splay with either the minimum or the maximum element \
244 * Used to find minimum or maximum element in tree. \
245 */ \
246 void name##_SPLAY_MINMAX(struct name* head, int __comp) { \
247 struct type __node, *__left, *__right, *__tmp; \
248 \
249 SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
250 __left = __right = &__node; \
251 \
252 while (1) { \
253 if (__comp < 0) { \
254 __tmp = SPLAY_LEFT((head)->sph_root, field); \
255 if (__tmp == NULL) \
256 break; \
257 if (__comp < 0) { \
258 SPLAY_ROTATE_RIGHT(head, __tmp, field); \
259 if (SPLAY_LEFT((head)->sph_root, field) == NULL) \
260 break; \
261 } \
262 SPLAY_LINKLEFT(head, __right, field); \
263 } else if (__comp > 0) { \
264 __tmp = SPLAY_RIGHT((head)->sph_root, field); \
265 if (__tmp == NULL) \
266 break; \
267 if (__comp > 0) { \
268 SPLAY_ROTATE_LEFT(head, __tmp, field); \
269 if (SPLAY_RIGHT((head)->sph_root, field) == NULL) \
270 break; \
271 } \
272 SPLAY_LINKRIGHT(head, __left, field); \
273 } \
274 } \
275 SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
276 }
277
278#define SPLAY_NEGINF -1
279#define SPLAY_INF 1
280
281#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
282#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
283#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
284#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
285#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
286#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
287
288#define SPLAY_FOREACH(x, name, head) \
289 for ((x) = SPLAY_MIN(name, head); (x) != NULL; (x) = SPLAY_NEXT(name, head, x))
290
291/* Macros that define a red-black tree */
292#define RB_HEAD(name, type) \
293 struct name { \
294 struct type* rbh_root; /* root of the tree */ \
295 }
296
297#define RB_INITIALIZER(root) \
298 { NULL }
299
300#define RB_INIT(root) \
301 do { \
302 (root)->rbh_root = NULL; \
303 } while (/*CONSTCOND*/ 0)
304
305#define RB_BLACK 0
306#define RB_RED 1
307#define RB_ENTRY(type) \
308 struct { \
309 struct type* rbe_left; /* left element */ \
310 struct type* rbe_right; /* right element */ \
311 struct type* rbe_parent; /* parent element */ \
312 int rbe_color; /* node color */ \
313 }
314
315#define RB_LEFT(elm, field) (elm)->field.rbe_left
316#define RB_RIGHT(elm, field) (elm)->field.rbe_right
317#define RB_PARENT(elm, field) (elm)->field.rbe_parent
318#define RB_COLOR(elm, field) (elm)->field.rbe_color
319#define RB_ROOT(head) (head)->rbh_root
320#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
321
322#define RB_SET(elm, parent, field) \
323 do { \
324 RB_PARENT(elm, field) = parent; \
325 RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
326 RB_COLOR(elm, field) = RB_RED; \
327 } while (/*CONSTCOND*/ 0)
328
329#define RB_SET_BLACKRED(black, red, field) \
330 do { \
331 RB_COLOR(black, field) = RB_BLACK; \
332 RB_COLOR(red, field) = RB_RED; \
333 } while (/*CONSTCOND*/ 0)
334
335#ifndef RB_AUGMENT
336#define RB_AUGMENT(x) \
337 do { \
338 } while (0)
339#endif
340
341#define RB_ROTATE_LEFT(head, elm, tmp, field) \
342 do { \
343 (tmp) = RB_RIGHT(elm, field); \
344 if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \
345 RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
346 } \
347 RB_AUGMENT(elm); \
348 if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
349 if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
350 RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
351 else \
352 RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
353 } else \
354 (head)->rbh_root = (tmp); \
355 RB_LEFT(tmp, field) = (elm); \
356 RB_PARENT(elm, field) = (tmp); \
357 RB_AUGMENT(tmp); \
358 if ((RB_PARENT(tmp, field))) \
359 RB_AUGMENT(RB_PARENT(tmp, field)); \
360 } while (/*CONSTCOND*/ 0)
361
362#define RB_ROTATE_RIGHT(head, elm, tmp, field) \
363 do { \
364 (tmp) = RB_LEFT(elm, field); \
365 if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \
366 RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
367 } \
368 RB_AUGMENT(elm); \
369 if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field)) != NULL) { \
370 if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
371 RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
372 else \
373 RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
374 } else \
375 (head)->rbh_root = (tmp); \
376 RB_RIGHT(tmp, field) = (elm); \
377 RB_PARENT(elm, field) = (tmp); \
378 RB_AUGMENT(tmp); \
379 if ((RB_PARENT(tmp, field))) \
380 RB_AUGMENT(RB_PARENT(tmp, field)); \
381 } while (/*CONSTCOND*/ 0)
382
383/* Generates prototypes and inline functions */
384#define RB_PROTOTYPE(name, type, field, cmp) RB_PROTOTYPE_INTERNAL(name, type, field, cmp, )
385#define RB_PROTOTYPE_STATIC(name, type, field, cmp) \
386 RB_PROTOTYPE_INTERNAL(name, type, field, cmp, static)
387#define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
388 RB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
389 RB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
390 RB_PROTOTYPE_INSERT(name, type, attr); \
391 RB_PROTOTYPE_REMOVE(name, type, attr); \
392 RB_PROTOTYPE_FIND(name, type, attr); \
393 RB_PROTOTYPE_NFIND(name, type, attr); \
394 RB_PROTOTYPE_FIND_LIGHT(name, type, attr); \
395 RB_PROTOTYPE_NFIND_LIGHT(name, type, attr); \
396 RB_PROTOTYPE_NEXT(name, type, attr); \
397 RB_PROTOTYPE_PREV(name, type, attr); \
398 RB_PROTOTYPE_MINMAX(name, type, attr);
399#define RB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
400 attr void name##_RB_INSERT_COLOR(struct name*, struct type*)
401#define RB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
402 attr void name##_RB_REMOVE_COLOR(struct name*, struct type*, struct type*)
403#define RB_PROTOTYPE_REMOVE(name, type, attr) \
404 attr struct type* name##_RB_REMOVE(struct name*, struct type*)
405#define RB_PROTOTYPE_INSERT(name, type, attr) \
406 attr struct type* name##_RB_INSERT(struct name*, struct type*)
407#define RB_PROTOTYPE_FIND(name, type, attr) \
408 attr struct type* name##_RB_FIND(struct name*, struct type*)
409#define RB_PROTOTYPE_NFIND(name, type, attr) \
410 attr struct type* name##_RB_NFIND(struct name*, struct type*)
411#define RB_PROTOTYPE_FIND_LIGHT(name, type, attr) \
412 attr struct type* name##_RB_FIND_LIGHT(struct name*, const void*)
413#define RB_PROTOTYPE_NFIND_LIGHT(name, type, attr) \
414 attr struct type* name##_RB_NFIND_LIGHT(struct name*, const void*)
415#define RB_PROTOTYPE_NEXT(name, type, attr) attr struct type* name##_RB_NEXT(struct type*)
416#define RB_PROTOTYPE_PREV(name, type, attr) attr struct type* name##_RB_PREV(struct type*)
417#define RB_PROTOTYPE_MINMAX(name, type, attr) attr struct type* name##_RB_MINMAX(struct name*, int)
418
419/* Main rb operation.
420 * Moves node close to the key of elm to top
421 */
422#define RB_GENERATE_WITHOUT_COMPARE(name, type, field) \
423 RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, )
424#define RB_GENERATE_WITHOUT_COMPARE_STATIC(name, type, field) \
425 RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, static)
426#define RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
427 RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
428 RB_GENERATE_REMOVE(name, type, field, attr) \
429 RB_GENERATE_NEXT(name, type, field, attr) \
430 RB_GENERATE_PREV(name, type, field, attr) \
431 RB_GENERATE_MINMAX(name, type, field, attr)
432
433#define RB_GENERATE_WITH_COMPARE(name, type, field, cmp, lcmp) \
434 RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, )
435#define RB_GENERATE_WITH_COMPARE_STATIC(name, type, field, cmp, lcmp) \
436 RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, static)
437#define RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, lcmp, attr) \
438 RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
439 RB_GENERATE_INSERT(name, type, field, cmp, attr) \
440 RB_GENERATE_FIND(name, type, field, cmp, attr) \
441 RB_GENERATE_NFIND(name, type, field, cmp, attr) \
442 RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
443 RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr)
444
445#define RB_GENERATE_ALL(name, type, field, cmp) RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, )
446#define RB_GENERATE_ALL_STATIC(name, type, field, cmp) \
447 RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, static)
448#define RB_GENERATE_ALL_INTERNAL(name, type, field, cmp, attr) \
449 RB_GENERATE_WITHOUT_COMPARE_INTERNAL(name, type, field, attr) \
450 RB_GENERATE_WITH_COMPARE_INTERNAL(name, type, field, cmp, attr)
451
452#define RB_GENERATE_INSERT_COLOR(name, type, field, attr) \
453 attr void name##_RB_INSERT_COLOR(struct name* head, struct type* elm) { \
454 struct type *parent, *gparent, *tmp; \
455 while ((parent = RB_PARENT(elm, field)) != NULL && RB_COLOR(parent, field) == RB_RED) { \
456 gparent = RB_PARENT(parent, field); \
457 if (parent == RB_LEFT(gparent, field)) { \
458 tmp = RB_RIGHT(gparent, field); \
459 if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
460 RB_COLOR(tmp, field) = RB_BLACK; \
461 RB_SET_BLACKRED(parent, gparent, field); \
462 elm = gparent; \
463 continue; \
464 } \
465 if (RB_RIGHT(parent, field) == elm) { \
466 RB_ROTATE_LEFT(head, parent, tmp, field); \
467 tmp = parent; \
468 parent = elm; \
469 elm = tmp; \
470 } \
471 RB_SET_BLACKRED(parent, gparent, field); \
472 RB_ROTATE_RIGHT(head, gparent, tmp, field); \
473 } else { \
474 tmp = RB_LEFT(gparent, field); \
475 if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
476 RB_COLOR(tmp, field) = RB_BLACK; \
477 RB_SET_BLACKRED(parent, gparent, field); \
478 elm = gparent; \
479 continue; \
480 } \
481 if (RB_LEFT(parent, field) == elm) { \
482 RB_ROTATE_RIGHT(head, parent, tmp, field); \
483 tmp = parent; \
484 parent = elm; \
485 elm = tmp; \
486 } \
487 RB_SET_BLACKRED(parent, gparent, field); \
488 RB_ROTATE_LEFT(head, gparent, tmp, field); \
489 } \
490 } \
491 RB_COLOR(head->rbh_root, field) = RB_BLACK; \
492 }
493
494#define RB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
495 attr void name##_RB_REMOVE_COLOR(struct name* head, struct type* parent, struct type* elm) { \
496 struct type* tmp; \
497 while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && elm != RB_ROOT(head)) { \
498 if (RB_LEFT(parent, field) == elm) { \
499 tmp = RB_RIGHT(parent, field); \
500 if (RB_COLOR(tmp, field) == RB_RED) { \
501 RB_SET_BLACKRED(tmp, parent, field); \
502 RB_ROTATE_LEFT(head, parent, tmp, field); \
503 tmp = RB_RIGHT(parent, field); \
504 } \
505 if ((RB_LEFT(tmp, field) == NULL || \
506 RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
507 (RB_RIGHT(tmp, field) == NULL || \
508 RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
509 RB_COLOR(tmp, field) = RB_RED; \
510 elm = parent; \
511 parent = RB_PARENT(elm, field); \
512 } else { \
513 if (RB_RIGHT(tmp, field) == NULL || \
514 RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) { \
515 struct type* oleft; \
516 if ((oleft = RB_LEFT(tmp, field)) != NULL) \
517 RB_COLOR(oleft, field) = RB_BLACK; \
518 RB_COLOR(tmp, field) = RB_RED; \
519 RB_ROTATE_RIGHT(head, tmp, oleft, field); \
520 tmp = RB_RIGHT(parent, field); \
521 } \
522 RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
523 RB_COLOR(parent, field) = RB_BLACK; \
524 if (RB_RIGHT(tmp, field)) \
525 RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK; \
526 RB_ROTATE_LEFT(head, parent, tmp, field); \
527 elm = RB_ROOT(head); \
528 break; \
529 } \
530 } else { \
531 tmp = RB_LEFT(parent, field); \
532 if (RB_COLOR(tmp, field) == RB_RED) { \
533 RB_SET_BLACKRED(tmp, parent, field); \
534 RB_ROTATE_RIGHT(head, parent, tmp, field); \
535 tmp = RB_LEFT(parent, field); \
536 } \
537 if ((RB_LEFT(tmp, field) == NULL || \
538 RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) && \
539 (RB_RIGHT(tmp, field) == NULL || \
540 RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) { \
541 RB_COLOR(tmp, field) = RB_RED; \
542 elm = parent; \
543 parent = RB_PARENT(elm, field); \
544 } else { \
545 if (RB_LEFT(tmp, field) == NULL || \
546 RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) { \
547 struct type* oright; \
548 if ((oright = RB_RIGHT(tmp, field)) != NULL) \
549 RB_COLOR(oright, field) = RB_BLACK; \
550 RB_COLOR(tmp, field) = RB_RED; \
551 RB_ROTATE_LEFT(head, tmp, oright, field); \
552 tmp = RB_LEFT(parent, field); \
553 } \
554 RB_COLOR(tmp, field) = RB_COLOR(parent, field); \
555 RB_COLOR(parent, field) = RB_BLACK; \
556 if (RB_LEFT(tmp, field)) \
557 RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK; \
558 RB_ROTATE_RIGHT(head, parent, tmp, field); \
559 elm = RB_ROOT(head); \
560 break; \
561 } \
562 } \
563 } \
564 if (elm) \
565 RB_COLOR(elm, field) = RB_BLACK; \
566 }
567
568#define RB_GENERATE_REMOVE(name, type, field, attr) \
569 attr struct type* name##_RB_REMOVE(struct name* head, struct type* elm) { \
570 struct type *child, *parent, *old = elm; \
571 int color; \
572 if (RB_LEFT(elm, field) == NULL) \
573 child = RB_RIGHT(elm, field); \
574 else if (RB_RIGHT(elm, field) == NULL) \
575 child = RB_LEFT(elm, field); \
576 else { \
577 struct type* left; \
578 elm = RB_RIGHT(elm, field); \
579 while ((left = RB_LEFT(elm, field)) != NULL) \
580 elm = left; \
581 child = RB_RIGHT(elm, field); \
582 parent = RB_PARENT(elm, field); \
583 color = RB_COLOR(elm, field); \
584 if (child) \
585 RB_PARENT(child, field) = parent; \
586 if (parent) { \
587 if (RB_LEFT(parent, field) == elm) \
588 RB_LEFT(parent, field) = child; \
589 else \
590 RB_RIGHT(parent, field) = child; \
591 RB_AUGMENT(parent); \
592 } else \
593 RB_ROOT(head) = child; \
594 if (RB_PARENT(elm, field) == old) \
595 parent = elm; \
596 (elm)->field = (old)->field; \
597 if (RB_PARENT(old, field)) { \
598 if (RB_LEFT(RB_PARENT(old, field), field) == old) \
599 RB_LEFT(RB_PARENT(old, field), field) = elm; \
600 else \
601 RB_RIGHT(RB_PARENT(old, field), field) = elm; \
602 RB_AUGMENT(RB_PARENT(old, field)); \
603 } else \
604 RB_ROOT(head) = elm; \
605 RB_PARENT(RB_LEFT(old, field), field) = elm; \
606 if (RB_RIGHT(old, field)) \
607 RB_PARENT(RB_RIGHT(old, field), field) = elm; \
608 if (parent) { \
609 left = parent; \
610 do { \
611 RB_AUGMENT(left); \
612 } while ((left = RB_PARENT(left, field)) != NULL); \
613 } \
614 goto color; \
615 } \
616 parent = RB_PARENT(elm, field); \
617 color = RB_COLOR(elm, field); \
618 if (child) \
619 RB_PARENT(child, field) = parent; \
620 if (parent) { \
621 if (RB_LEFT(parent, field) == elm) \
622 RB_LEFT(parent, field) = child; \
623 else \
624 RB_RIGHT(parent, field) = child; \
625 RB_AUGMENT(parent); \
626 } else \
627 RB_ROOT(head) = child; \
628 color: \
629 if (color == RB_BLACK) \
630 name##_RB_REMOVE_COLOR(head, parent, child); \
631 return (old); \
632 }
633
634#define RB_GENERATE_INSERT(name, type, field, cmp, attr) \
635 /* Inserts a node into the RB tree */ \
636 attr struct type* name##_RB_INSERT(struct name* head, struct type* elm) { \
637 struct type* tmp; \
638 struct type* parent = NULL; \
639 int comp = 0; \
640 tmp = RB_ROOT(head); \
641 while (tmp) { \
642 parent = tmp; \
643 comp = (cmp)(elm, parent); \
644 if (comp < 0) \
645 tmp = RB_LEFT(tmp, field); \
646 else if (comp > 0) \
647 tmp = RB_RIGHT(tmp, field); \
648 else \
649 return (tmp); \
650 } \
651 RB_SET(elm, parent, field); \
652 if (parent != NULL) { \
653 if (comp < 0) \
654 RB_LEFT(parent, field) = elm; \
655 else \
656 RB_RIGHT(parent, field) = elm; \
657 RB_AUGMENT(parent); \
658 } else \
659 RB_ROOT(head) = elm; \
660 name##_RB_INSERT_COLOR(head, elm); \
661 return (NULL); \
662 }
663
664#define RB_GENERATE_FIND(name, type, field, cmp, attr) \
665 /* Finds the node with the same key as elm */ \
666 attr struct type* name##_RB_FIND(struct name* head, struct type* elm) { \
667 struct type* tmp = RB_ROOT(head); \
668 int comp; \
669 while (tmp) { \
670 comp = cmp(elm, tmp); \
671 if (comp < 0) \
672 tmp = RB_LEFT(tmp, field); \
673 else if (comp > 0) \
674 tmp = RB_RIGHT(tmp, field); \
675 else \
676 return (tmp); \
677 } \
678 return (NULL); \
679 }
680
681#define RB_GENERATE_NFIND(name, type, field, cmp, attr) \
682 /* Finds the first node greater than or equal to the search key */ \
683 attr struct type* name##_RB_NFIND(struct name* head, struct type* elm) { \
684 struct type* tmp = RB_ROOT(head); \
685 struct type* res = NULL; \
686 int comp; \
687 while (tmp) { \
688 comp = cmp(elm, tmp); \
689 if (comp < 0) { \
690 res = tmp; \
691 tmp = RB_LEFT(tmp, field); \
692 } else if (comp > 0) \
693 tmp = RB_RIGHT(tmp, field); \
694 else \
695 return (tmp); \
696 } \
697 return (res); \
698 }
699
700#define RB_GENERATE_FIND_LIGHT(name, type, field, lcmp, attr) \
701 /* Finds the node with the same key as elm */ \
702 attr struct type* name##_RB_FIND_LIGHT(struct name* head, const void* lelm) { \
703 struct type* tmp = RB_ROOT(head); \
704 int comp; \
705 while (tmp) { \
706 comp = lcmp(lelm, tmp); \
707 if (comp < 0) \
708 tmp = RB_LEFT(tmp, field); \
709 else if (comp > 0) \
710 tmp = RB_RIGHT(tmp, field); \
711 else \
712 return (tmp); \
713 } \
714 return (NULL); \
715 }
716
717#define RB_GENERATE_NFIND_LIGHT(name, type, field, lcmp, attr) \
718 /* Finds the first node greater than or equal to the search key */ \
719 attr struct type* name##_RB_NFIND_LIGHT(struct name* head, const void* lelm) { \
720 struct type* tmp = RB_ROOT(head); \
721 struct type* res = NULL; \
722 int comp; \
723 while (tmp) { \
724 comp = lcmp(lelm, tmp); \
725 if (comp < 0) { \
726 res = tmp; \
727 tmp = RB_LEFT(tmp, field); \
728 } else if (comp > 0) \
729 tmp = RB_RIGHT(tmp, field); \
730 else \
731 return (tmp); \
732 } \
733 return (res); \
734 }
735
736#define RB_GENERATE_NEXT(name, type, field, attr) \
737 /* ARGSUSED */ \
738 attr struct type* name##_RB_NEXT(struct type* elm) { \
739 if (RB_RIGHT(elm, field)) { \
740 elm = RB_RIGHT(elm, field); \
741 while (RB_LEFT(elm, field)) \
742 elm = RB_LEFT(elm, field); \
743 } else { \
744 if (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
745 elm = RB_PARENT(elm, field); \
746 else { \
747 while (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
748 elm = RB_PARENT(elm, field); \
749 elm = RB_PARENT(elm, field); \
750 } \
751 } \
752 return (elm); \
753 }
754
755#define RB_GENERATE_PREV(name, type, field, attr) \
756 /* ARGSUSED */ \
757 attr struct type* name##_RB_PREV(struct type* elm) { \
758 if (RB_LEFT(elm, field)) { \
759 elm = RB_LEFT(elm, field); \
760 while (RB_RIGHT(elm, field)) \
761 elm = RB_RIGHT(elm, field); \
762 } else { \
763 if (RB_PARENT(elm, field) && (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \
764 elm = RB_PARENT(elm, field); \
765 else { \
766 while (RB_PARENT(elm, field) && (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
767 elm = RB_PARENT(elm, field); \
768 elm = RB_PARENT(elm, field); \
769 } \
770 } \
771 return (elm); \
772 }
773
774#define RB_GENERATE_MINMAX(name, type, field, attr) \
775 attr struct type* name##_RB_MINMAX(struct name* head, int val) { \
776 struct type* tmp = RB_ROOT(head); \
777 struct type* parent = NULL; \
778 while (tmp) { \
779 parent = tmp; \
780 if (val < 0) \
781 tmp = RB_LEFT(tmp, field); \
782 else \
783 tmp = RB_RIGHT(tmp, field); \
784 } \
785 return (parent); \
786 }
787
788#define RB_NEGINF -1
789#define RB_INF 1
790
791#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
792#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
793#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
794#define RB_NFIND(name, x, y) name##_RB_NFIND(x, y)
795#define RB_FIND_LIGHT(name, x, y) name##_RB_FIND_LIGHT(x, y)
796#define RB_NFIND_LIGHT(name, x, y) name##_RB_NFIND_LIGHT(x, y)
797#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
798#define RB_PREV(name, x, y) name##_RB_PREV(y)
799#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
800#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
801
802#define RB_FOREACH(x, name, head) \
803 for ((x) = RB_MIN(name, head); (x) != NULL; (x) = name##_RB_NEXT(x))
804
805#define RB_FOREACH_FROM(x, name, y) \
806 for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); (x) = (y))
807
808#define RB_FOREACH_SAFE(x, name, head, y) \
809 for ((x) = RB_MIN(name, head); ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \
810 (x) = (y))
811
812#define RB_FOREACH_REVERSE(x, name, head) \
813 for ((x) = RB_MAX(name, head); (x) != NULL; (x) = name##_RB_PREV(x))
814
815#define RB_FOREACH_REVERSE_FROM(x, name, y) \
816 for ((x) = (y); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); (x) = (y))
817
818#define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \
819 for ((x) = RB_MAX(name, head); ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
820 (x) = (y))
821
822#endif /* _SYS_TREE_H_ */
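
For orientation, here is a minimal sketch of how the macros above are consumed. It is illustrative only and assumes the RB_HEAD/RB_ENTRY/RB_INITIALIZER/RB_GENERATE wrappers defined earlier in this header, which follow the classic BSD <sys/tree.h> shape:

    #include <cstdio>
    #include "common/tree.h"

    struct Node {
        int key;
        RB_ENTRY(Node) entry; // intrusive parent/left/right/color links
    };

    // Three-way comparator, as the cmp parameter expects: <0, 0, >0.
    static int NodeCmp(Node* a, Node* b) {
        return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    RB_HEAD(NodeTree, Node);
    RB_GENERATE(NodeTree, Node, entry, NodeCmp)

    int main() {
        NodeTree head = RB_INITIALIZER(&head);
        for (int key : {3, 1, 4, 1, 5}) {
            Node* n = new Node{key, {}};
            if (RB_INSERT(NodeTree, &head, n) != nullptr) {
                delete n; // duplicate key: RB_INSERT returned the existing holder
            }
        }
        Node* it = nullptr;
        RB_FOREACH(it, NodeTree, &head) {
            std::printf("%d ", it->key); // 1 3 4 5, in sorted order
        }
    }

The _LIGHT find variants are the notable extension over stock BSD tree.h: they take an opaque key pointer, so a lookup does not require a fully constructed node. The kernel code below leans on that via nfind_light.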
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 893df433a..1b8ad476e 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -142,8 +142,6 @@ add_library(core STATIC
142 hardware_interrupt_manager.h 142 hardware_interrupt_manager.h
143 hle/ipc.h 143 hle/ipc.h
144 hle/ipc_helpers.h 144 hle/ipc_helpers.h
145 hle/kernel/address_arbiter.cpp
146 hle/kernel/address_arbiter.h
147 hle/kernel/client_port.cpp 145 hle/kernel/client_port.cpp
148 hle/kernel/client_port.h 146 hle/kernel/client_port.h
149 hle/kernel/client_session.cpp 147 hle/kernel/client_session.cpp
@@ -157,13 +155,19 @@ add_library(core STATIC
157 hle/kernel/handle_table.h 155 hle/kernel/handle_table.h
158 hle/kernel/hle_ipc.cpp 156 hle/kernel/hle_ipc.cpp
159 hle/kernel/hle_ipc.h 157 hle/kernel/hle_ipc.h
158 hle/kernel/k_address_arbiter.cpp
159 hle/kernel/k_address_arbiter.h
160 hle/kernel/k_affinity_mask.h 160 hle/kernel/k_affinity_mask.h
161 hle/kernel/k_condition_variable.cpp
162 hle/kernel/k_condition_variable.h
161 hle/kernel/k_priority_queue.h 163 hle/kernel/k_priority_queue.h
162 hle/kernel/k_scheduler.cpp 164 hle/kernel/k_scheduler.cpp
163 hle/kernel/k_scheduler.h 165 hle/kernel/k_scheduler.h
164 hle/kernel/k_scheduler_lock.h 166 hle/kernel/k_scheduler_lock.h
165 hle/kernel/k_scoped_lock.h 167 hle/kernel/k_scoped_lock.h
166 hle/kernel/k_scoped_scheduler_lock_and_sleep.h 168 hle/kernel/k_scoped_scheduler_lock_and_sleep.h
169 hle/kernel/k_synchronization_object.cpp
170 hle/kernel/k_synchronization_object.h
167 hle/kernel/kernel.cpp 171 hle/kernel/kernel.cpp
168 hle/kernel/kernel.h 172 hle/kernel/kernel.h
169 hle/kernel/memory/address_space_info.cpp 173 hle/kernel/memory/address_space_info.cpp
@@ -183,8 +187,6 @@ add_library(core STATIC
183 hle/kernel/memory/slab_heap.h 187 hle/kernel/memory/slab_heap.h
184 hle/kernel/memory/system_control.cpp 188 hle/kernel/memory/system_control.cpp
185 hle/kernel/memory/system_control.h 189 hle/kernel/memory/system_control.h
186 hle/kernel/mutex.cpp
187 hle/kernel/mutex.h
188 hle/kernel/object.cpp 190 hle/kernel/object.cpp
189 hle/kernel/object.h 191 hle/kernel/object.h
190 hle/kernel/physical_core.cpp 192 hle/kernel/physical_core.cpp
@@ -210,12 +212,10 @@ add_library(core STATIC
210 hle/kernel/shared_memory.h 212 hle/kernel/shared_memory.h
211 hle/kernel/svc.cpp 213 hle/kernel/svc.cpp
212 hle/kernel/svc.h 214 hle/kernel/svc.h
215 hle/kernel/svc_common.h
216 hle/kernel/svc_results.h
213 hle/kernel/svc_types.h 217 hle/kernel/svc_types.h
214 hle/kernel/svc_wrap.h 218 hle/kernel/svc_wrap.h
215 hle/kernel/synchronization_object.cpp
216 hle/kernel/synchronization_object.h
217 hle/kernel/synchronization.cpp
218 hle/kernel/synchronization.h
219 hle/kernel/thread.cpp 219 hle/kernel/thread.cpp
220 hle/kernel/thread.h 220 hle/kernel/thread.h
221 hle/kernel/time_manager.cpp 221 hle/kernel/time_manager.cpp
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 70098c526..9a0151736 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -26,9 +26,10 @@ using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CO
26/// Generic ARMv8 CPU interface 26/// Generic ARMv8 CPU interface
27class ARM_Interface : NonCopyable { 27class ARM_Interface : NonCopyable {
28public: 28public:
29 explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock) 29 explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
30 : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{ 30 bool uses_wall_clock_)
31 uses_wall_clock} {} 31 : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
32 uses_wall_clock_} {}
32 virtual ~ARM_Interface() = default; 33 virtual ~ARM_Interface() = default;
33 34
34 struct ThreadContext32 { 35 struct ThreadContext32 {
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index e6c8461a5..874b5673a 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -49,6 +49,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
49 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh); 49 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
50 instance.on_thread_init(); 50 instance.on_thread_init();
51 instance.ThreadLoop(); 51 instance.ThreadLoop();
52 MicroProfileOnThreadExit();
52} 53}
53 54
54void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { 55void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
deleted file mode 100644
index 20ffa7d47..000000000
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <vector>
7
8#include "common/assert.h"
9#include "common/common_types.h"
10#include "core/arm/exclusive_monitor.h"
11#include "core/core.h"
12#include "core/hle/kernel/address_arbiter.h"
13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_scheduler.h"
16#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
17#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/thread.h"
19#include "core/hle/kernel/time_manager.h"
20#include "core/hle/result.h"
21#include "core/memory.h"
22
23namespace Kernel {
24
25// Wake up num_to_wake (or all) threads in a vector.
26void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
27 s32 num_to_wake) {
 28 // Only process up to 'num_to_wake' threads, unless 'num_to_wake' is <= 0, in
 29 // which case process them all.
30 std::size_t last = waiting_threads.size();
31 if (num_to_wake > 0) {
32 last = std::min(last, static_cast<std::size_t>(num_to_wake));
33 }
34
35 // Signal the waiting threads.
36 for (std::size_t i = 0; i < last; i++) {
37 waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
38 RemoveThread(waiting_threads[i]);
39 waiting_threads[i]->WaitForArbitration(false);
40 waiting_threads[i]->ResumeFromWait();
41 }
42}
43
44AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
45AddressArbiter::~AddressArbiter() = default;
46
47ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
48 s32 num_to_wake) {
49 switch (type) {
50 case SignalType::Signal:
51 return SignalToAddressOnly(address, num_to_wake);
52 case SignalType::IncrementAndSignalIfEqual:
53 return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
54 case SignalType::ModifyByWaitingCountAndSignalIfEqual:
55 return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
56 default:
57 return ERR_INVALID_ENUM_VALUE;
58 }
59}
60
61ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
62 KScopedSchedulerLock lock(system.Kernel());
63 const std::vector<std::shared_ptr<Thread>> waiting_threads =
64 GetThreadsWaitingOnAddress(address);
65 WakeThreads(waiting_threads, num_to_wake);
66 return RESULT_SUCCESS;
67}
68
69ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
70 s32 num_to_wake) {
71 KScopedSchedulerLock lock(system.Kernel());
72 auto& memory = system.Memory();
73
74 // Ensure that we can write to the address.
75 if (!memory.IsValidVirtualAddress(address)) {
76 return ERR_INVALID_ADDRESS_STATE;
77 }
78
79 const std::size_t current_core = system.CurrentCoreIndex();
80 auto& monitor = system.Monitor();
81 u32 current_value;
82 do {
83 current_value = monitor.ExclusiveRead32(current_core, address);
84
85 if (current_value != static_cast<u32>(value)) {
86 return ERR_INVALID_STATE;
87 }
88 current_value++;
89 } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
90
91 return SignalToAddressOnly(address, num_to_wake);
92}
93
94ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
95 s32 num_to_wake) {
96 KScopedSchedulerLock lock(system.Kernel());
97 auto& memory = system.Memory();
98
99 // Ensure that we can write to the address.
100 if (!memory.IsValidVirtualAddress(address)) {
101 return ERR_INVALID_ADDRESS_STATE;
102 }
103
104 // Get threads waiting on the address.
105 const std::vector<std::shared_ptr<Thread>> waiting_threads =
106 GetThreadsWaitingOnAddress(address);
107
108 const std::size_t current_core = system.CurrentCoreIndex();
109 auto& monitor = system.Monitor();
110 s32 updated_value;
111 do {
112 updated_value = monitor.ExclusiveRead32(current_core, address);
113
114 if (updated_value != value) {
115 return ERR_INVALID_STATE;
116 }
117 // Determine the modified value depending on the waiting count.
118 if (num_to_wake <= 0) {
119 if (waiting_threads.empty()) {
120 updated_value = value + 1;
121 } else {
122 updated_value = value - 1;
123 }
124 } else {
125 if (waiting_threads.empty()) {
126 updated_value = value + 1;
127 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
128 updated_value = value - 1;
129 } else {
130 updated_value = value;
131 }
132 }
133 } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
134
135 WakeThreads(waiting_threads, num_to_wake);
136 return RESULT_SUCCESS;
137}
138
139ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
140 s64 timeout_ns) {
141 switch (type) {
142 case ArbitrationType::WaitIfLessThan:
143 return WaitForAddressIfLessThan(address, value, timeout_ns, false);
144 case ArbitrationType::DecrementAndWaitIfLessThan:
145 return WaitForAddressIfLessThan(address, value, timeout_ns, true);
146 case ArbitrationType::WaitIfEqual:
147 return WaitForAddressIfEqual(address, value, timeout_ns);
148 default:
149 return ERR_INVALID_ENUM_VALUE;
150 }
151}
152
153ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
154 bool should_decrement) {
155 auto& memory = system.Memory();
156 auto& kernel = system.Kernel();
157 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
158
159 Handle event_handle = InvalidHandle;
160 {
161 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
162
163 if (current_thread->IsPendingTermination()) {
164 lock.CancelSleep();
165 return ERR_THREAD_TERMINATING;
166 }
167
168 // Ensure that we can read the address.
169 if (!memory.IsValidVirtualAddress(address)) {
170 lock.CancelSleep();
171 return ERR_INVALID_ADDRESS_STATE;
172 }
173
174 s32 current_value = static_cast<s32>(memory.Read32(address));
175 if (current_value >= value) {
176 lock.CancelSleep();
177 return ERR_INVALID_STATE;
178 }
179
180 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
181
182 s32 decrement_value;
183
184 const std::size_t current_core = system.CurrentCoreIndex();
185 auto& monitor = system.Monitor();
186 do {
187 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
188 if (should_decrement) {
189 decrement_value = current_value - 1;
190 } else {
191 decrement_value = current_value;
192 }
193 } while (
194 !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
195
196 // Short-circuit without rescheduling, if timeout is zero.
197 if (timeout == 0) {
198 lock.CancelSleep();
199 return RESULT_TIMEOUT;
200 }
201
202 current_thread->SetArbiterWaitAddress(address);
203 InsertThread(SharedFrom(current_thread));
204 current_thread->SetStatus(ThreadStatus::WaitArb);
205 current_thread->WaitForArbitration(true);
206 }
207
208 if (event_handle != InvalidHandle) {
209 auto& time_manager = kernel.TimeManager();
210 time_manager.UnscheduleTimeEvent(event_handle);
211 }
212
213 {
214 KScopedSchedulerLock lock(kernel);
215 if (current_thread->IsWaitingForArbitration()) {
216 RemoveThread(SharedFrom(current_thread));
217 current_thread->WaitForArbitration(false);
218 }
219 }
220
221 return current_thread->GetSignalingResult();
222}
223
224ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
225 auto& memory = system.Memory();
226 auto& kernel = system.Kernel();
227 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
228
229 Handle event_handle = InvalidHandle;
230 {
231 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
232
233 if (current_thread->IsPendingTermination()) {
234 lock.CancelSleep();
235 return ERR_THREAD_TERMINATING;
236 }
237
238 // Ensure that we can read the address.
239 if (!memory.IsValidVirtualAddress(address)) {
240 lock.CancelSleep();
241 return ERR_INVALID_ADDRESS_STATE;
242 }
243
244 s32 current_value = static_cast<s32>(memory.Read32(address));
245 if (current_value != value) {
246 lock.CancelSleep();
247 return ERR_INVALID_STATE;
248 }
249
250 // Short-circuit without rescheduling, if timeout is zero.
251 if (timeout == 0) {
252 lock.CancelSleep();
253 return RESULT_TIMEOUT;
254 }
255
256 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
257 current_thread->SetArbiterWaitAddress(address);
258 InsertThread(SharedFrom(current_thread));
259 current_thread->SetStatus(ThreadStatus::WaitArb);
260 current_thread->WaitForArbitration(true);
261 }
262
263 if (event_handle != InvalidHandle) {
264 auto& time_manager = kernel.TimeManager();
265 time_manager.UnscheduleTimeEvent(event_handle);
266 }
267
268 {
269 KScopedSchedulerLock lock(kernel);
270 if (current_thread->IsWaitingForArbitration()) {
271 RemoveThread(SharedFrom(current_thread));
272 current_thread->WaitForArbitration(false);
273 }
274 }
275
276 return current_thread->GetSignalingResult();
277}
278
279void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
280 const VAddr arb_addr = thread->GetArbiterWaitAddress();
281 std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
282
283 const auto iter =
284 std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
285 return entry->GetPriority() >= thread->GetPriority();
286 });
287
288 if (iter == thread_list.cend()) {
289 thread_list.push_back(std::move(thread));
290 } else {
291 thread_list.insert(iter, std::move(thread));
292 }
293}
294
295void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
296 const VAddr arb_addr = thread->GetArbiterWaitAddress();
297 std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
298
299 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
300 [&thread](const auto& entry) { return thread == entry; });
301
302 if (iter != thread_list.cend()) {
303 thread_list.erase(iter);
304 }
305}
306
307std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
308 VAddr address) const {
309 const auto iter = arb_threads.find(address);
310 if (iter == arb_threads.cend()) {
311 return {};
312 }
313
314 const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
315 return {thread_list.cbegin(), thread_list.cend()};
316}
317} // namespace Kernel
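
The deleted do/while blocks above implement a load-linked/store-conditional retry: ExclusiveRead32 takes an exclusive hold on the word, and ExclusiveWrite32 fails if another core touched it in between, so the loop re-reads and re-validates. A self-contained analogue using std::atomic's compare-exchange (illustrative only; the emulator routes this through its exclusive monitor, not std::atomic):

    #include <atomic>
    #include <cstdint>

    // Mirrors the update step of IncrementAndSignalToAddressIfEqual: increment
    // the word only while it still equals 'expected'; retry on contention.
    bool IncrementIfEqual(std::atomic<uint32_t>& word, uint32_t expected) {
        uint32_t current = word.load();
        do {
            if (current != expected) {
                return false; // The caller would return ERR_INVALID_STATE here.
            }
            // On failure, compare_exchange_weak reloads 'current', so the next
            // iteration re-checks against the fresh value, much like a new
            // ExclusiveRead32.
        } while (!word.compare_exchange_weak(current, current + 1));
        return true;
    }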
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
deleted file mode 100644
index b91edc67d..000000000
--- a/src/core/hle/kernel/address_arbiter.h
+++ /dev/null
@@ -1,91 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <list>
8#include <memory>
9#include <unordered_map>
10#include <vector>
11
12#include "common/common_types.h"
13
14union ResultCode;
15
16namespace Core {
17class System;
18}
19
20namespace Kernel {
21
22class Thread;
23
24class AddressArbiter {
25public:
26 enum class ArbitrationType {
27 WaitIfLessThan = 0,
28 DecrementAndWaitIfLessThan = 1,
29 WaitIfEqual = 2,
30 };
31
32 enum class SignalType {
33 Signal = 0,
34 IncrementAndSignalIfEqual = 1,
35 ModifyByWaitingCountAndSignalIfEqual = 2,
36 };
37
38 explicit AddressArbiter(Core::System& system);
39 ~AddressArbiter();
40
41 AddressArbiter(const AddressArbiter&) = delete;
42 AddressArbiter& operator=(const AddressArbiter&) = delete;
43
44 AddressArbiter(AddressArbiter&&) = default;
45 AddressArbiter& operator=(AddressArbiter&&) = delete;
46
47 /// Signals an address being waited on with a particular signaling type.
48 ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
49
50 /// Waits on an address with a particular arbitration type.
51 ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
52
53private:
54 /// Signals an address being waited on.
55 ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
56
57 /// Signals an address being waited on and increments its value if equal to the value argument.
58 ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
59
60 /// Signals an address being waited on and modifies its value based on waiting thread count if
61 /// equal to the value argument.
62 ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
63 s32 num_to_wake);
64
65 /// Waits on an address if the value passed is less than the argument value,
66 /// optionally decrementing.
67 ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
68 bool should_decrement);
69
70 /// Waits on an address if the value passed is equal to the argument value.
71 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
72
73 /// Wake up num_to_wake (or all) threads in a vector.
74 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
75
76 /// Inserts a thread into the address arbiter container.
77 void InsertThread(std::shared_ptr<Thread> thread);
78
79 /// Removes a thread from the address arbiter container.
80 void RemoveThread(std::shared_ptr<Thread> thread);
81
82 /// Gets the threads waiting on an address.
83 std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
84
85 /// List of threads waiting for an address arbiter
86 std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
87
88 Core::System& system;
89};
90
91} // namespace Kernel
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 8aff2227a..f8f005f15 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
33 server_port->AppendPendingSession(std::move(server)); 33 server_port->AppendPendingSession(std::move(server));
34 } 34 }
35 35
36 // Wake the threads waiting on the ServerPort
37 server_port->Signal();
38
39 return MakeResult(std::move(client)); 36 return MakeResult(std::move(client));
40} 37}
41 38
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index be9eba519..e8e52900d 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -12,7 +12,7 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {} 15ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
16 16
17ClientSession::~ClientSession() { 17ClientSession::~ClientSession() {
18 // This destructor will be called automatically when the last ClientSession handle is closed by 18 // This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
22 } 22 }
23} 23}
24 24
25bool ClientSession::ShouldWait(const Thread* thread) const {
26 UNIMPLEMENTED();
27 return {};
28}
29
30void ClientSession::Acquire(Thread* thread) {
31 UNIMPLEMENTED();
32}
33
34bool ClientSession::IsSignaled() const { 25bool ClientSession::IsSignaled() const {
35 UNIMPLEMENTED(); 26 UNIMPLEMENTED();
36 return true; 27 return true;
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index e5e0690c2..d5c9ebee8 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -7,7 +7,7 @@
7#include <memory> 7#include <memory>
8#include <string> 8#include <string>
9 9
10#include "core/hle/kernel/synchronization_object.h" 10#include "core/hle/kernel/k_synchronization_object.h"
11#include "core/hle/result.h" 11#include "core/hle/result.h"
12 12
13union ResultCode; 13union ResultCode;
@@ -26,7 +26,7 @@ class KernelCore;
26class Session; 26class Session;
27class Thread; 27class Thread;
28 28
29class ClientSession final : public SynchronizationObject { 29class ClientSession final : public KSynchronizationObject {
30public: 30public:
31 explicit ClientSession(KernelCore& kernel); 31 explicit ClientSession(KernelCore& kernel);
32 ~ClientSession() override; 32 ~ClientSession() override;
@@ -49,10 +49,6 @@ public:
49 ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory, 49 ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
50 Core::Timing::CoreTiming& core_timing); 50 Core::Timing::CoreTiming& core_timing);
51 51
52 bool ShouldWait(const Thread* thread) const override;
53
54 void Acquire(Thread* thread) override;
55
56 bool IsSignaled() const override; 52 bool IsSignaled() const override;
57 53
58private: 54private:
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index d4e5d88cf..7d32a39f0 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -13,12 +13,14 @@ namespace Kernel {
13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; 13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; 14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59}; 15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
16constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
16constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; 17constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
17constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; 18constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
18constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; 19constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
19constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; 20constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
20constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; 21constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
21constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; 22constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
23constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
22constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; 24constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
23constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110}; 25constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
24constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113}; 26constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
@@ -28,6 +30,7 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
28constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116}; 30constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
29constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117}; 31constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
30constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118}; 32constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
33constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
31constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119}; 34constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
32constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120}; 35constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
33constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121}; 36constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
new file mode 100644
index 000000000..d9e702f13
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -0,0 +1,367 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/arm/exclusive_monitor.h"
6#include "core/core.h"
7#include "core/hle/kernel/k_address_arbiter.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h"
14#include "core/memory.h"
15
16namespace Kernel {
17
18KAddressArbiter::KAddressArbiter(Core::System& system_)
19 : system{system_}, kernel{system.Kernel()} {}
20KAddressArbiter::~KAddressArbiter() = default;
21
22namespace {
23
24bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
25 *out = system.Memory().Read32(address);
26 return true;
27}
28
29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
30 auto& monitor = system.Monitor();
31 const auto current_core = system.CurrentCoreIndex();
32
33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
34 // TODO(bunnei): We should call CanAccessAtomic(..) here.
35
36 // Load the value from the address.
37 const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
38
39 // Compare it to the desired one.
40 if (current_value < value) {
41 // If less than, we want to try to decrement.
42 const s32 decrement_value = current_value - 1;
43
44 // Decrement and try to store.
45 if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
46 // If we failed to store, try again.
47 DecrementIfLessThan(system, out, address, value);
48 }
49 } else {
50 // Otherwise, clear our exclusive hold and finish.
51 monitor.ClearExclusive();
52 }
53
54 // We're done.
55 *out = current_value;
56 return true;
57}
58
59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
60 auto& monitor = system.Monitor();
61 const auto current_core = system.CurrentCoreIndex();
62
63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
64 // TODO(bunnei): We should call CanAccessAtomic(..) here.
65
66 // Load the value from the address.
67 const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
68
69 // Compare it to the desired one.
70 if (current_value == value) {
71 // If equal, we want to try to write the new value.
72
73 // Try to store.
74 if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
75 // If we failed to store, try again.
76 UpdateIfEqual(system, out, address, value, new_value);
77 }
78 } else {
79 // Otherwise, clear our exclusive hold and finish.
80 monitor.ClearExclusive();
81 }
82
83 // We're done.
84 *out = current_value;
85 return true;
86}
87
88} // namespace
89
90ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
91 // Perform signaling.
92 s32 num_waiters{};
93 {
94 KScopedSchedulerLock sl(kernel);
95
96 auto it = thread_tree.nfind_light({addr, -1});
97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
98 (it->GetAddressArbiterKey() == addr)) {
99 Thread* target_thread = std::addressof(*it);
100 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
101
102 ASSERT(target_thread->IsWaitingForAddressArbiter());
103 target_thread->Wakeup();
104
105 it = thread_tree.erase(it);
106 target_thread->ClearAddressArbiter();
107 ++num_waiters;
108 }
109 }
110 return RESULT_SUCCESS;
111}
112
113ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
114 // Perform signaling.
115 s32 num_waiters{};
116 {
117 KScopedSchedulerLock sl(kernel);
118
119 // Check the userspace value.
120 s32 user_value{};
121 R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
122 Svc::ResultInvalidCurrentMemory);
123 R_UNLESS(user_value == value, Svc::ResultInvalidState);
124
125 auto it = thread_tree.nfind_light({addr, -1});
126 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
127 (it->GetAddressArbiterKey() == addr)) {
128 Thread* target_thread = std::addressof(*it);
129 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
130
131 ASSERT(target_thread->IsWaitingForAddressArbiter());
132 target_thread->Wakeup();
133
134 it = thread_tree.erase(it);
135 target_thread->ClearAddressArbiter();
136 ++num_waiters;
137 }
138 }
139 return RESULT_SUCCESS;
140}
141
142ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
143 // Perform signaling.
144 s32 num_waiters{};
145 {
146 KScopedSchedulerLock sl(kernel);
147
148 auto it = thread_tree.nfind_light({addr, -1});
149 // Determine the updated value.
150 s32 new_value{};
151 if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
152 if (count <= 0) {
153 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
154 new_value = value - 2;
155 } else {
156 new_value = value + 1;
157 }
158 } else {
159 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
160 auto tmp_it = it;
161 s32 tmp_num_waiters{};
162 while ((++tmp_it != thread_tree.end()) &&
163 (tmp_it->GetAddressArbiterKey() == addr)) {
164 if ((tmp_num_waiters++) >= count) {
165 break;
166 }
167 }
168
169 if (tmp_num_waiters < count) {
170 new_value = value - 1;
171 } else {
172 new_value = value;
173 }
174 } else {
175 new_value = value + 1;
176 }
177 }
178 } else {
179 if (count <= 0) {
180 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
181 new_value = value - 1;
182 } else {
183 new_value = value + 1;
184 }
185 } else {
186 auto tmp_it = it;
187 s32 tmp_num_waiters{};
188 while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
189 (tmp_num_waiters < count + 1)) {
190 ++tmp_num_waiters;
191 ++tmp_it;
192 }
193
194 if (tmp_num_waiters == 0) {
195 new_value = value + 1;
196 } else if (tmp_num_waiters <= count) {
197 new_value = value - 1;
198 } else {
199 new_value = value;
200 }
201 }
202 }
203
204 // Check the userspace value.
205 s32 user_value{};
206 bool succeeded{};
207 if (value != new_value) {
208 succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
209 } else {
210 succeeded = ReadFromUser(system, std::addressof(user_value), addr);
211 }
212
213 R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
214 R_UNLESS(user_value == value, Svc::ResultInvalidState);
215
216 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
217 (it->GetAddressArbiterKey() == addr)) {
218 Thread* target_thread = std::addressof(*it);
219 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
220
221 ASSERT(target_thread->IsWaitingForAddressArbiter());
222 target_thread->Wakeup();
223
224 it = thread_tree.erase(it);
225 target_thread->ClearAddressArbiter();
226 ++num_waiters;
227 }
228 }
229 return RESULT_SUCCESS;
230}
231
232ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
233 // Prepare to wait.
234 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
235 Handle timer = InvalidHandle;
236
237 {
238 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
239
240 // Check that the thread isn't terminating.
241 if (cur_thread->IsTerminationRequested()) {
242 slp.CancelSleep();
243 return Svc::ResultTerminationRequested;
244 }
245
246 // Set the synced object.
247 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
248
249 // Read the value from userspace.
250 s32 user_value{};
251 bool succeeded{};
252 if (decrement) {
253 succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
254 } else {
255 succeeded = ReadFromUser(system, std::addressof(user_value), addr);
256 }
257
258 if (!succeeded) {
259 slp.CancelSleep();
260 return Svc::ResultInvalidCurrentMemory;
261 }
262
263 // Check that the value is less than the specified one.
264 if (user_value >= value) {
265 slp.CancelSleep();
266 return Svc::ResultInvalidState;
267 }
268
269 // Check that the timeout is non-zero.
270 if (timeout == 0) {
271 slp.CancelSleep();
272 return Svc::ResultTimedOut;
273 }
274
275 // Set the arbiter.
276 cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
277 thread_tree.insert(*cur_thread);
278 cur_thread->SetState(ThreadState::Waiting);
279 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
280 }
281
282 // Cancel the timer wait.
283 if (timer != InvalidHandle) {
284 auto& time_manager = kernel.TimeManager();
285 time_manager.UnscheduleTimeEvent(timer);
286 }
287
288 // Remove from the address arbiter.
289 {
290 KScopedSchedulerLock sl(kernel);
291
292 if (cur_thread->IsWaitingForAddressArbiter()) {
293 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
294 cur_thread->ClearAddressArbiter();
295 }
296 }
297
298 // Get the result.
299 KSynchronizationObject* dummy{};
300 return cur_thread->GetWaitResult(std::addressof(dummy));
301}
302
303ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
304 // Prepare to wait.
305 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
306 Handle timer = InvalidHandle;
307
308 {
309 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
310
311 // Check that the thread isn't terminating.
312 if (cur_thread->IsTerminationRequested()) {
313 slp.CancelSleep();
314 return Svc::ResultTerminationRequested;
315 }
316
317 // Set the synced object.
318 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
319
320 // Read the value from userspace.
321 s32 user_value{};
322 if (!ReadFromUser(system, std::addressof(user_value), addr)) {
323 slp.CancelSleep();
324 return Svc::ResultInvalidCurrentMemory;
325 }
326
327 // Check that the value is equal.
328 if (value != user_value) {
329 slp.CancelSleep();
330 return Svc::ResultInvalidState;
331 }
332
333 // Check that the timeout is non-zero.
334 if (timeout == 0) {
335 slp.CancelSleep();
336 return Svc::ResultTimedOut;
337 }
338
339 // Set the arbiter.
340 cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
341 thread_tree.insert(*cur_thread);
342 cur_thread->SetState(ThreadState::Waiting);
343 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
344 }
345
346 // Cancel the timer wait.
347 if (timer != InvalidHandle) {
348 auto& time_manager = kernel.TimeManager();
349 time_manager.UnscheduleTimeEvent(timer);
350 }
351
352 // Remove from the address arbiter.
353 {
354 KScopedSchedulerLock sl(kernel);
355
356 if (cur_thread->IsWaitingForAddressArbiter()) {
357 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
358 cur_thread->ClearAddressArbiter();
359 }
360 }
361
362 // Get the result.
363 KSynchronizationObject* dummy{};
364 return cur_thread->GetWaitResult(std::addressof(dummy));
365}
366
367} // namespace Kernel
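
The waiting-count branch in SignalAndModifyByWaitingCountIfEqual is dense because tmp_num_waiters counts waiters beyond the first one. Distilled into a standalone decision rule (a sketch of the >= 7.0.0 path only; the reading of the loop bounds is mine, not stated in the diff):

    #include <cassert>

    // How the userspace word moves relative to 'value', given the number of
    // threads waiting on the address and the requested wake count.
    int NewValueDelta(int num_waiters, int count) {
        if (count <= 0) {
            // Waking everyone: +1 if nobody was waiting, otherwise -2.
            return num_waiters == 0 ? +1 : -2;
        }
        if (num_waiters == 0) {
            return +1; // No waiters: increment.
        }
        if (num_waiters <= count) {
            return -1; // Every waiter gets woken: decrement.
        }
        return 0; // Some waiters remain: leave the value unchanged.
    }

    int main() {
        assert(NewValueDelta(0, 5) == +1);
        assert(NewValueDelta(3, 5) == -1);
        assert(NewValueDelta(9, 5) == 0);
        assert(NewValueDelta(0, 0) == +1);
        assert(NewValueDelta(4, 0) == -2);
    }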
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
new file mode 100644
index 000000000..8d379b524
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -0,0 +1,70 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/assert.h"
8#include "common/common_types.h"
9#include "core/hle/kernel/k_condition_variable.h"
10#include "core/hle/kernel/svc_types.h"
11
12union ResultCode;
13
14namespace Core {
15class System;
16}
17
18namespace Kernel {
19
20class KernelCore;
21
22class KAddressArbiter {
23public:
24 using ThreadTree = KConditionVariable::ThreadTree;
25
26 explicit KAddressArbiter(Core::System& system_);
27 ~KAddressArbiter();
28
29 [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
30 s32 count) {
31 switch (type) {
32 case Svc::SignalType::Signal:
33 return Signal(addr, count);
34 case Svc::SignalType::SignalAndIncrementIfEqual:
35 return SignalAndIncrementIfEqual(addr, value, count);
36 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
37 return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
38 }
39 UNREACHABLE();
40 return RESULT_UNKNOWN;
41 }
42
43 [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
44 s64 timeout) {
45 switch (type) {
46 case Svc::ArbitrationType::WaitIfLessThan:
47 return WaitIfLessThan(addr, value, false, timeout);
48 case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
49 return WaitIfLessThan(addr, value, true, timeout);
50 case Svc::ArbitrationType::WaitIfEqual:
51 return WaitIfEqual(addr, value, timeout);
52 }
53 UNREACHABLE();
54 return RESULT_UNKNOWN;
55 }
56
57private:
58 [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
59 [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
60 [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
61 [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
62 [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
63
64 ThreadTree thread_tree;
65
66 Core::System& system;
67 KernelCore& kernel;
68};
69
70} // namespace Kernel
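
The nfind_light({addr, -1}) probes in k_address_arbiter.cpp rely on the ThreadTree being ordered by (arbiter address, thread priority), with the ordering itself defined in thread.h. Since real priorities are never negative, a probe priority of -1 lands on the highest-priority waiter for that address. The same idea sketched with std::set (names and layout here are illustrative, not the emulator's):

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <utility>

    using VAddr = std::uint64_t;

    int main() {
        // Waiters keyed by (address, priority); a lower priority value is
        // more urgent, matching the kernel convention.
        std::set<std::pair<VAddr, std::int32_t>> waiters{
            {0x1000, 44}, {0x1000, 28}, {0x2000, 30}};

        const VAddr addr = 0x1000;
        // Analogue of thread_tree.nfind_light({addr, -1}): the first element
        // ordered at or after (addr, -1).
        for (auto it = waiters.lower_bound({addr, -1});
             it != waiters.end() && it->first == addr; ++it) {
            std::cout << "wake waiter with priority " << it->second << '\n';
        }
        // Prints 28 then 44; the waiter on 0x2000 is left alone.
    }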
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
new file mode 100644
index 000000000..49a068310
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -0,0 +1,349 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <vector>
6
7#include "core/arm/exclusive_monitor.h"
8#include "core/core.h"
9#include "core/hle/kernel/k_condition_variable.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/svc_common.h"
16#include "core/hle/kernel/svc_results.h"
17#include "core/hle/kernel/thread.h"
18#include "core/memory.h"
19
20namespace Kernel {
21
22namespace {
23
24bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
25 *out = system.Memory().Read32(address);
26 return true;
27}
28
29bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
30 system.Memory().Write32(address, *p);
31 return true;
32}
33
34bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
35 u32 new_orr_mask) {
36 auto& monitor = system.Monitor();
37 const auto current_core = system.CurrentCoreIndex();
38
39 // Load the value from the address.
40 const auto expected = monitor.ExclusiveRead32(current_core, address);
41
42 // Orr in the new mask.
43 u32 value = expected | new_orr_mask;
44
45 // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
46 if (!expected) {
47 value = if_zero;
48 }
49
50 // Try to store.
51 if (!monitor.ExclusiveWrite32(current_core, address, value)) {
52 // If we failed to store, try again.
53 return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
54 }
55
56 // We're done.
57 *out = expected;
58 return true;
59}
60
61} // namespace
62
63KConditionVariable::KConditionVariable(Core::System& system_)
64 : system{system_}, kernel{system.Kernel()} {}
65
66KConditionVariable::~KConditionVariable() = default;
67
68ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
69 Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
70
71 // Signal the address.
72 {
73 KScopedSchedulerLock sl(kernel);
74
75 // Remove waiter thread.
76 s32 num_waiters{};
77 Thread* next_owner_thread =
78 owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
79
80 // Determine the next tag.
81 u32 next_value{};
82 if (next_owner_thread) {
83 next_value = next_owner_thread->GetAddressKeyValue();
84 if (num_waiters > 1) {
85 next_value |= Svc::HandleWaitMask;
86 }
87
88 next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
89 next_owner_thread->Wakeup();
90 }
91
92 // Write the value to userspace.
93 if (!WriteToUser(system, addr, std::addressof(next_value))) {
94 if (next_owner_thread) {
95 next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
96 }
97
98 return Svc::ResultInvalidCurrentMemory;
99 }
100 }
101
102 return RESULT_SUCCESS;
103}
104
105ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
106 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
107
108 // Wait for the address.
109 {
110 std::shared_ptr<Thread> owner_thread;
111 ASSERT(!owner_thread);
112 {
113 KScopedSchedulerLock sl(kernel);
114 cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
115
116 // Check if the thread should terminate.
117 R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);
118
119 {
120 // Read the tag from userspace.
121 u32 test_tag{};
122 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
123 Svc::ResultInvalidCurrentMemory);
124
125 // If the tag isn't the handle (with wait mask), we're done.
126 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
127
128 // Get the lock owner thread.
129 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
130 R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
131
132 // Update the lock.
133 cur_thread->SetAddressKey(addr, value);
134 owner_thread->AddWaiter(cur_thread);
135 cur_thread->SetState(ThreadState::Waiting);
136 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
137 cur_thread->SetMutexWaitAddressForDebugging(addr);
138 }
139 }
140 ASSERT(owner_thread);
141 }
142
143 // Remove the thread as a waiter from the lock owner.
144 {
145 KScopedSchedulerLock sl(kernel);
146 Thread* owner_thread = cur_thread->GetLockOwner();
147 if (owner_thread != nullptr) {
148 owner_thread->RemoveWaiter(cur_thread);
149 }
150 }
151
152 // Get the wait result.
153 KSynchronizationObject* dummy{};
154 return cur_thread->GetWaitResult(std::addressof(dummy));
155}
156
157Thread* KConditionVariable::SignalImpl(Thread* thread) {
158 // Check pre-conditions.
159 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
160
161 // Update the tag.
162 VAddr address = thread->GetAddressKey();
163 u32 own_tag = thread->GetAddressKeyValue();
164
165 u32 prev_tag{};
166 bool can_access{};
167 {
168 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
169 // TODO(bunnei): We should call CanAccessAtomic(..) here.
170 can_access = true;
171 if (can_access) {
172 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
173 Svc::HandleWaitMask);
174 }
175 }
176
177 Thread* thread_to_close = nullptr;
178 if (can_access) {
179 if (prev_tag == InvalidHandle) {
180 // If nobody held the lock previously, we're all good.
181 thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
182 thread->Wakeup();
183 } else {
184 // Get the previous owner.
185 auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
186 prev_tag & ~Svc::HandleWaitMask);
187
188 if (owner_thread) {
189 // Add the thread as a waiter on the owner.
190 owner_thread->AddWaiter(thread);
191 thread_to_close = owner_thread.get();
192 } else {
193 // The lock was tagged with a thread that doesn't exist.
194 thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
195 thread->Wakeup();
196 }
197 }
198 } else {
199 // If the address wasn't accessible, note so.
200 thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
201 thread->Wakeup();
202 }
203
204 return thread_to_close;
205}
206
207void KConditionVariable::Signal(u64 cv_key, s32 count) {
208 // Prepare for signaling.
209 constexpr int MaxThreads = 16;
210
211 // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
212 // std::shared_ptr.
213 std::vector<std::shared_ptr<Thread>> thread_list;
214 std::array<Thread*, MaxThreads> thread_array;
215 s32 num_to_close{};
216
217 // Perform signaling.
218 s32 num_waiters{};
219 {
220 KScopedSchedulerLock sl(kernel);
221
222 auto it = thread_tree.nfind_light({cv_key, -1});
223 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
224 (it->GetConditionVariableKey() == cv_key)) {
225 Thread* target_thread = std::addressof(*it);
226
227 if (Thread* thread = SignalImpl(target_thread); thread != nullptr) {
228 if (num_to_close < MaxThreads) {
229 thread_array[num_to_close++] = thread;
230 } else {
231 thread_list.push_back(SharedFrom(thread));
232 }
233 }
234
235 it = thread_tree.erase(it);
236 target_thread->ClearConditionVariable();
237 ++num_waiters;
238 }
239
240 // If we have no waiters, clear the has waiter flag.
241 if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
242 const u32 has_waiter_flag{};
243 WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
244 }
245 }
246
247 // Close threads in the array.
248 for (auto i = 0; i < num_to_close; ++i) {
249 thread_array[i]->Close();
250 }
251
252 // Close threads in the list.
253 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
254 (*it)->Close();
255 }
256}
257
258ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
259 // Prepare to wait.
260 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
261 Handle timer = InvalidHandle;
262
263 {
264 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
265
266 // Set the synced object.
267 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
268
269 // Check that the thread isn't terminating.
270 if (cur_thread->IsTerminationRequested()) {
271 slp.CancelSleep();
272 return Svc::ResultTerminationRequested;
273 }
274
275 // Update the value and process for the next owner.
276 {
277 // Remove waiter thread.
278 s32 num_waiters{};
279 Thread* next_owner_thread =
280 cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
281
282 // Update for the next owner thread.
283 u32 next_value{};
284 if (next_owner_thread != nullptr) {
285 // Get the next tag value.
286 next_value = next_owner_thread->GetAddressKeyValue();
287 if (num_waiters > 1) {
288 next_value |= Svc::HandleWaitMask;
289 }
290
291 // Wake up the next owner.
292 next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
293 next_owner_thread->Wakeup();
294 }
295
296 // Write to the cv key.
297 {
298 const u32 has_waiter_flag = 1;
299 WriteToUser(system, key, std::addressof(has_waiter_flag));
300 // TODO(bunnei): We should call DataMemoryBarrier(..) here.
301 }
302
303 // Write the value to userspace.
304 if (!WriteToUser(system, addr, std::addressof(next_value))) {
305 slp.CancelSleep();
306 return Svc::ResultInvalidCurrentMemory;
307 }
308 }
309
310 // Update condition variable tracking.
311 {
312 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
313 thread_tree.insert(*cur_thread);
314 }
315
316 // If the timeout is non-zero, set the thread as waiting.
317 if (timeout != 0) {
318 cur_thread->SetState(ThreadState::Waiting);
319 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
320 cur_thread->SetMutexWaitAddressForDebugging(addr);
321 }
322 }
323
324 // Cancel the timer wait.
325 if (timer != InvalidHandle) {
326 auto& time_manager = kernel.TimeManager();
327 time_manager.UnscheduleTimeEvent(timer);
328 }
329
330 // Remove from the condition variable.
331 {
332 KScopedSchedulerLock sl(kernel);
333
334 if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
335 owner->RemoveWaiter(cur_thread);
336 }
337
338 if (cur_thread->IsWaitingForConditionVariable()) {
339 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
340 cur_thread->ClearConditionVariable();
341 }
342 }
343
344 // Get the result.
345 KSynchronizationObject* dummy{};
346 return cur_thread->GetWaitResult(std::addressof(dummy));
347}
348
349} // namespace Kernel
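
Both SignalToAddress and the condition-variable paths above speak the same lock-word protocol: the word in guest memory holds the owner's handle, with Svc::HandleWaitMask OR'd in once at least one thread is waiting. A self-contained sketch of the encoding (the mask value below is an assumption; the diff only shows the symbolic name):

    #include <cstdint>
    #include <iostream>

    using Handle = std::uint32_t;
    constexpr Handle HandleWaitMask = 1u << 30; // assumed value of Svc::HandleWaitMask

    int main() {
        const Handle owner = 0xABCD; // hypothetical owner thread handle

        // Uncontended: the tag is just the owner's handle.
        std::uint32_t tag = owner;

        // A waiter arrives (WaitForAddress): set the wait bit so the owner
        // knows to signal on release.
        tag |= HandleWaitMask;

        const bool has_waiters = (tag & HandleWaitMask) != 0;
        const Handle prev_owner = tag & ~HandleWaitMask; // as in SignalImpl
        std::cout << std::boolalpha << has_waiters << " 0x" << std::hex
                  << prev_owner << '\n'; // prints: true 0xabcd
    }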
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
new file mode 100644
index 000000000..98ed5b323
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -0,0 +1,59 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/assert.h"
8#include "common/common_types.h"
9
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/result.h"
14
15namespace Core {
16class System;
17}
18
19namespace Kernel {
20
21class KConditionVariable {
22public:
23 using ThreadTree = typename Thread::ConditionVariableThreadTreeType;
24
25 explicit KConditionVariable(Core::System& system_);
26 ~KConditionVariable();
27
28 // Arbitration
29 [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
30 [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);
31
32 // Condition variable
33 void Signal(u64 cv_key, s32 count);
34 [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);
35
36private:
37 [[nodiscard]] Thread* SignalImpl(Thread* thread);
38
39 ThreadTree thread_tree;
40
41 Core::System& system;
42 KernelCore& kernel;
43};
44
45inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
46 Thread* thread) {
47 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
48
49 tree->erase(tree->iterator_to(*thread));
50}
51
52inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
53 Thread* thread) {
54 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
55
56 tree->insert(*thread);
57}
58
59} // namespace Kernel
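
BeforeUpdatePriority and AfterUpdatePriority exist because the tree is ordered partly by thread priority, and an ordered container's key must never change while the element is inside it: the thread is removed, the priority is written, then it is re-inserted. The same discipline shown with std::set (illustrative only):

    #include <cassert>
    #include <set>

    struct Waiter {
        int key;      // condition-variable key
        int priority; // part of the ordering; may change at runtime
    };

    struct ByKeyThenPriority {
        bool operator()(const Waiter* a, const Waiter* b) const {
            if (a->key != b->key) {
                return a->key < b->key;
            }
            return a->priority < b->priority;
        }
    };

    int main() {
        Waiter w{42, 30};
        std::set<Waiter*, ByKeyThenPriority> tree{&w};

        // Mutating w.priority in place would silently corrupt the ordering.
        // Mirror BeforeUpdatePriority / AfterUpdatePriority instead:
        tree.erase(&w);  // BeforeUpdatePriority
        w.priority = 10; // the actual priority update
        tree.insert(&w); // AfterUpdatePriority

        assert(tree.count(&w) == 1);
    }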
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c5fd82a6b..42f0ea483 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -180,22 +180,22 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
180 return cores_needing_scheduling; 180 return cores_needing_scheduling;
181} 181}
182 182
183void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { 183void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
184 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 184 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
185 185
186 // Check if the state has changed, because if it hasn't there's nothing to do. 186 // Check if the state has changed, because if it hasn't there's nothing to do.
187 const auto cur_state = thread->scheduling_state; 187 const auto cur_state = thread->GetRawState();
188 if (cur_state == old_state) { 188 if (cur_state == old_state) {
189 return; 189 return;
190 } 190 }
191 191
192 // Update the priority queues. 192 // Update the priority queues.
193 if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 193 if (old_state == ThreadState::Runnable) {
194 // If we were previously runnable, then we're not runnable now, and we should remove. 194 // If we were previously runnable, then we're not runnable now, and we should remove.
195 GetPriorityQueue(kernel).Remove(thread); 195 GetPriorityQueue(kernel).Remove(thread);
196 IncrementScheduledCount(thread); 196 IncrementScheduledCount(thread);
197 SetSchedulerUpdateNeeded(kernel); 197 SetSchedulerUpdateNeeded(kernel);
198 } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 198 } else if (cur_state == ThreadState::Runnable) {
199 // If we're now runnable, then we weren't previously, and we should add. 199 // If we're now runnable, then we weren't previously, and we should add.
200 GetPriorityQueue(kernel).PushBack(thread); 200 GetPriorityQueue(kernel).PushBack(thread);
201 IncrementScheduledCount(thread); 201 IncrementScheduledCount(thread);
@@ -203,13 +203,11 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
203 } 203 }
204} 204}
205 205
206void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, 206void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) {
207 u32 old_priority) {
208
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 207 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
210 208
211 // If the thread is runnable, we want to change its priority in the queue. 209 // If the thread is runnable, we want to change its priority in the queue.
212 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 210 if (thread->GetRawState() == ThreadState::Runnable) {
213 GetPriorityQueue(kernel).ChangePriority( 211 GetPriorityQueue(kernel).ChangePriority(
214 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); 212 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
215 IncrementScheduledCount(thread); 213 IncrementScheduledCount(thread);
@@ -222,7 +220,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 220 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223 221
224 // If the thread is runnable, we want to change its affinity in the queue. 222 // If the thread is runnable, we want to change its affinity in the queue.
225 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 223 if (thread->GetRawState() == ThreadState::Runnable) {
226 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); 224 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
227 IncrementScheduledCount(thread); 225 IncrementScheduledCount(thread);
228 SetSchedulerUpdateNeeded(kernel); 226 SetSchedulerUpdateNeeded(kernel);
@@ -292,7 +290,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
292 290
293 // If the best thread we can choose has a priority the same or worse than ours, try to 291 // If the best thread we can choose has a priority the same or worse than ours, try to
294 // migrate a higher priority thread. 292 // migrate a higher priority thread.
295 if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) { 293 if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
296 Thread* suggested = priority_queue.GetSuggestedFront(core_id); 294 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
297 while (suggested != nullptr) { 295 while (suggested != nullptr) {
298 // If the suggestion's priority is the same as ours, don't bother. 296 // If the suggestion's priority is the same as ours, don't bother.
@@ -395,8 +393,8 @@ void KScheduler::YieldWithoutCoreMigration() {
395 { 393 {
396 KScopedSchedulerLock lock(kernel); 394 KScopedSchedulerLock lock(kernel);
397 395
398 const auto cur_state = cur_thread.scheduling_state; 396 const auto cur_state = cur_thread.GetRawState();
399 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 397 if (cur_state == ThreadState::Runnable) {
400 // Put the current thread at the back of the queue. 398 // Put the current thread at the back of the queue.
401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); 399 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
402 IncrementScheduledCount(std::addressof(cur_thread)); 400 IncrementScheduledCount(std::addressof(cur_thread));
@@ -436,8 +434,8 @@ void KScheduler::YieldWithCoreMigration() {
436 { 434 {
437 KScopedSchedulerLock lock(kernel); 435 KScopedSchedulerLock lock(kernel);
438 436
439 const auto cur_state = cur_thread.scheduling_state; 437 const auto cur_state = cur_thread.GetRawState();
440 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 438 if (cur_state == ThreadState::Runnable) {
441 // Get the current active core. 439 // Get the current active core.
442 const s32 core_id = cur_thread.GetActiveCore(); 440 const s32 core_id = cur_thread.GetActiveCore();
443 441
@@ -526,8 +524,8 @@ void KScheduler::YieldToAnyThread() {
526 { 524 {
527 KScopedSchedulerLock lock(kernel); 525 KScopedSchedulerLock lock(kernel);
528 526
529 const auto cur_state = cur_thread.scheduling_state; 527 const auto cur_state = cur_thread.GetRawState();
530 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 528 if (cur_state == ThreadState::Runnable) {
531 // Get the current active core. 529 // Get the current active core.
532 const s32 core_id = cur_thread.GetActiveCore(); 530 const s32 core_id = cur_thread.GetActiveCore();
533 531
@@ -645,8 +643,7 @@ void KScheduler::Unload(Thread* thread) {
645 643
646void KScheduler::Reload(Thread* thread) { 644void KScheduler::Reload(Thread* thread) {
647 if (thread) { 645 if (thread) {
648 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, 646 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
649 "Thread must be runnable.");
650 647
651 // Cancel any outstanding wakeup events for this thread 648 // Cancel any outstanding wakeup events for this thread
652 thread->SetIsRunning(true); 649 thread->SetIsRunning(true);
@@ -725,7 +722,7 @@ void KScheduler::SwitchToCurrent() {
725 do { 722 do {
726 if (current_thread != nullptr && !current_thread->IsHLEThread()) { 723 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
727 current_thread->context_guard.lock(); 724 current_thread->context_guard.lock();
728 if (!current_thread->IsRunnable()) { 725 if (current_thread->GetRawState() != ThreadState::Runnable) {
729 current_thread->context_guard.unlock(); 726 current_thread->context_guard.unlock();
730 break; 727 break;
731 } 728 }
@@ -772,7 +769,7 @@ void KScheduler::Initialize() {
772 769
773 { 770 {
774 KScopedSchedulerLock lock{system.Kernel()}; 771 KScopedSchedulerLock lock{system.Kernel()};
775 idle_thread->SetStatus(ThreadStatus::Ready); 772 idle_thread->SetState(ThreadState::Runnable);
776 } 773 }
777} 774}
778 775
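
A quick way to internalize OnThreadStateChanged: the priority queue is only touched on transitions across the Runnable boundary, never on moves between two non-runnable states. A compilable miniature of that rule, with stub types in place of KPriorityQueue and Thread (hypothetical names, not yuzu's API):

    #include <algorithm>
    #include <cassert>
    #include <deque>

    enum class ThreadState { Waiting, Runnable };

    struct FakeThread { int id; };

    struct RunQueue {
        std::deque<FakeThread*> q;
        void PushBack(FakeThread* t) { q.push_back(t); }
        void Remove(FakeThread* t) { q.erase(std::find(q.begin(), q.end(), t)); }
    };

    // Same shape as KScheduler::OnThreadStateChanged: the queue changes only
    // when a thread enters or leaves the Runnable state.
    void OnStateChanged(RunQueue& queue, FakeThread* t, ThreadState old_state,
                        ThreadState cur_state) {
        if (cur_state == old_state) {
            return; // no transition, nothing to do
        }
        if (old_state == ThreadState::Runnable) {
            queue.Remove(t);   // left the runnable set
        } else if (cur_state == ThreadState::Runnable) {
            queue.PushBack(t); // joined the runnable set
        }
    }

    int main() {
        RunQueue queue;
        FakeThread t{1};
        OnStateChanged(queue, &t, ThreadState::Waiting, ThreadState::Runnable);
        assert(queue.q.size() == 1);
        OnStateChanged(queue, &t, ThreadState::Runnable, ThreadState::Waiting);
        assert(queue.q.empty());
        return 0;
    }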
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index e84abc84c..783665123 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -100,11 +100,10 @@ public:
100 void YieldToAnyThread(); 100 void YieldToAnyThread();
101 101
102 /// Notify the scheduler a thread's status has changed. 102 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state); 103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state);
104 104
105 /// Notify the scheduler a thread's priority has changed. 105 /// Notify the scheduler a thread's priority has changed.
106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, 106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority);
107 u32 old_priority);
108 107
109 /// Notify the scheduler a thread's core and/or affinity mask has changed. 108 /// Notify the scheduler a thread's core and/or affinity mask has changed.
110 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, 109 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 2f1c1f691..9b40bd22c 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -19,7 +19,7 @@ class KernelCore;
19template <typename SchedulerType> 19template <typename SchedulerType>
20class KAbstractSchedulerLock { 20class KAbstractSchedulerLock {
21public: 21public:
22 explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {} 22 explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
23 23
24 bool IsLockedByCurrentThread() const { 24 bool IsLockedByCurrentThread() const {
25 return this->owner_thread == kernel.GetCurrentEmuThreadID(); 25 return this->owner_thread == kernel.GetCurrentEmuThreadID();
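
The renamed constructor parameter aside, the useful thing to see in KAbstractSchedulerLock is that it records which emulated thread holds it; that ownership record is what backs the ASSERT(kernel.GlobalSchedulerContext().IsLocked()) checks scattered through this change. A rough standalone sketch of the ownership-tracking idea only (the real lock is also recursive and defers scheduling work, none of which is modeled here):

    #include <cassert>
    #include <mutex>
    #include <thread>

    class OwnerTrackingLock {
    public:
        void Lock() {
            mutex.lock();
            owner = std::this_thread::get_id(); // remember who holds it
        }
        void Unlock() {
            owner = std::thread::id{};          // clear before releasing
            mutex.unlock();
        }
        bool IsLockedByCurrentThread() const {
            return owner == std::this_thread::get_id();
        }

    private:
        std::mutex mutex;
        std::thread::id owner{};
    };

    int main() {
        OwnerTrackingLock lock;
        lock.Lock();
        assert(lock.IsLockedByCurrentThread());
        lock.Unlock();
        assert(!lock.IsLockedByCurrentThread());
        return 0;
    }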
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
new file mode 100644
index 000000000..1c508cb55
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -0,0 +1,172 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/common_types.h"
7#include "core/hle/kernel/k_scheduler.h"
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_synchronization_object.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13
14namespace Kernel {
15
16ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
17 KSynchronizationObject** objects, const s32 num_objects,
18 s64 timeout) {
19 // Allocate space for thread nodes (heap-backed here; the real kernel uses the stack).
20 std::vector<ThreadListNode> thread_nodes(num_objects);
21
22 // Prepare for wait.
23 Thread* thread = kernel.CurrentScheduler()->GetCurrentThread();
24 Handle timer = InvalidHandle;
25
26 {
27 // Setup the scheduling lock and sleep.
28 KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);
29
30 // Check if any of the objects are already signaled.
31 for (auto i = 0; i < num_objects; ++i) {
32 ASSERT(objects[i] != nullptr);
33
34 if (objects[i]->IsSignaled()) {
35 *out_index = i;
36 slp.CancelSleep();
37 return RESULT_SUCCESS;
38 }
39 }
40
41 // Check if the timeout is zero.
42 if (timeout == 0) {
43 slp.CancelSleep();
44 return Svc::ResultTimedOut;
45 }
46
47 // Check if the thread should terminate.
48 if (thread->IsTerminationRequested()) {
49 slp.CancelSleep();
50 return Svc::ResultTerminationRequested;
51 }
52
53 // Check if waiting was canceled.
54 if (thread->IsWaitCancelled()) {
55 slp.CancelSleep();
56 thread->ClearWaitCancelled();
57 return Svc::ResultCancelled;
58 }
59
60 // Add the waiters.
61 for (auto i = 0; i < num_objects; ++i) {
62 thread_nodes[i].thread = thread;
63 thread_nodes[i].next = nullptr;
64
65 if (objects[i]->thread_list_tail == nullptr) {
66 objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
67 } else {
68 objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
69 }
70
71 objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
72 }
73
74 // For debugging only
75 thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});
76
77 // Mark the thread as waiting.
78 thread->SetCancellable();
79 thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
80 thread->SetState(ThreadState::Waiting);
81 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
82 }
83
84 // The lock/sleep is done, so we should be able to get our result.
85
86 // Thread is no longer cancellable.
87 thread->ClearCancellable();
88
89 // For debugging only
90 thread->SetWaitObjectsForDebugging({});
91
92 // Cancel the timer as needed.
93 if (timer != InvalidHandle) {
94 auto& time_manager = kernel.TimeManager();
95 time_manager.UnscheduleTimeEvent(timer);
96 }
97
98 // Get the wait result.
99 ResultCode wait_result{RESULT_SUCCESS};
100 s32 sync_index = -1;
101 {
102 KScopedSchedulerLock lock(kernel);
103 KSynchronizationObject* synced_obj;
104 wait_result = thread->GetWaitResult(std::addressof(synced_obj));
105
106 for (auto i = 0; i < num_objects; ++i) {
107 // Unlink the object from the list.
108 ThreadListNode* prev_ptr =
109 reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
110 ThreadListNode* prev_val = nullptr;
111 ThreadListNode *prev, *tail_prev;
112
113 do {
114 prev = prev_ptr;
115 prev_ptr = prev_ptr->next;
116 tail_prev = prev_val;
117 prev_val = prev_ptr;
118 } while (prev_ptr != std::addressof(thread_nodes[i]));
119
120 if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
121 objects[i]->thread_list_tail = tail_prev;
122 }
123
124 prev->next = thread_nodes[i].next;
125
126 if (objects[i] == synced_obj) {
127 sync_index = i;
128 }
129 }
130 }
131
132 // Set output.
133 *out_index = sync_index;
134 return wait_result;
135}
136
137KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
138
139KSynchronizationObject::~KSynchronizationObject() = default;
140
141void KSynchronizationObject::NotifyAvailable(ResultCode result) {
142 KScopedSchedulerLock lock(kernel);
143
144 // If we're not signaled, we've nothing to notify.
145 if (!this->IsSignaled()) {
146 return;
147 }
148
149 // Iterate over each thread.
150 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
151 Thread* thread = cur_node->thread;
152 if (thread->GetState() == ThreadState::Waiting) {
153 thread->SetSyncedObject(this, result);
154 thread->SetState(ThreadState::Runnable);
155 }
156 }
157}
158
159std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
160 std::vector<Thread*> threads;
161
162 // If debugging, dump the list of waiters.
163 {
164 KScopedSchedulerLock lock(kernel);
165 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
166 threads.emplace_back(cur_node->thread);
167 }
168 }
169
170 return threads;
171}
172} // namespace Kernel
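
The first half of Wait() is a sequence of early-outs that all cancel the sleep before it starts: an already-signaled object wins, then a zero timeout, then a pending termination request, then a cancelled wait; only after all four does the thread enqueue itself on every object and actually sleep. A condensed, hypothetical model of just that ordering (stub types; the real code does this under KScopedSchedulerLockAndSleep and returns ResultCode):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class WaitResult { Success, TimedOut, Terminated, Cancelled, Pending };

    struct FakeObject { bool signaled = false; };

    WaitResult TryWait(const std::vector<FakeObject*>& objects,
                       std::int64_t timeout, bool termination_requested,
                       bool wait_cancelled, std::int32_t* out_index) {
        // 1. Any object already signaled? Report its index, no sleep.
        for (std::size_t i = 0; i < objects.size(); ++i) {
            if (objects[i]->signaled) {
                *out_index = static_cast<std::int32_t>(i);
                return WaitResult::Success;
            }
        }
        // 2. A zero timeout is a poll: fail immediately rather than sleep.
        if (timeout == 0) {
            return WaitResult::TimedOut;
        }
        // 3. Termination and cancellation also beat the sleep.
        if (termination_requested) {
            return WaitResult::Terminated;
        }
        if (wait_cancelled) {
            return WaitResult::Cancelled;
        }
        // 4. Otherwise the thread would enqueue itself and sleep (omitted).
        return WaitResult::Pending;
    }

    int main() {
        FakeObject a, b;
        b.signaled = true;
        std::int32_t index = -1;
        const auto r = TryWait({&a, &b}, -1, false, false, &index);
        return (r == WaitResult::Success && index == 1) ? 0 : 1;
    }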
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
new file mode 100644
index 000000000..14d80ebf1
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -0,0 +1,58 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <vector>
8
9#include "core/hle/kernel/object.h"
10#include "core/hle/result.h"
11
12namespace Kernel {
13
14class KernelCore;
15class Synchronization;
16class Thread;
17
18/// Class that represents a Kernel object that a thread can be waiting on
19class KSynchronizationObject : public Object {
20public:
21 struct ThreadListNode {
22 ThreadListNode* next{};
23 Thread* thread{};
24 };
25
26 [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
27 KSynchronizationObject** objects, const s32 num_objects,
28 s64 timeout);
29
30 [[nodiscard]] virtual bool IsSignaled() const = 0;
31
32 [[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const;
33
34protected:
35 explicit KSynchronizationObject(KernelCore& kernel);
36 virtual ~KSynchronizationObject();
37
38 void NotifyAvailable(ResultCode result);
39 void NotifyAvailable() {
40 return this->NotifyAvailable(RESULT_SUCCESS);
41 }
42
43private:
44 ThreadListNode* thread_list_head{};
45 ThreadListNode* thread_list_tail{};
46};
47
48// Specialization of DynamicObjectCast for KSynchronizationObjects
49template <>
50inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
51 std::shared_ptr<Object> object) {
52 if (object != nullptr && object->IsWaitable()) {
53 return std::static_pointer_cast<KSynchronizationObject>(object);
54 }
55 return nullptr;
56}
57
58} // namespace Kernel
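
The DynamicObjectCast specialization above avoids RTTI: waitability is a runtime tag (IsWaitable()) that the object reports about itself, and a passing check licenses a plain static_pointer_cast. The same idea in a self-contained sketch with stand-in types:

    #include <cassert>
    #include <memory>

    struct FakeObject {
        virtual ~FakeObject() = default;
        virtual bool IsWaitable() const { return false; }
    };

    struct FakeSyncObject : FakeObject {
        bool IsWaitable() const override { return true; }
    };

    // Tag-checked downcast, mirroring the specialization in this header.
    std::shared_ptr<FakeSyncObject> CastToSync(std::shared_ptr<FakeObject> object) {
        if (object != nullptr && object->IsWaitable()) {
            return std::static_pointer_cast<FakeSyncObject>(object);
        }
        return nullptr;
    }

    int main() {
        std::shared_ptr<FakeObject> plain = std::make_shared<FakeObject>();
        std::shared_ptr<FakeObject> sync = std::make_shared<FakeSyncObject>();
        assert(CastToSync(plain) == nullptr);
        assert(CastToSync(sync) != nullptr);
        return 0;
    }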
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e8ece8164..c0ff287a6 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -38,7 +38,6 @@
38#include "core/hle/kernel/resource_limit.h" 38#include "core/hle/kernel/resource_limit.h"
39#include "core/hle/kernel/service_thread.h" 39#include "core/hle/kernel/service_thread.h"
40#include "core/hle/kernel/shared_memory.h" 40#include "core/hle/kernel/shared_memory.h"
41#include "core/hle/kernel/synchronization.h"
42#include "core/hle/kernel/thread.h" 41#include "core/hle/kernel/thread.h"
43#include "core/hle/kernel/time_manager.h" 42#include "core/hle/kernel/time_manager.h"
44#include "core/hle/lock.h" 43#include "core/hle/lock.h"
@@ -51,8 +50,7 @@ namespace Kernel {
51 50
52struct KernelCore::Impl { 51struct KernelCore::Impl {
53 explicit Impl(Core::System& system, KernelCore& kernel) 52 explicit Impl(Core::System& system, KernelCore& kernel)
54 : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{ 53 : time_manager{system}, global_handle_table{kernel}, system{system} {}
55 system} {}
56 54
57 void SetMulticore(bool is_multicore) { 55 void SetMulticore(bool is_multicore) {
58 this->is_multicore = is_multicore; 56 this->is_multicore = is_multicore;
@@ -307,7 +305,6 @@ struct KernelCore::Impl {
307 std::vector<std::shared_ptr<Process>> process_list; 305 std::vector<std::shared_ptr<Process>> process_list;
308 Process* current_process = nullptr; 306 Process* current_process = nullptr;
309 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; 307 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
310 Kernel::Synchronization synchronization;
311 Kernel::TimeManager time_manager; 308 Kernel::TimeManager time_manager;
312 309
313 std::shared_ptr<ResourceLimit> system_resource_limit; 310 std::shared_ptr<ResourceLimit> system_resource_limit;
@@ -461,14 +458,6 @@ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Kern
461 return impl->interrupts; 458 return impl->interrupts;
462} 459}
463 460
464Kernel::Synchronization& KernelCore::Synchronization() {
465 return impl->synchronization;
466}
467
468const Kernel::Synchronization& KernelCore::Synchronization() const {
469 return impl->synchronization;
470}
471
472Kernel::TimeManager& KernelCore::TimeManager() { 461Kernel::TimeManager& KernelCore::TimeManager() {
473 return impl->time_manager; 462 return impl->time_manager;
474} 463}
@@ -613,9 +602,11 @@ void KernelCore::Suspend(bool in_suspention) {
613 const bool should_suspend = exception_exited || in_suspention; 602 const bool should_suspend = exception_exited || in_suspention;
614 { 603 {
615 KScopedSchedulerLock lock(*this); 604 KScopedSchedulerLock lock(*this);
616 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; 605 const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
617 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 606 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
618 impl->suspend_threads[i]->SetStatus(status); 607 impl->suspend_threads[i]->SetState(state);
608 impl->suspend_threads[i]->SetWaitReasonForDebugging(
609 ThreadWaitReasonForDebugging::Suspended);
619 } 610 }
620 } 611 }
621} 612}
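
The polarity in Suspend() is easy to misread: requesting suspension makes the per-core suspend threads Runnable (so they occupy every core), and resuming parks them back in Waiting. A trivially compilable restatement of that selection:

    #include <cassert>

    enum class ThreadState { Waiting, Runnable };

    // Suspend threads run when suspension is wanted, and wait otherwise.
    ThreadState SuspendThreadState(bool exception_exited, bool in_suspension) {
        const bool should_suspend = exception_exited || in_suspension;
        return should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
    }

    int main() {
        assert(SuspendThreadState(false, true) == ThreadState::Runnable);
        assert(SuspendThreadState(false, false) == ThreadState::Waiting);
        return 0;
    }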
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index e3169f5a7..933d9a7d6 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -33,7 +33,6 @@ template <typename T>
33class SlabHeap; 33class SlabHeap;
34} // namespace Memory 34} // namespace Memory
35 35
36class AddressArbiter;
37class ClientPort; 36class ClientPort;
38class GlobalSchedulerContext; 37class GlobalSchedulerContext;
39class HandleTable; 38class HandleTable;
@@ -129,12 +128,6 @@ public:
129 /// Gets an instance of the current physical CPU core. 128 /// Gets an instance of the current physical CPU core.
130 const Kernel::PhysicalCore& CurrentPhysicalCore() const; 129 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
131 130
132 /// Gets an instance of the Synchronization Interface.
133 Kernel::Synchronization& Synchronization();
134
135 /// Gets a const instance of the Synchronization Interface.
136 const Kernel::Synchronization& Synchronization() const;
137
138 /// Gets an instance of the TimeManager Interface. 131 /// Gets an instance of the TimeManager Interface.
139 Kernel::TimeManager& TimeManager(); 132 Kernel::TimeManager& TimeManager();
140 133
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
index 9b3d6267a..c7c0b2f49 100644
--- a/src/core/hle/kernel/memory/memory_layout.h
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -5,9 +5,28 @@
5#pragma once 5#pragma once
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "core/device_memory.h"
8 9
9namespace Kernel::Memory { 10namespace Kernel::Memory {
10 11
12constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
13constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
14constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
15constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
16constexpr std::size_t KernelVirtualAddressSpaceEnd =
17 KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
18constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
19constexpr std::size_t KernelVirtualAddressSpaceSize =
20 KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
21
22constexpr bool IsKernelAddressKey(VAddr key) {
23 return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
24}
25
26constexpr bool IsKernelAddress(VAddr address) {
27 return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
28}
29
11class MemoryRegion final { 30class MemoryRegion final {
12 friend class MemoryLayout; 31 friend class MemoryLayout;
13 32
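
The new constants pin the kernel's virtual range to the top 2^39 bytes of the address space, minus one ASLR-alignment slot at the very top. The arithmetic can be checked in isolation; this standalone restatement (assuming VAddr is a u64, as in common_types.h) asserts the resulting bounds at compile time:

    #include <cstddef>
    #include <cstdint>

    using VAddr = std::uint64_t;

    constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
    constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
    constexpr std::size_t KernelVirtualAddressSpaceBase =
        0ULL - KernelVirtualAddressSpaceWidth;
    constexpr std::size_t KernelVirtualAddressSpaceEnd =
        KernelVirtualAddressSpaceBase +
        (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);

    constexpr bool IsKernelAddress(VAddr address) {
        return KernelVirtualAddressSpaceBase <= address &&
               address < KernelVirtualAddressSpaceEnd;
    }

    // Top 2^39 bytes, with a 2 MiB ASLR slot shaved off the very top.
    static_assert(KernelVirtualAddressSpaceBase == 0xFFFFFF8000000000ULL);
    static_assert(KernelVirtualAddressSpaceEnd == 0xFFFFFFFFFFE00000ULL);
    static_assert(IsKernelAddress(0xFFFFFF8000000000ULL));
    static_assert(!IsKernelAddress(0x0000000012345678ULL));

    int main() {
        return 0;
    }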
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
deleted file mode 100644
index 4f8075e0e..000000000
--- a/src/core/hle/kernel/mutex.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <utility>
7#include <vector>
8
9#include "common/assert.h"
10#include "common/logging/log.h"
11#include "core/core.h"
12#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/handle_table.h"
14#include "core/hle/kernel/k_scheduler.h"
15#include "core/hle/kernel/kernel.h"
16#include "core/hle/kernel/mutex.h"
17#include "core/hle/kernel/object.h"
18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/thread.h"
20#include "core/hle/result.h"
21#include "core/memory.h"
22
23namespace Kernel {
24
25/// Returns the highest-priority thread waiting on the given mutex, together with the number of
26/// threads waiting on it.
27static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
28 const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
29
30 std::shared_ptr<Thread> highest_priority_thread;
31 u32 num_waiters = 0;
32
33 for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
34 if (thread->GetMutexWaitAddress() != mutex_addr)
35 continue;
36
37 ++num_waiters;
38 if (highest_priority_thread == nullptr ||
39 thread->GetPriority() < highest_priority_thread->GetPriority()) {
40 highest_priority_thread = thread;
41 }
42 }
43
44 return {highest_priority_thread, num_waiters};
45}
46
47/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
48static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
49 std::shared_ptr<Thread> new_owner) {
50 current_thread->RemoveMutexWaiter(new_owner);
51 const auto threads = current_thread->GetMutexWaitingThreads();
52 for (const auto& thread : threads) {
53 if (thread->GetMutexWaitAddress() != mutex_addr)
54 continue;
55
56 ASSERT(thread->GetLockOwner() == current_thread.get());
57 current_thread->RemoveMutexWaiter(thread);
58 if (new_owner != thread)
59 new_owner->AddMutexWaiter(thread);
60 }
61}
62
63Mutex::Mutex(Core::System& system) : system{system} {}
64Mutex::~Mutex() = default;
65
66ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
67 Handle requesting_thread_handle) {
68 // The mutex address must be 4-byte aligned
69 if ((address % sizeof(u32)) != 0) {
70 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
71 return ERR_INVALID_ADDRESS;
72 }
73
74 auto& kernel = system.Kernel();
75 std::shared_ptr<Thread> current_thread =
76 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
77 {
78 KScopedSchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS;
82 }
83
84 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
85 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
86 std::shared_ptr<Thread> requesting_thread =
87 handle_table.Get<Thread>(requesting_thread_handle);
88
89 // TODO(Subv): It is currently unknown if it is possible to lock a mutex on behalf of
90 // another thread.
91 ASSERT(requesting_thread == current_thread);
92
93 current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
94
95 const u32 addr_value = system.Memory().Read32(address);
96
97 // If the mutex isn't being held, just return success.
98 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
99 return RESULT_SUCCESS;
100 }
101
102 if (holding_thread == nullptr) {
103 return ERR_INVALID_HANDLE;
104 }
105
106 // Wait until the mutex is released
107 current_thread->SetMutexWaitAddress(address);
108 current_thread->SetWaitHandle(requesting_thread_handle);
109
110 current_thread->SetStatus(ThreadStatus::WaitMutex);
111
112 // Update the lock holder thread's priority to prevent priority inversion.
113 holding_thread->AddMutexWaiter(current_thread);
114 }
115
116 {
117 KScopedSchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread);
121 }
122 }
123 return current_thread->GetSignalingResult();
124}
125
126std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
127 VAddr address) {
128 // The mutex address must be 4-byte aligned
129 if ((address % sizeof(u32)) != 0) {
130 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
131 return {ERR_INVALID_ADDRESS, nullptr};
132 }
133
134 auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
135 if (new_owner == nullptr) {
136 system.Memory().Write32(address, 0);
137 return {RESULT_SUCCESS, nullptr};
138 }
139 // Transfer the ownership of the mutex from the previous owner to the new one.
140 TransferMutexOwnership(address, owner, new_owner);
141 u32 mutex_value = new_owner->GetWaitHandle();
142 if (num_waiters >= 2) {
143 // Notify the guest that there are still some threads waiting for the mutex
144 mutex_value |= Mutex::MutexHasWaitersFlag;
145 }
146 new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
147 new_owner->SetLockOwner(nullptr);
148 new_owner->ResumeFromWait();
149
150 system.Memory().Write32(address, mutex_value);
151 return {RESULT_SUCCESS, new_owner};
152}
153
154ResultCode Mutex::Release(VAddr address) {
155 auto& kernel = system.Kernel();
156 KScopedSchedulerLock lock(kernel);
157
158 std::shared_ptr<Thread> current_thread =
159 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
160
161 auto [result, new_owner] = Unlock(current_thread, address);
162
163 if (result != RESULT_SUCCESS && new_owner != nullptr) {
164 new_owner->SetSynchronizationResults(nullptr, result);
165 }
166
167 return result;
168}
169
170} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
deleted file mode 100644
index 3b81dc3df..000000000
--- a/src/core/hle/kernel/mutex.h
+++ /dev/null
@@ -1,42 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9union ResultCode;
10
11namespace Core {
12class System;
13}
14
15namespace Kernel {
16
17class Mutex final {
18public:
19 explicit Mutex(Core::System& system);
20 ~Mutex();
21
22 /// Flag that indicates that a mutex still has threads waiting for it.
23 static constexpr u32 MutexHasWaitersFlag = 0x40000000;
24 /// Mask of the bits in a mutex address value that contain the mutex owner.
25 static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
26
27 /// Attempts to acquire a mutex at the specified address.
28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
29 Handle requesting_thread_handle);
30
31 /// Unlocks a mutex for owner at address
32 std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
33 VAddr address);
34
35 /// Releases the mutex at the specified address.
36 ResultCode Release(VAddr address);
37
38private:
39 Core::System& system;
40};
41
42} // namespace Kernel
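
Mutex is gone as a class, but the guest-visible lock word it managed survives into KConditionVariable's SignalToAddress/WaitForAddress: bit 30 marks "has waiters" and the remaining bits carry the owner's thread handle. A small sketch of decoding that word (the mask mirrors the removed MutexHasWaitersFlag, 0x40000000; treat its reuse in the new code as an assumption rather than a quoted constant):

    #include <cassert>
    #include <cstdint>

    constexpr std::uint32_t HandleWaitMask = 0x40000000;

    constexpr std::uint32_t OwnerHandle(std::uint32_t mutex_value) {
        return mutex_value & ~HandleWaitMask; // strip the waiters flag
    }

    constexpr bool HasWaiters(std::uint32_t mutex_value) {
        return (mutex_value & HandleWaitMask) != 0;
    }

    int main() {
        const std::uint32_t value = 0x1234 | HandleWaitMask;
        assert(OwnerHandle(value) == 0x1234);
        assert(HasWaiters(value));
        return 0;
    }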
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index e3391e2af..27124ef67 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -50,6 +50,11 @@ public:
50 } 50 }
51 virtual HandleType GetHandleType() const = 0; 51 virtual HandleType GetHandleType() const = 0;
52 52
53 void Close() {
54 // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
55 // when we implement KAutoObject instead of using shared_ptr.
56 }
57
53 /** 58 /**
54 * Check if a thread can wait on the object 59 * Check if a thread can wait on the object
55 * @return True if a thread can wait on the object, otherwise false 60 * @return True if a thread can wait on the object, otherwise false
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b905b486a..37b77fa6e 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
56 { 56 {
57 KScopedSchedulerLock lock{kernel}; 57 KScopedSchedulerLock lock{kernel};
58 thread->SetStatus(ThreadStatus::Ready); 58 thread->SetState(ThreadState::Runnable);
59 } 59 }
60} 60}
61} // Anonymous namespace 61} // Anonymous namespace
@@ -162,48 +162,6 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
162 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 162 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
163} 163}
164 164
165void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
166 VAddr cond_var_addr = thread->GetCondVarWaitAddress();
167 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
168 auto it = thread_list.begin();
169 while (it != thread_list.end()) {
170 const std::shared_ptr<Thread> current_thread = *it;
171 if (current_thread->GetPriority() > thread->GetPriority()) {
172 thread_list.insert(it, thread);
173 return;
174 }
175 ++it;
176 }
177 thread_list.push_back(thread);
178}
179
180void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
181 VAddr cond_var_addr = thread->GetCondVarWaitAddress();
182 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
183 auto it = thread_list.begin();
184 while (it != thread_list.end()) {
185 const std::shared_ptr<Thread> current_thread = *it;
186 if (current_thread.get() == thread.get()) {
187 thread_list.erase(it);
188 return;
189 }
190 ++it;
191 }
192}
193
194std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
195 const VAddr cond_var_addr) {
196 std::vector<std::shared_ptr<Thread>> result{};
197 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
198 auto it = thread_list.begin();
199 while (it != thread_list.end()) {
200 std::shared_ptr<Thread> current_thread = *it;
201 result.push_back(current_thread);
202 ++it;
203 }
204 return result;
205}
206
207void Process::RegisterThread(const Thread* thread) { 165void Process::RegisterThread(const Thread* thread) {
208 thread_list.push_back(thread); 166 thread_list.push_back(thread);
209} 167}
@@ -318,7 +276,7 @@ void Process::PrepareForTermination() {
318 continue; 276 continue;
319 277
320 // TODO(Subv): When are the other running/ready threads terminated? 278 // TODO(Subv): When are the other running/ready threads terminated?
321 ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch, 279 ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
322 "Exiting processes with non-waiting threads is currently unimplemented"); 280 "Exiting processes with non-waiting threads is currently unimplemented");
323 281
324 thread->Stop(); 282 thread->Stop();
@@ -406,21 +364,18 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
406 ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite); 364 ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
407} 365}
408 366
367bool Process::IsSignaled() const {
368 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
369 return is_signaled;
370}
371
409Process::Process(Core::System& system) 372Process::Process(Core::System& system)
410 : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>( 373 : KSynchronizationObject{system.Kernel()},
411 system)}, 374 page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
412 handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} 375 address_arbiter{system}, condition_var{system}, system{system} {}
413 376
414Process::~Process() = default; 377Process::~Process() = default;
415 378
416void Process::Acquire(Thread* thread) {
417 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
418}
419
420bool Process::ShouldWait(const Thread* thread) const {
421 return !is_signaled;
422}
423
424void Process::ChangeStatus(ProcessStatus new_status) { 379void Process::ChangeStatus(ProcessStatus new_status) {
425 if (status == new_status) { 380 if (status == new_status) {
426 return; 381 return;
@@ -428,7 +383,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
428 383
429 status = new_status; 384 status = new_status;
430 is_signaled = true; 385 is_signaled = true;
431 Signal(); 386 NotifyAvailable();
432} 387}
433 388
434ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { 389ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index e412e58aa..564e1f27d 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -11,11 +11,11 @@
11#include <unordered_map> 11#include <unordered_map>
12#include <vector> 12#include <vector>
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "core/hle/kernel/address_arbiter.h"
15#include "core/hle/kernel/handle_table.h" 14#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/mutex.h" 15#include "core/hle/kernel/k_address_arbiter.h"
16#include "core/hle/kernel/k_condition_variable.h"
17#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/kernel/process_capability.h" 18#include "core/hle/kernel/process_capability.h"
18#include "core/hle/kernel/synchronization_object.h"
19#include "core/hle/result.h" 19#include "core/hle/result.h"
20 20
21namespace Core { 21namespace Core {
@@ -63,7 +63,7 @@ enum class ProcessStatus {
63 DebugBreak, 63 DebugBreak,
64}; 64};
65 65
66class Process final : public SynchronizationObject { 66class Process final : public KSynchronizationObject {
67public: 67public:
68 explicit Process(Core::System& system); 68 explicit Process(Core::System& system);
69 ~Process() override; 69 ~Process() override;
@@ -123,24 +123,30 @@ public:
123 return handle_table; 123 return handle_table;
124 } 124 }
125 125
126 /// Gets a reference to the process' address arbiter. 126 ResultCode SignalToAddress(VAddr address) {
127 AddressArbiter& GetAddressArbiter() { 127 return condition_var.SignalToAddress(address);
128 return address_arbiter;
129 } 128 }
130 129
131 /// Gets a const reference to the process' address arbiter. 130 ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
132 const AddressArbiter& GetAddressArbiter() const { 131 return condition_var.WaitForAddress(handle, address, tag);
133 return address_arbiter;
134 } 132 }
135 133
136 /// Gets a reference to the process' mutex lock. 134 void SignalConditionVariable(u64 cv_key, int32_t count) {
137 Mutex& GetMutex() { 135 return condition_var.Signal(cv_key, count);
138 return mutex;
139 } 136 }
140 137
141 /// Gets a const reference to the process' mutex lock 138 ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
142 const Mutex& GetMutex() const { 139 return condition_var.Wait(address, cv_key, tag, ns);
143 return mutex; 140 }
141
142 ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
143 s32 count) {
144 return address_arbiter.SignalToAddress(address, signal_type, value, count);
145 }
146
147 ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
148 s64 timeout) {
149 return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
144 } 150 }
145 151
146 /// Gets the address of the process' dedicated TLS region. 152 /// Gets the address of the process' dedicated TLS region.
@@ -250,15 +256,6 @@ public:
250 return thread_list; 256 return thread_list;
251 } 257 }
252 258
253 /// Insert a thread into the condition variable wait container
254 void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
255
256 /// Remove a thread from the condition variable wait container
257 void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
258
259 /// Obtain all condition variable threads waiting for some address
260 std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
261
262 /// Registers a thread as being created under this process, 259 /// Registers a thread as being created under this process,
263 /// adding it to this process' thread list. 260 /// adding it to this process' thread list.
264 void RegisterThread(const Thread* thread); 261 void RegisterThread(const Thread* thread);
@@ -304,6 +301,8 @@ public:
304 301
305 void LoadModule(CodeSet code_set, VAddr base_addr); 302 void LoadModule(CodeSet code_set, VAddr base_addr);
306 303
304 bool IsSignaled() const override;
305
307 /////////////////////////////////////////////////////////////////////////////////////////////// 306 ///////////////////////////////////////////////////////////////////////////////////////////////
308 // Thread-local storage management 307 // Thread-local storage management
309 308
@@ -314,12 +313,6 @@ public:
314 void FreeTLSRegion(VAddr tls_address); 313 void FreeTLSRegion(VAddr tls_address);
315 314
316private: 315private:
317 /// Checks if the specified thread should wait until this process is available.
318 bool ShouldWait(const Thread* thread) const override;
319
320 /// Acquires/locks this process for the specified thread if it's available.
321 void Acquire(Thread* thread) override;
322
323 /// Changes the process status. If the status is different 316 /// Changes the process status. If the status is different
324 /// from the current process status, then this will trigger 317 /// from the current process status, then this will trigger
325 /// a process signal. 318 /// a process signal.
@@ -373,12 +366,12 @@ private:
373 HandleTable handle_table; 366 HandleTable handle_table;
374 367
375 /// Per-process address arbiter. 368 /// Per-process address arbiter.
376 AddressArbiter address_arbiter; 369 KAddressArbiter address_arbiter;
377 370
378 /// The per-process mutex lock instance used for handling various 371 /// The per-process mutex lock instance used for handling various
379 /// forms of services, such as lock arbitration, and condition 372 /// forms of services, such as lock arbitration, and condition
380 /// variable related facilities. 373 /// variable related facilities.
381 Mutex mutex; 374 KConditionVariable condition_var;
382 375
383 /// Address indicating the location of the process' dedicated TLS region. 376 /// Address indicating the location of the process' dedicated TLS region.
384 VAddr tls_region_address = 0; 377 VAddr tls_region_address = 0;
@@ -389,9 +382,6 @@ private:
389 /// List of threads that are running with this process as their owner. 382 /// List of threads that are running with this process as their owner.
390 std::list<const Thread*> thread_list; 383 std::list<const Thread*> thread_list;
391 384
392 /// List of threads waiting for a condition variable
393 std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
394
395 /// Address of the top of the main thread's stack 385 /// Address of the top of the main thread's stack
396 VAddr main_thread_stack_top{}; 386 VAddr main_thread_stack_top{};
397 387
@@ -410,6 +400,8 @@ private:
410 /// Schedule count of this process 400 /// Schedule count of this process
411 s64 schedule_count{}; 401 s64 schedule_count{};
412 402
403 bool is_signaled{};
404
413 /// System context 405 /// System context
414 Core::System& system; 406 Core::System& system;
415}; 407};
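
Callers no longer reach into a Mutex or AddressArbiter member; every synchronization SVC is expected to funnel through the Process wrappers added above. A caller's-eye sketch under stub types (FakeProcess and the simplified ResultCode are hypothetical; only the wrapper signature is taken from the diff):

    #include <cstdint>

    using VAddr = std::uint64_t;
    struct ResultCode { std::uint32_t raw; };

    struct FakeProcess {
        // Same shape as the wrapper added to Process in this diff.
        ResultCode WaitConditionVariable(VAddr, std::uint64_t, std::uint32_t,
                                         std::int64_t) {
            return ResultCode{0};
        }
    };

    // An SVC handler forwards straight to the owning process's
    // KConditionVariable; no process-wide thread list is consulted.
    ResultCode WaitProcessWideKeyAtomic(FakeProcess& process, VAddr address,
                                        std::uint64_t cv_key, std::uint32_t tag,
                                        std::int64_t timeout_ns) {
        return process.WaitConditionVariable(address, cv_key, tag, timeout_ns);
    }

    int main() {
        FakeProcess process;
        return static_cast<int>(
            WaitProcessWideKeyAtomic(process, 0x1000, 0x2000, 1, -1).raw);
    }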
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index cea262ce0..99ed0857e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,24 +14,22 @@
14 14
15namespace Kernel { 15namespace Kernel {
16 16
17ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {} 17ReadableEvent::ReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
18ReadableEvent::~ReadableEvent() = default; 18ReadableEvent::~ReadableEvent() = default;
19 19
20bool ReadableEvent::ShouldWait(const Thread* thread) const {
21 return !is_signaled;
22}
23
24void ReadableEvent::Acquire(Thread* thread) {
25 ASSERT_MSG(IsSignaled(), "object unavailable!");
26}
27
28void ReadableEvent::Signal() { 20void ReadableEvent::Signal() {
29 if (is_signaled) { 21 if (is_signaled) {
30 return; 22 return;
31 } 23 }
32 24
33 is_signaled = true; 25 is_signaled = true;
34 SynchronizationObject::Signal(); 26 NotifyAvailable();
27}
28
29bool ReadableEvent::IsSignaled() const {
30 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
31
32 return is_signaled;
35} 33}
36 34
37void ReadableEvent::Clear() { 35void ReadableEvent::Clear() {
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 3264dd066..34e477274 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -4,8 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/k_synchronization_object.h"
7#include "core/hle/kernel/object.h" 8#include "core/hle/kernel/object.h"
8#include "core/hle/kernel/synchronization_object.h"
9 9
10union ResultCode; 10union ResultCode;
11 11
@@ -14,7 +14,7 @@ namespace Kernel {
14class KernelCore; 14class KernelCore;
15class WritableEvent; 15class WritableEvent;
16 16
17class ReadableEvent final : public SynchronizationObject { 17class ReadableEvent final : public KSynchronizationObject {
18 friend class WritableEvent; 18 friend class WritableEvent;
19 19
20public: 20public:
@@ -32,9 +32,6 @@ public:
32 return HANDLE_TYPE; 32 return HANDLE_TYPE;
33 } 33 }
34 34
35 bool ShouldWait(const Thread* thread) const override;
36 void Acquire(Thread* thread) override;
37
38 /// Unconditionally clears the readable event's state. 35 /// Unconditionally clears the readable event's state.
39 void Clear(); 36 void Clear();
40 37
@@ -46,11 +43,14 @@ public:
46 /// then ERR_INVALID_STATE will be returned. 43 /// then ERR_INVALID_STATE will be returned.
47 ResultCode Reset(); 44 ResultCode Reset();
48 45
49 void Signal() override; 46 void Signal();
47
48 bool IsSignaled() const override;
50 49
51private: 50private:
52 explicit ReadableEvent(KernelCore& kernel); 51 explicit ReadableEvent(KernelCore& kernel);
53 52
53 bool is_signaled{};
54 std::string name; ///< Name of event (optional) 54 std::string name; ///< Name of event (optional)
55}; 55};
56 56
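
With ShouldWait/Acquire gone, ReadableEvent owns its is_signaled flag directly, and Signal() is edge-triggered: a second Signal() on an already-signaled event must not produce a second wakeup. In miniature:

    #include <cassert>

    struct MiniEvent {
        bool is_signaled = false;
        int notifications = 0; // observable stand-in for NotifyAvailable()

        void Signal() {
            if (is_signaled) {
                return;        // already signaled: no second wakeup
            }
            is_signaled = true;
            ++notifications;
        }
        void Clear() { is_signaled = false; }
    };

    int main() {
        MiniEvent event;
        event.Signal();
        event.Signal();        // ignored
        assert(event.notifications == 1);
        event.Clear();
        event.Signal();        // a fresh edge notifies again
        assert(event.notifications == 2);
        return 0;
    }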
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index a549ae9d7..82857f93b 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -13,7 +13,7 @@
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {} 16ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
17ServerPort::~ServerPort() = default; 17ServerPort::~ServerPort() = default;
18 18
19ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() { 19ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
@@ -28,15 +28,9 @@ ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
28 28
29void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) { 29void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
30 pending_sessions.push_back(std::move(pending_session)); 30 pending_sessions.push_back(std::move(pending_session));
31} 31 if (pending_sessions.size() == 1) {
32 32 NotifyAvailable();
33bool ServerPort::ShouldWait(const Thread* thread) const { 33 }
34 // If there are no pending sessions, we wait until a new one is added.
35 return pending_sessions.empty();
36}
37
38void ServerPort::Acquire(Thread* thread) {
39 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
40} 34}
41 35
42bool ServerPort::IsSignaled() const { 36bool ServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 41b191b86..6470df993 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -9,8 +9,8 @@
9#include <utility> 9#include <utility>
10#include <vector> 10#include <vector>
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "core/hle/kernel/k_synchronization_object.h"
12#include "core/hle/kernel/object.h" 13#include "core/hle/kernel/object.h"
13#include "core/hle/kernel/synchronization_object.h"
14#include "core/hle/result.h" 14#include "core/hle/result.h"
15 15
16namespace Kernel { 16namespace Kernel {
@@ -20,7 +20,7 @@ class KernelCore;
20class ServerSession; 20class ServerSession;
21class SessionRequestHandler; 21class SessionRequestHandler;
22 22
23class ServerPort final : public SynchronizationObject { 23class ServerPort final : public KSynchronizationObject {
24public: 24public:
25 explicit ServerPort(KernelCore& kernel); 25 explicit ServerPort(KernelCore& kernel);
26 ~ServerPort() override; 26 ~ServerPort() override;
@@ -79,9 +79,6 @@ public:
79 /// waiting to be accepted by this port. 79 /// waiting to be accepted by this port.
80 void AppendPendingSession(std::shared_ptr<ServerSession> pending_session); 80 void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
81 81
82 bool ShouldWait(const Thread* thread) const override;
83 void Acquire(Thread* thread) override;
84
85 bool IsSignaled() const override; 82 bool IsSignaled() const override;
86 83
87private: 84private:
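
AppendPendingSession now doubles as the signal source: waiters are notified exactly on the empty-to-non-empty transition, which keeps the notification consistent with IsSignaled() == !pending_sessions.empty(). A compilable miniature (ints stand in for sessions):

    #include <cassert>
    #include <vector>

    struct MiniPort {
        std::vector<int> pending_sessions;
        int notifications = 0; // stands in for NotifyAvailable()

        void AppendPendingSession(int session) {
            pending_sessions.push_back(session);
            if (pending_sessions.size() == 1) {
                ++notifications; // only the empty -> non-empty edge notifies
            }
        }
        bool IsSignaled() const { return !pending_sessions.empty(); }
    };

    int main() {
        MiniPort port;
        port.AppendPendingSession(1);
        port.AppendPendingSession(2); // no extra notification
        assert(port.notifications == 1 && port.IsSignaled());
        return 0;
    }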
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index b40fe3916..4f2bb7822 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -24,7 +24,7 @@
24 24
25namespace Kernel { 25namespace Kernel {
26 26
27ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} 27ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
28 28
29ServerSession::~ServerSession() { 29ServerSession::~ServerSession() {
30 kernel.ReleaseServiceThread(service_thread); 30 kernel.ReleaseServiceThread(service_thread);
@@ -42,16 +42,6 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
42 return MakeResult(std::move(session)); 42 return MakeResult(std::move(session));
43} 43}
44 44
45bool ServerSession::ShouldWait(const Thread* thread) const {
46 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
47 if (!parent->Client()) {
48 return false;
49 }
50
51 // Wait if we have no pending requests, or if we're currently handling a request.
52 return pending_requesting_threads.empty() || currently_handling != nullptr;
53}
54
55bool ServerSession::IsSignaled() const { 45bool ServerSession::IsSignaled() const {
56 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 46 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
57 if (!parent->Client()) { 47 if (!parent->Client()) {
@@ -62,15 +52,6 @@ bool ServerSession::IsSignaled() const {
62 return !pending_requesting_threads.empty() && currently_handling == nullptr; 52 return !pending_requesting_threads.empty() && currently_handling == nullptr;
63} 53}
64 54
65void ServerSession::Acquire(Thread* thread) {
66 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
67 // We are now handling a request, pop it from the stack.
68 // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
69 ASSERT(!pending_requesting_threads.empty());
70 currently_handling = pending_requesting_threads.back();
71 pending_requesting_threads.pop_back();
72}
73
74void ServerSession::ClientDisconnected() { 55void ServerSession::ClientDisconnected() {
75 // We keep a shared pointer to the hle handler to keep it alive throughout 56 // We keep a shared pointer to the hle handler to keep it alive throughout
76 // the call to ClientDisconnected, as ClientDisconnected invalidates the 57 // the call to ClientDisconnected, as ClientDisconnected invalidates the
@@ -172,7 +153,7 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
172 { 153 {
173 KScopedSchedulerLock lock(kernel); 154 KScopedSchedulerLock lock(kernel);
174 if (!context.IsThreadWaiting()) { 155 if (!context.IsThreadWaiting()) {
175 context.GetThread().ResumeFromWait(); 156 context.GetThread().Wakeup();
176 context.GetThread().SetSynchronizationResults(nullptr, result); 157 context.GetThread().SetSynchronizationResults(nullptr, result);
177 } 158 }
178 } 159 }
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index e8d1d99ea..9155cf7f5 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,8 +10,8 @@
10#include <vector> 10#include <vector>
11 11
12#include "common/threadsafe_queue.h" 12#include "common/threadsafe_queue.h"
13#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/service_thread.h" 14#include "core/hle/kernel/service_thread.h"
14#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/result.h" 15#include "core/hle/result.h"
16 16
17namespace Core::Memory { 17namespace Core::Memory {
@@ -43,7 +43,7 @@ class Thread;
43 * After the server replies to the request, the response is marshalled back to the caller's 43 * After the server replies to the request, the response is marshalled back to the caller's
44 * TLS buffer and control is transferred back to it. 44 * TLS buffer and control is transferred back to it.
45 */ 45 */
46class ServerSession final : public SynchronizationObject { 46class ServerSession final : public KSynchronizationObject {
47 friend class ServiceThread; 47 friend class ServiceThread;
48 48
49public: 49public:
@@ -77,8 +77,6 @@ public:
77 return parent.get(); 77 return parent.get();
78 } 78 }
79 79
80 bool IsSignaled() const override;
81
82 /** 80 /**
83 * Sets the HLE handler for the session. This handler will be called to service IPC requests 81 * Sets the HLE handler for the session. This handler will be called to service IPC requests
84 * instead of the regular IPC machinery. (The regular IPC machinery is currently not 82 * instead of the regular IPC machinery. (The regular IPC machinery is currently not
@@ -100,10 +98,6 @@ public:
100 ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory, 98 ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
101 Core::Timing::CoreTiming& core_timing); 99 Core::Timing::CoreTiming& core_timing);
102 100
103 bool ShouldWait(const Thread* thread) const override;
104
105 void Acquire(Thread* thread) override;
106
107 /// Called when a client disconnection occurs. 101 /// Called when a client disconnection occurs.
108 void ClientDisconnected(); 102 void ClientDisconnected();
109 103
@@ -130,6 +124,8 @@ public:
130 convert_to_domain = true; 124 convert_to_domain = true;
131 } 125 }
132 126
127 bool IsSignaled() const override;
128
133private: 129private:
134 /// Queues a sync request from the emulated application. 130 /// Queues a sync request from the emulated application.
135 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); 131 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp
index e4dd53e24..75304b961 100644
--- a/src/core/hle/kernel/session.cpp
+++ b/src/core/hle/kernel/session.cpp
@@ -9,7 +9,7 @@
9 9
10namespace Kernel { 10namespace Kernel {
11 11
12Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {} 12Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
13Session::~Session() = default; 13Session::~Session() = default;
14 14
15Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { 15Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
@@ -24,18 +24,9 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
24 return std::make_pair(std::move(client_session), std::move(server_session)); 24 return std::make_pair(std::move(client_session), std::move(server_session));
25} 25}
26 26
27bool Session::ShouldWait(const Thread* thread) const {
28 UNIMPLEMENTED();
29 return {};
30}
31
32bool Session::IsSignaled() const { 27bool Session::IsSignaled() const {
33 UNIMPLEMENTED(); 28 UNIMPLEMENTED();
34 return true; 29 return true;
35} 30}
36 31
37void Session::Acquire(Thread* thread) {
38 UNIMPLEMENTED();
39}
40
41} // namespace Kernel 32} // namespace Kernel
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
index 7cd9c0d77..f6dd2c1d2 100644
--- a/src/core/hle/kernel/session.h
+++ b/src/core/hle/kernel/session.h
@@ -8,7 +8,7 @@
8#include <string> 8#include <string>
9#include <utility> 9#include <utility>
10 10
11#include "core/hle/kernel/synchronization_object.h" 11#include "core/hle/kernel/k_synchronization_object.h"
12 12
13namespace Kernel { 13namespace Kernel {
14 14
@@ -19,7 +19,7 @@ class ServerSession;
19 * Parent structure to link the client and server endpoints of a session with their associated 19 * Parent structure to link the client and server endpoints of a session with their associated
20 * client port. 20 * client port.
21 */ 21 */
22class Session final : public SynchronizationObject { 22class Session final : public KSynchronizationObject {
23public: 23public:
24 explicit Session(KernelCore& kernel); 24 explicit Session(KernelCore& kernel);
25 ~Session() override; 25 ~Session() override;
@@ -37,12 +37,8 @@ public:
37 return HANDLE_TYPE; 37 return HANDLE_TYPE;
38 } 38 }
39 39
40 bool ShouldWait(const Thread* thread) const override;
41
42 bool IsSignaled() const override; 40 bool IsSignaled() const override;
43 41
44 void Acquire(Thread* thread) override;
45
46 std::shared_ptr<ClientSession> Client() { 42 std::shared_ptr<ClientSession> Client() {
47 if (auto result{client.lock()}) { 43 if (auto result{client.lock()}) {
48 return result; 44 return result;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index de3ed25da..cc8b661af 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,6 +10,7 @@
10 10
11#include "common/alignment.h" 11#include "common/alignment.h"
12#include "common/assert.h" 12#include "common/assert.h"
13#include "common/common_funcs.h"
13#include "common/fiber.h" 14#include "common/fiber.h"
14#include "common/logging/log.h" 15#include "common/logging/log.h"
15#include "common/microprofile.h" 16#include "common/microprofile.h"
@@ -19,26 +20,28 @@
19#include "core/core_timing.h" 20#include "core/core_timing.h"
20#include "core/core_timing_util.h" 21#include "core/core_timing_util.h"
21#include "core/cpu_manager.h" 22#include "core/cpu_manager.h"
22#include "core/hle/kernel/address_arbiter.h"
23#include "core/hle/kernel/client_port.h" 23#include "core/hle/kernel/client_port.h"
24#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
25#include "core/hle/kernel/errors.h" 25#include "core/hle/kernel/errors.h"
26#include "core/hle/kernel/handle_table.h" 26#include "core/hle/kernel/handle_table.h"
27#include "core/hle/kernel/k_address_arbiter.h"
28#include "core/hle/kernel/k_condition_variable.h"
27#include "core/hle/kernel/k_scheduler.h" 29#include "core/hle/kernel/k_scheduler.h"
28#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 30#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
31#include "core/hle/kernel/k_synchronization_object.h"
29#include "core/hle/kernel/kernel.h" 32#include "core/hle/kernel/kernel.h"
30#include "core/hle/kernel/memory/memory_block.h" 33#include "core/hle/kernel/memory/memory_block.h"
34#include "core/hle/kernel/memory/memory_layout.h"
31#include "core/hle/kernel/memory/page_table.h" 35#include "core/hle/kernel/memory/page_table.h"
32#include "core/hle/kernel/mutex.h"
33#include "core/hle/kernel/physical_core.h" 36#include "core/hle/kernel/physical_core.h"
34#include "core/hle/kernel/process.h" 37#include "core/hle/kernel/process.h"
35#include "core/hle/kernel/readable_event.h" 38#include "core/hle/kernel/readable_event.h"
36#include "core/hle/kernel/resource_limit.h" 39#include "core/hle/kernel/resource_limit.h"
37#include "core/hle/kernel/shared_memory.h" 40#include "core/hle/kernel/shared_memory.h"
38#include "core/hle/kernel/svc.h" 41#include "core/hle/kernel/svc.h"
42#include "core/hle/kernel/svc_results.h"
39#include "core/hle/kernel/svc_types.h" 43#include "core/hle/kernel/svc_types.h"
40#include "core/hle/kernel/svc_wrap.h" 44#include "core/hle/kernel/svc_wrap.h"
41#include "core/hle/kernel/synchronization.h"
42#include "core/hle/kernel/thread.h" 45#include "core/hle/kernel/thread.h"
43#include "core/hle/kernel/time_manager.h" 46#include "core/hle/kernel/time_manager.h"
44#include "core/hle/kernel/transfer_memory.h" 47#include "core/hle/kernel/transfer_memory.h"
@@ -343,27 +346,11 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
343 auto thread = kernel.CurrentScheduler()->GetCurrentThread(); 346 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
344 { 347 {
345 KScopedSchedulerLock lock(kernel); 348 KScopedSchedulerLock lock(kernel);
346 thread->InvalidateHLECallback(); 349 thread->SetState(ThreadState::Waiting);
347 thread->SetStatus(ThreadStatus::WaitIPC); 350 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
348 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 351 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
349 } 352 }
350 353
351 if (thread->HasHLECallback()) {
352 Handle event_handle = thread->GetHLETimeEvent();
353 if (event_handle != InvalidHandle) {
354 auto& time_manager = kernel.TimeManager();
355 time_manager.UnscheduleTimeEvent(event_handle);
356 }
357
358 {
359 KScopedSchedulerLock lock(kernel);
360 auto* sync_object = thread->GetHLESyncObject();
361 sync_object->RemoveWaitingThread(SharedFrom(thread));
362 }
363
364 thread->InvokeHLECallback(SharedFrom(thread));
365 }
366
367 return thread->GetSignalingResult(); 354 return thread->GetSignalingResult();
368} 355}
369 356
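The rewritten SendSyncRequest splits the handshake cleanly: the waiter parks itself with a state change plus a debugging-only wait reason, and whoever completes the request records a result before waking it, which GetSignalingResult() then returns. A hypothetical signaler-side sketch built from the Thread API in this commit (the real reply path lives in server_session.cpp, not shown in this section):

// Store the IPC result, then make the waiter schedulable again.
void ReplyToWaiter(Thread& thread, ResultCode result) {
    thread.SetSynchronizationResults(nullptr, result);
    thread.Wakeup(); // Takes the scheduler lock internally and sets Runnable.
}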
@@ -436,7 +423,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
436} 423}
437 424
438/// Wait for the given handles to synchronize, timeout after the specified nanoseconds 425/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
439static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, 426static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
440 u64 handle_count, s64 nano_seconds) { 427 u64 handle_count, s64 nano_seconds) {
441 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}", 428 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
442 handles_address, handle_count, nano_seconds); 429 handles_address, handle_count, nano_seconds);
@@ -458,28 +445,26 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
458 } 445 }
459 446
460 auto& kernel = system.Kernel(); 447 auto& kernel = system.Kernel();
461 Thread::ThreadSynchronizationObjects objects(handle_count); 448 std::vector<KSynchronizationObject*> objects(handle_count);
462 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 449 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
463 450
464 for (u64 i = 0; i < handle_count; ++i) { 451 for (u64 i = 0; i < handle_count; ++i) {
465 const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); 452 const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
466 const auto object = handle_table.Get<SynchronizationObject>(handle); 453 const auto object = handle_table.Get<KSynchronizationObject>(handle);
467 454
468 if (object == nullptr) { 455 if (object == nullptr) {
469 LOG_ERROR(Kernel_SVC, "Object is a nullptr"); 456 LOG_ERROR(Kernel_SVC, "Object is a nullptr");
470 return ERR_INVALID_HANDLE; 457 return ERR_INVALID_HANDLE;
471 } 458 }
472 459
473 objects[i] = object; 460 objects[i] = object.get();
474 } 461 }
475 auto& synchronization = kernel.Synchronization(); 462 return KSynchronizationObject::Wait(kernel, index, objects.data(),
476 const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds); 463 static_cast<s32>(objects.size()), nano_seconds);
477 *index = handle_result;
478 return result;
479} 464}
480 465
481static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, 466static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
482 s32 handle_count, u32 timeout_high, Handle* index) { 467 s32 handle_count, u32 timeout_high, s32* index) {
483 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)}; 468 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
484 return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); 469 return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
485} 470}
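Two details worth noting in the new WaitSynchronization: the wait list is now a vector of raw KSynchronizationObject pointers rather than shared_ptrs, which relies on the handle table keeping each object alive for the duration of the call, and the old Synchronization::WaitFor pair-return is replaced by a static Wait with an out-parameter index. A condensed sketch of the call shape, assuming the objects were already resolved as above:

// Hypothetical helper showing the new static wait API.
ResultCode WaitOnObjects(KernelCore& kernel, KSynchronizationObject** objects, s32 num_objects,
                         s64 timeout_ns, s32* out_index) {
    // On success, *out_index identifies which object satisfied the wait.
    return KSynchronizationObject::Wait(kernel, out_index, objects, num_objects, timeout_ns);
}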
@@ -504,56 +489,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
504 return CancelSynchronization(system, thread_handle); 489 return CancelSynchronization(system, thread_handle);
505} 490}
506 491
507/// Attempts to lock a mutex, creating it if it does not already exist 492/// Attempts to lock a mutex
508static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, 493static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
509 VAddr mutex_addr, Handle requesting_thread_handle) { 494 u32 tag) {
510 LOG_TRACE(Kernel_SVC, 495 LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
511 "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, " 496 thread_handle, address, tag);
512 "requesting_current_thread_handle=0x{:08X}",
513 holding_thread_handle, mutex_addr, requesting_thread_handle);
514
515 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
516 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
517 mutex_addr);
518 return ERR_INVALID_ADDRESS_STATE;
519 }
520 497
521 if (!Common::IsWordAligned(mutex_addr)) { 498 // Validate the input address.
522 LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); 499 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
523 return ERR_INVALID_ADDRESS; 500 R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
524 }
525 501
526 auto* const current_process = system.Kernel().CurrentProcess(); 502 return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
527 return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
528 requesting_thread_handle);
529} 503}
530 504
531static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, 505static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
532 u32 mutex_addr, Handle requesting_thread_handle) { 506 u32 tag) {
533 return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle); 507 return ArbitrateLock(system, thread_handle, address, tag);
534} 508}
535 509
536/// Unlock a mutex 510/// Unlock a mutex
537static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { 511static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
538 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); 512 LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
539
540 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
541 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
542 mutex_addr);
543 return ERR_INVALID_ADDRESS_STATE;
544 }
545 513
546 if (!Common::IsWordAligned(mutex_addr)) { 514 // Validate the input address.
547 LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); 515 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
548 return ERR_INVALID_ADDRESS; 516 R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
549 }
550 517
551 auto* const current_process = system.Kernel().CurrentProcess(); 518 return system.Kernel().CurrentProcess()->SignalToAddress(address);
552 return current_process->GetMutex().Release(mutex_addr);
553} 519}
554 520
555static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { 521static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
556 return ArbitrateUnlock(system, mutex_addr); 522 return ArbitrateUnlock(system, address);
557} 523}
558 524
559enum class BreakType : u32 { 525enum class BreakType : u32 {
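The R_UNLESS guard used throughout the new SVC code is not defined in this section; it arrives via the common/common_funcs.h change in the diffstat. An approximation of the idiom, not a copy of the real macro:

// Sketch: return the given ResultCode unless the condition holds.
#define R_UNLESS(expr, res)                                                                        \
    {                                                                                              \
        if (!(expr)) {                                                                             \
            return res;                                                                            \
        }                                                                                          \
    }

// Usage, as in ArbitrateLock above:
//   R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);

This keeps validation to one line per precondition, replacing the log-and-return blocks on the left-hand side.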
@@ -1180,7 +1146,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
1180 return ERR_INVALID_HANDLE; 1146 return ERR_INVALID_HANDLE;
1181 } 1147 }
1182 1148
1183 thread->SetPriority(priority); 1149 thread->SetBasePriority(priority);
1184 1150
1185 return RESULT_SUCCESS; 1151 return RESULT_SUCCESS;
1186} 1152}
@@ -1559,7 +1525,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1559 return ERR_INVALID_HANDLE; 1525 return ERR_INVALID_HANDLE;
1560 } 1526 }
1561 1527
1562 ASSERT(thread->GetStatus() == ThreadStatus::Dormant); 1528 ASSERT(thread->GetState() == ThreadState::Initialized);
1563 1529
1564 return thread->Start(); 1530 return thread->Start();
1565} 1531}
@@ -1620,224 +1586,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
1620} 1586}
1621 1587
1622/// Wait process wide key atomic 1588/// Wait process wide key atomic
1623static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr, 1589static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
1624 VAddr condition_variable_addr, Handle thread_handle, 1590 u32 tag, s64 timeout_ns) {
1625 s64 nano_seconds) { 1591 LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
1626 LOG_TRACE( 1592 cv_key, tag, timeout_ns);
1627 Kernel_SVC, 1593
1628 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", 1594 // Validate input.
1629 mutex_addr, condition_variable_addr, thread_handle, nano_seconds); 1595 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1630 1596 R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
1631 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { 1597
1632 LOG_ERROR( 1598 // Convert timeout from nanoseconds to ticks.
1633 Kernel_SVC, 1599 s64 timeout{};
1634 "Given mutex address must not be within the kernel address space. address=0x{:016X}", 1600 if (timeout_ns > 0) {
1635 mutex_addr); 1601 const s64 offset_tick(timeout_ns);
1636 return ERR_INVALID_ADDRESS_STATE; 1602 if (offset_tick > 0) {
1637 } 1603 timeout = offset_tick + 2;
1638 1604 if (timeout <= 0) {
1639 if (!Common::IsWordAligned(mutex_addr)) { 1605 timeout = std::numeric_limits<s64>::max();
1640 LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}", 1606 }
1641 mutex_addr); 1607 } else {
1642 return ERR_INVALID_ADDRESS; 1608 timeout = std::numeric_limits<s64>::max();
1643 }
1644
1645 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1646 auto& kernel = system.Kernel();
1647 Handle event_handle;
1648 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
1649 auto* const current_process = kernel.CurrentProcess();
1650 {
1651 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1652 const auto& handle_table = current_process->GetHandleTable();
1653 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1654 ASSERT(thread);
1655
1656 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
1657
1658 if (thread->IsPendingTermination()) {
1659 lock.CancelSleep();
1660 return ERR_THREAD_TERMINATING;
1661 }
1662
1663 const auto release_result = current_process->GetMutex().Release(mutex_addr);
1664 if (release_result.IsError()) {
1665 lock.CancelSleep();
1666 return release_result;
1667 }
1668
1669 if (nano_seconds == 0) {
1670 lock.CancelSleep();
1671 return RESULT_TIMEOUT;
1672 }
1673
1674 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1675 current_thread->SetMutexWaitAddress(mutex_addr);
1676 current_thread->SetWaitHandle(thread_handle);
1677 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1678 current_process->InsertConditionVariableThread(SharedFrom(current_thread));
1679 }
1680
1681 if (event_handle != InvalidHandle) {
1682 auto& time_manager = kernel.TimeManager();
1683 time_manager.UnscheduleTimeEvent(event_handle);
1684 }
1685
1686 {
1687 KScopedSchedulerLock lock(kernel);
1688
1689 auto* owner = current_thread->GetLockOwner();
1690 if (owner != nullptr) {
1691 owner->RemoveMutexWaiter(SharedFrom(current_thread));
1692 } 1609 }
1693 1610 } else {
1694 current_process->RemoveConditionVariableThread(SharedFrom(current_thread)); 1611 timeout = timeout_ns;
1695 } 1612 }
1696 // Note: Deliberately don't attempt to inherit the lock owner's priority.
1697 1613
1698 return current_thread->GetSignalingResult(); 1614 // Wait on the condition variable.
1615 return system.Kernel().CurrentProcess()->WaitConditionVariable(
1616 address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
1699} 1617}
1700 1618
1701static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, 1619static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
1702 u32 condition_variable_addr, Handle thread_handle, 1620 u32 timeout_ns_low, u32 timeout_ns_high) {
1703 u32 nanoseconds_low, u32 nanoseconds_high) { 1621 const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
1704 const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32)); 1622 return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
1705 return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
1706 nanoseconds);
1707} 1623}
1708 1624
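The nanoseconds-to-timeout conversion above reappears verbatim in WaitForAddress further down. Factored out, it reads as follows; the two-tick padding and the saturation to "wait forever" on overflow come directly from the code above, while the helper name is ours:

#include <limits>

// Positive timeouts are padded by two ticks, saturating to s64 max on overflow;
// zero (poll) and negative (wait forever) values pass through unchanged.
static s64 ConvertTimeout(s64 timeout_ns) {
    if (timeout_ns <= 0) {
        return timeout_ns;
    }
    const s64 padded = timeout_ns + 2;
    return padded > 0 ? padded : std::numeric_limits<s64>::max();
}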
1709/// Signal process wide key 1625/// Signal process wide key
1710static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) { 1626static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
1711 LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", 1627 LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
1712 condition_variable_addr, target);
1713 1628
1714 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1629 // Signal the condition variable.
1630 return system.Kernel().CurrentProcess()->SignalConditionVariable(
1631 Common::AlignDown(cv_key, sizeof(u32)), count);
1632}
1715 1633
1716 // Retrieve a list of all threads that are waiting for this condition variable. 1634static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
1717 auto& kernel = system.Kernel(); 1635 SignalProcessWideKey(system, cv_key, count);
1718 KScopedSchedulerLock lock(kernel); 1636}
1719 auto* const current_process = kernel.CurrentProcess();
1720 std::vector<std::shared_ptr<Thread>> waiting_threads =
1721 current_process->GetConditionVariableThreads(condition_variable_addr);
1722
1723 // Only process up to 'target' threads, unless 'target' is less than or equal to 0, in which
1724 // case process them all.
1725 std::size_t last = waiting_threads.size();
1726 if (target > 0) {
1727 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1728 }
1729 for (std::size_t index = 0; index < last; ++index) {
1730 auto& thread = waiting_threads[index];
1731
1732 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1733
1734 // Release the thread from the condition variable wait list.
1735 current_process->RemoveConditionVariableThread(thread);
1736
1737 const std::size_t current_core = system.CurrentCoreIndex();
1738 auto& monitor = system.Monitor();
1739
1740 // Atomically read the value of the mutex.
1741 u32 mutex_val = 0;
1742 u32 update_val = 0;
1743 const VAddr mutex_address = thread->GetMutexWaitAddress();
1744 do {
1745 // If the mutex is not yet acquired, acquire it.
1746 mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
1747
1748 if (mutex_val != 0) {
1749 update_val = mutex_val | Mutex::MutexHasWaitersFlag;
1750 } else {
1751 update_val = thread->GetWaitHandle();
1752 }
1753 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
1754 monitor.ClearExclusive();
1755 if (mutex_val == 0) {
1756 // We were able to acquire the mutex, resume this thread.
1757 auto* const lock_owner = thread->GetLockOwner();
1758 if (lock_owner != nullptr) {
1759 lock_owner->RemoveMutexWaiter(thread);
1760 }
1761 1637
1762 thread->SetLockOwner(nullptr); 1638namespace {
1763 thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
1764 thread->ResumeFromWait();
1765 } else {
1766 // The mutex is already owned by some other thread, make this thread wait on it.
1767 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
1768 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1769 auto owner = handle_table.Get<Thread>(owner_handle);
1770 ASSERT(owner);
1771 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
1772 thread->SetStatus(ThreadStatus::WaitMutex);
1773 }
1774 1639
1775 owner->AddMutexWaiter(thread); 1640constexpr bool IsValidSignalType(Svc::SignalType type) {
1776 } 1641 switch (type) {
1642 case Svc::SignalType::Signal:
1643 case Svc::SignalType::SignalAndIncrementIfEqual:
1644 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
1645 return true;
1646 default:
1647 return false;
1777 } 1648 }
1778} 1649}
1779 1650
1780static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) { 1651constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
1781 SignalProcessWideKey(system, condition_variable_addr, target); 1652 switch (type) {
1653 case Svc::ArbitrationType::WaitIfLessThan:
1654 case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
1655 case Svc::ArbitrationType::WaitIfEqual:
1656 return true;
1657 default:
1658 return false;
1659 }
1782} 1660}
1783 1661
1784// Wait for an address (via Address Arbiter) 1662} // namespace
1785static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
1786 s64 timeout) {
1787 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
1788 type, value, timeout);
1789
1790 // If the passed address is a kernel virtual address, return invalid memory state.
1791 if (Core::Memory::IsKernelVirtualAddress(address)) {
1792 LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
1793 return ERR_INVALID_ADDRESS_STATE;
1794 }
1795 1663
1796 // If the address is not properly aligned to 4 bytes, return invalid address. 1664// Wait for an address (via Address Arbiter)
1797 if (!Common::IsWordAligned(address)) { 1665static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
1798 LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); 1666 s32 value, s64 timeout_ns) {
1799 return ERR_INVALID_ADDRESS; 1667 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
1668 address, arb_type, value, timeout_ns);
1669
1670 // Validate input.
1671 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1672 R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
1673 R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue);
1674
1675 // Convert timeout from nanoseconds to ticks.
1676 s64 timeout{};
1677 if (timeout_ns > 0) {
1678 const s64 offset_tick(timeout_ns);
1679 if (offset_tick > 0) {
1680 timeout = offset_tick + 2;
1681 if (timeout <= 0) {
1682 timeout = std::numeric_limits<s64>::max();
1683 }
1684 } else {
1685 timeout = std::numeric_limits<s64>::max();
1686 }
1687 } else {
1688 timeout = timeout_ns;
1800 } 1689 }
1801 1690
1802 const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); 1691 return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
1803 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
1804 const ResultCode result =
1805 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
1806 return result;
1807} 1692}
1808 1693
1809static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, 1694static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
1810 u32 timeout_low, u32 timeout_high) { 1695 s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
1811 const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32)); 1696 const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
1812 return WaitForAddress(system, address, type, value, timeout); 1697 return WaitForAddress(system, address, arb_type, value, timeout);
1813} 1698}
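The 32-bit wrappers rebuild a signed 64-bit timeout from two registers. The u64 cast on the high half before shifting is what keeps the expression well-defined; shifting a 32-bit value left by 32 would be undefined behavior:

// Example: timeout_ns_low = 0x00000001, timeout_ns_high = 0x00000002
// yields s64{0x0000000200000001} nanoseconds.
const s64 timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));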
1814 1699
1815// Signals to an address (via Address Arbiter) 1700// Signals to an address (via Address Arbiter)
1816static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, 1701static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
1817 s32 num_to_wake) { 1702 s32 value, s32 count) {
1818 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", 1703 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
1819 address, type, value, num_to_wake); 1704 address, signal_type, value, count);
1820
1821 // If the passed address is a kernel virtual address, return invalid memory state.
1822 if (Core::Memory::IsKernelVirtualAddress(address)) {
1823 LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
1824 return ERR_INVALID_ADDRESS_STATE;
1825 }
1826 1705
1827 // If the address is not properly aligned to 4 bytes, return invalid address. 1706 // Validate input.
1828 if (!Common::IsWordAligned(address)) { 1707 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1829 LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); 1708 R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress);
1830 return ERR_INVALID_ADDRESS; 1709 R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue);
1831 }
1832 1710
1833 const auto signal_type = static_cast<AddressArbiter::SignalType>(type); 1711 return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
1834 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); 1712 count);
1835 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
1836} 1713}
1837 1714
1838static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, 1715static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
1839 s32 num_to_wake) { 1716 s32 value, s32 count) {
1840 return SignalToAddress(system, address, type, value, num_to_wake); 1717 return SignalToAddress(system, address, signal_type, value, count);
1841} 1718}
1842 1719
1843static void KernelDebug([[maybe_unused]] Core::System& system, 1720static void KernelDebug([[maybe_unused]] Core::System& system,
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
new file mode 100644
index 000000000..4af049551
--- /dev/null
+++ b/src/core/hle/kernel/svc_common.h
@@ -0,0 +1,14 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9namespace Kernel::Svc {
10
11constexpr s32 ArgumentHandleCountMax = 0x40;
12constexpr u32 HandleWaitMask{1u << 30};
13
14} // namespace Kernel::Svc
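HandleWaitMask sets bit 30 of a lock tag word. In the new condition-variable/arbiter code this plays the role the deleted Mutex::MutexHasWaitersFlag played earlier in this diff: the low bits of the tag hold the owner's handle, and the mask flags that other threads are queued on the address. An illustration with names of our own choosing:

// Illustrative only: tag layout assumed from the old MutexHasWaitersFlag usage.
u32 MakeContendedTag(Handle owner) {
    return owner | Svc::HandleWaitMask; // Owner handle plus "has waiters" bit.
}

bool TagHasWaiters(u32 tag) {
    return (tag & Svc::HandleWaitMask) != 0;
}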
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
new file mode 100644
index 000000000..78282f021
--- /dev/null
+++ b/src/core/hle/kernel/svc_results.h
@@ -0,0 +1,20 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "core/hle/result.h"
8
9namespace Kernel::Svc {
10
11constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
12constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
13constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
14constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
15constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
16constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
17constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
18constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
19
20} // namespace Kernel::Svc
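Each constant pairs ErrorModule::Kernel with a Horizon description number, so call sites can compare and propagate results directly. A hypothetical fragment in the style of the new WaitSynchronization:

const ResultCode result =
    KSynchronizationObject::Wait(kernel, &index, objects, num_objects, timeout_ns);
if (result == Svc::ResultTimedOut) {
    // An expected outcome of a finite wait, not a failure worth logging.
}
return result;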
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 11e1d8e2d..d623f7a50 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -65,4 +65,16 @@ struct MemoryInfo {
65 u32 padding{}; 65 u32 padding{};
66}; 66};
67 67
68enum class SignalType : u32 {
69 Signal = 0,
70 SignalAndIncrementIfEqual = 1,
71 SignalAndModifyByWaitingCountIfEqual = 2,
72};
73
74enum class ArbitrationType : u32 {
75 WaitIfLessThan = 0,
76 DecrementAndWaitIfLessThan = 1,
77 WaitIfEqual = 2,
78};
79
68} // namespace Kernel::Svc 80} // namespace Kernel::Svc
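These enums give the previously untyped u32 parameters of SignalToAddress and WaitForAddress proper names. The semantics live in k_address_arbiter.cpp (not shown in this section); the outline below reflects the commonly documented Horizon behavior and should be read as a sketch, with the caveat that the real implementation performs the value updates atomically on guest memory:

// Rough outline of the SignalType variants (illustrative, not the real code).
ResultCode SignalSketch(s32* address, Svc::SignalType type, s32 value, s32 count) {
    switch (type) {
    case Svc::SignalType::Signal:
        // Wake up to 'count' waiters; *address is left untouched.
        break;
    case Svc::SignalType::SignalAndIncrementIfEqual:
        // Signal only while *address == value, incrementing it first.
        if (*address != value) {
            return Svc::ResultInvalidState;
        }
        ++*address;
        break;
    case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
        // Signal only while *address == value; the new value depends on how
        // many waiters remain after up to 'count' are woken.
        break;
    }
    // ... wake up to 'count' waiting threads here ...
    return RESULT_SUCCESS;
}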
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 0b6dd9df0..a32750ed7 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -7,6 +7,7 @@
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/kernel/svc_types.h"
10#include "core/hle/result.h" 11#include "core/hle/result.h"
11 12
12namespace Kernel { 13namespace Kernel {
@@ -215,9 +216,10 @@ void SvcWrap64(Core::System& system) {
215 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw); 216 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
216} 217}
217 218
218template <ResultCode func(Core::System&, u32*, u64, u64, s64)> 219// Used by WaitSynchronization
220template <ResultCode func(Core::System&, s32*, u64, u64, s64)>
219void SvcWrap64(Core::System& system) { 221void SvcWrap64(Core::System& system) {
220 u32 param_1 = 0; 222 s32 param_1 = 0;
221 const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)), 223 const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
222 static_cast<s64>(Param(system, 3))) 224 static_cast<s64>(Param(system, 3)))
223 .raw; 225 .raw;
@@ -276,18 +278,22 @@ void SvcWrap64(Core::System& system) {
276 FuncReturn(system, retval); 278 FuncReturn(system, retval);
277} 279}
278 280
279template <ResultCode func(Core::System&, u64, u32, s32, s64)> 281// Used by WaitForAddress
282template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
280void SvcWrap64(Core::System& system) { 283void SvcWrap64(Core::System& system) {
281 FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), 284 FuncReturn(system,
282 static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3))) 285 func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
283 .raw); 286 static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
287 .raw);
284} 288}
285 289
286template <ResultCode func(Core::System&, u64, u32, s32, s32)> 290// Used by SignalToAddress
291template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
287void SvcWrap64(Core::System& system) { 292void SvcWrap64(Core::System& system) {
288 FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), 293 FuncReturn(system,
289 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) 294 func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
290 .raw); 295 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
296 .raw);
291} 297}
292 298
293//////////////////////////////////////////////////////////////////////////////////////////////////// 299////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -503,22 +509,23 @@ void SvcWrap32(Core::System& system) {
503} 509}
504 510
505// Used by WaitForAddress32 511// Used by WaitForAddress32
506template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)> 512template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
507void SvcWrap32(Core::System& system) { 513void SvcWrap32(Core::System& system) {
508 const u32 retval = func(system, static_cast<u32>(Param(system, 0)), 514 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
509 static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)), 515 static_cast<Svc::ArbitrationType>(Param(system, 1)),
510 static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4))) 516 static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
517 static_cast<u32>(Param(system, 4)))
511 .raw; 518 .raw;
512 FuncReturn(system, retval); 519 FuncReturn(system, retval);
513} 520}
514 521
515// Used by SignalToAddress32 522// Used by SignalToAddress32
516template <ResultCode func(Core::System&, u32, u32, s32, s32)> 523template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
517void SvcWrap32(Core::System& system) { 524void SvcWrap32(Core::System& system) {
518 const u32 retval = 525 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
519 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)), 526 static_cast<Svc::SignalType>(Param(system, 1)),
520 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) 527 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
521 .raw; 528 .raw;
522 FuncReturn(system, retval); 529 FuncReturn(system, retval);
523} 530}
524 531
@@ -539,9 +546,9 @@ void SvcWrap32(Core::System& system) {
539} 546}
540 547
541// Used by WaitSynchronization32 548// Used by WaitSynchronization32
542template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> 549template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
543void SvcWrap32(Core::System& system) { 550void SvcWrap32(Core::System& system) {
544 u32 param_1 = 0; 551 s32 param_1 = 0;
545 const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2), 552 const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
546 Param32(system, 3), &param_1) 553 Param32(system, 3), &param_1)
547 .raw; 554 .raw;
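Each SvcWrap specialization adapts the raw guest ABI (arguments read from registers via Param) to one typed handler, with the handler itself as the template parameter so a distinct wrapper is instantiated per SVC at compile time; SvcWrap64<WaitForAddress> then exposes the uniform void(Core::System&) shape a dispatch table needs. Schematically, using the same Param/FuncReturn helpers as above:

// Sketch of the marshaling pattern: registers in, typed call, raw result out.
template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
void ExampleWrap(Core::System& system) {
    const u32 raw = func(system, Param(system, 0),                            // address
                         static_cast<Svc::ArbitrationType>(Param(system, 1)), // arb_type
                         static_cast<s32>(Param(system, 2)),                  // value
                         static_cast<s64>(Param(system, 3)))                  // timeout_ns
                        .raw;
    FuncReturn(system, raw); // Write the result code back to the guest.
}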
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
deleted file mode 100644
index d3f520ea2..000000000
--- a/src/core/hle/kernel/synchronization.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/core.h"
6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/synchronization.h"
12#include "core/hle/kernel/synchronization_object.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/time_manager.h"
15
16namespace Kernel {
17
18Synchronization::Synchronization(Core::System& system) : system{system} {}
19
20void Synchronization::SignalObject(SynchronizationObject& obj) const {
21 auto& kernel = system.Kernel();
22 KScopedSchedulerLock lock(kernel);
23 if (obj.IsSignaled()) {
24 for (auto thread : obj.GetWaitingThreads()) {
25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
26 if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
27 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
28 ASSERT(thread->IsWaitingSync());
29 }
30 thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
31 thread->ResumeFromWait();
32 }
33 }
34 obj.ClearWaitingThreads();
35 }
36}
37
38std::pair<ResultCode, Handle> Synchronization::WaitFor(
39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
40 auto& kernel = system.Kernel();
41 auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
42 Handle event_handle = InvalidHandle;
43 {
44 KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
45 const auto itr =
46 std::find_if(sync_objects.begin(), sync_objects.end(),
47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
48 return object->IsSignaled();
49 });
50
51 if (itr != sync_objects.end()) {
52 // We found a ready object, acquire it and set the result value
53 SynchronizationObject* object = itr->get();
54 object->Acquire(thread);
55 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
56 lock.CancelSleep();
57 return {RESULT_SUCCESS, index};
58 }
59
60 if (nano_seconds == 0) {
61 lock.CancelSleep();
62 return {RESULT_TIMEOUT, InvalidHandle};
63 }
64
65 if (thread->IsPendingTermination()) {
66 lock.CancelSleep();
67 return {ERR_THREAD_TERMINATING, InvalidHandle};
68 }
69
70 if (thread->IsSyncCancelled()) {
71 thread->SetSyncCancelled(false);
72 lock.CancelSleep();
73 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
74 }
75
76 for (auto& object : sync_objects) {
77 object->AddWaitingThread(SharedFrom(thread));
78 }
79
80 thread->SetSynchronizationObjects(&sync_objects);
81 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
82 thread->SetStatus(ThreadStatus::WaitSynch);
83 thread->SetWaitingSync(true);
84 }
85 thread->SetWaitingSync(false);
86
87 if (event_handle != InvalidHandle) {
88 auto& time_manager = kernel.TimeManager();
89 time_manager.UnscheduleTimeEvent(event_handle);
90 }
91
92 {
93 KScopedSchedulerLock lock(kernel);
94 ResultCode signaling_result = thread->GetSignalingResult();
95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
96 thread->SetSynchronizationObjects(nullptr);
97 auto shared_thread = SharedFrom(thread);
98 for (auto& obj : sync_objects) {
99 obj->RemoveWaitingThread(shared_thread);
100 }
101 if (signaling_object != nullptr) {
102 const auto itr = std::find_if(
103 sync_objects.begin(), sync_objects.end(),
104 [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
105 return object.get() == signaling_object;
106 });
107 ASSERT(itr != sync_objects.end());
108 signaling_object->Acquire(thread);
109 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
110 return {signaling_result, index};
111 }
112 return {signaling_result, -1};
113 }
114}
115
116} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization.h b/src/core/hle/kernel/synchronization.h
deleted file mode 100644
index 379f4b1d3..000000000
--- a/src/core/hle/kernel/synchronization.h
+++ /dev/null
@@ -1,44 +0,0 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <utility>
9#include <vector>
10
11#include "core/hle/kernel/object.h"
12#include "core/hle/result.h"
13
14namespace Core {
15class System;
16} // namespace Core
17
18namespace Kernel {
19
20class SynchronizationObject;
21
22/**
23 * The 'Synchronization' class is an interface for handling synchronization methods
24 * used by Synchronization objects and synchronization SVCs. This centralizes the
25 * processing of such requests.
26 */
27class Synchronization {
28public:
29 explicit Synchronization(Core::System& system);
30
31 /// Signals a synchronization object, waking up all its waiting threads
32 void SignalObject(SynchronizationObject& obj) const;
33
34 /// Checks whether a wait on any of the sync_objects is necessary; if one is
35 /// already signaled, returns Success and the handle index of that object.
36 /// Otherwise, the current thread is blocked until nano_seconds elapse or
37 /// one of the synchronization objects is signaled.
38 std::pair<ResultCode, Handle> WaitFor(
39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
40
41private:
42 Core::System& system;
43};
44} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
deleted file mode 100644
index ba4d39157..000000000
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include "common/assert.h"
7#include "common/common_types.h"
8#include "common/logging/log.h"
9#include "core/core.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/process.h"
13#include "core/hle/kernel/synchronization.h"
14#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/kernel/thread.h"
16
17namespace Kernel {
18
19SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
20SynchronizationObject::~SynchronizationObject() = default;
21
22void SynchronizationObject::Signal() {
23 kernel.Synchronization().SignalObject(*this);
24}
25
26void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
27 auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
28 if (itr == waiting_threads.end())
29 waiting_threads.push_back(std::move(thread));
30}
31
32void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
33 auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
34 // If a thread passed multiple handles to the same object,
35 // the kernel might attempt to remove the thread from the object's
36 // waiting threads list multiple times.
37 if (itr != waiting_threads.end())
38 waiting_threads.erase(itr);
39}
40
41void SynchronizationObject::ClearWaitingThreads() {
42 waiting_threads.clear();
43}
44
45const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
46 return waiting_threads;
47}
48
49} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
deleted file mode 100644
index 7408ed51f..000000000
--- a/src/core/hle/kernel/synchronization_object.h
+++ /dev/null
@@ -1,77 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <memory>
9#include <vector>
10
11#include "core/hle/kernel/object.h"
12
13namespace Kernel {
14
15class KernelCore;
16class Synchronization;
17class Thread;
18
19/// Class that represents a Kernel object that a thread can be waiting on
20class SynchronizationObject : public Object {
21public:
22 explicit SynchronizationObject(KernelCore& kernel);
23 ~SynchronizationObject() override;
24
25 /**
26 * Check if the specified thread should wait until the object is available
27 * @param thread The thread about which we're deciding.
28 * @return True if the current thread should wait due to this object being unavailable
29 */
30 virtual bool ShouldWait(const Thread* thread) const = 0;
31
32 /// Acquire/lock the object for the specified thread if it is available
33 virtual void Acquire(Thread* thread) = 0;
34
35 /// Signal this object
36 virtual void Signal();
37
38 virtual bool IsSignaled() const {
39 return is_signaled;
40 }
41
42 /**
43 * Add a thread to wait on this object
44 * @param thread Pointer to thread to add
45 */
46 void AddWaitingThread(std::shared_ptr<Thread> thread);
47
48 /**
49 * Removes a thread from waiting on this object (e.g. if it was resumed already)
50 * @param thread Pointer to thread to remove
51 */
52 void RemoveWaitingThread(std::shared_ptr<Thread> thread);
53
54 /// Get a const reference to the waiting threads list for debug use
55 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
56
57 void ClearWaitingThreads();
58
59protected:
60 std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
61
62private:
63 /// Threads waiting for this object to become available
64 std::vector<std::shared_ptr<Thread>> waiting_threads;
65};
66
67// Specialization of DynamicObjectCast for SynchronizationObjects
68template <>
69inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
70 std::shared_ptr<Object> object) {
71 if (object != nullptr && object->IsWaitable()) {
72 return std::static_pointer_cast<SynchronizationObject>(object);
73 }
74 return nullptr;
75}
76
77} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a4f9e0d97..d97323255 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,9 +17,11 @@
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/errors.h" 18#include "core/hle/kernel/errors.h"
19#include "core/hle/kernel/handle_table.h" 19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_condition_variable.h"
20#include "core/hle/kernel/k_scheduler.h" 21#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 22#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
22#include "core/hle/kernel/kernel.h" 23#include "core/hle/kernel/kernel.h"
24#include "core/hle/kernel/memory/memory_layout.h"
23#include "core/hle/kernel/object.h" 25#include "core/hle/kernel/object.h"
24#include "core/hle/kernel/process.h" 26#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/thread.h" 27#include "core/hle/kernel/thread.h"
@@ -34,26 +36,19 @@
34 36
35namespace Kernel { 37namespace Kernel {
36 38
37bool Thread::ShouldWait(const Thread* thread) const {
38 return status != ThreadStatus::Dead;
39}
40
41bool Thread::IsSignaled() const { 39bool Thread::IsSignaled() const {
42 return status == ThreadStatus::Dead; 40 return signaled;
43}
44
45void Thread::Acquire(Thread* thread) {
46 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
47} 41}
48 42
49Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {} 43Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
50Thread::~Thread() = default; 44Thread::~Thread() = default;
51 45
52void Thread::Stop() { 46void Thread::Stop() {
53 { 47 {
54 KScopedSchedulerLock lock(kernel); 48 KScopedSchedulerLock lock(kernel);
55 SetStatus(ThreadStatus::Dead); 49 SetState(ThreadState::Terminated);
56 Signal(); 50 signaled = true;
51 NotifyAvailable();
57 kernel.GlobalHandleTable().Close(global_handle); 52 kernel.GlobalHandleTable().Close(global_handle);
58 53
59 if (owner_process) { 54 if (owner_process) {
@@ -67,59 +62,27 @@ void Thread::Stop() {
67 global_handle = 0; 62 global_handle = 0;
68} 63}
69 64
70void Thread::ResumeFromWait() { 65void Thread::Wakeup() {
71 KScopedSchedulerLock lock(kernel); 66 KScopedSchedulerLock lock(kernel);
72 switch (status) { 67 SetState(ThreadState::Runnable);
73 case ThreadStatus::Paused:
74 case ThreadStatus::WaitSynch:
75 case ThreadStatus::WaitHLEEvent:
76 case ThreadStatus::WaitSleep:
77 case ThreadStatus::WaitIPC:
78 case ThreadStatus::WaitMutex:
79 case ThreadStatus::WaitCondVar:
80 case ThreadStatus::WaitArb:
81 case ThreadStatus::Dormant:
82 break;
83
84 case ThreadStatus::Ready:
85 // The thread's wakeup callback must have already been cleared when the thread was first
86 // awoken.
87 ASSERT(hle_callback == nullptr);
88 // If the thread is waiting on multiple wait objects, it might be awoken more than once
89 // before actually resuming. We can ignore subsequent wakeups if the thread status has
90 // already been set to ThreadStatus::Ready.
91 return;
92 case ThreadStatus::Dead:
93 // This should never happen, as threads must complete before being stopped.
94 DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
95 GetObjectId());
96 return;
97 }
98
99 SetStatus(ThreadStatus::Ready);
100}
101
102void Thread::OnWakeUp() {
103 KScopedSchedulerLock lock(kernel);
104 SetStatus(ThreadStatus::Ready);
105} 68}
106 69
107ResultCode Thread::Start() { 70ResultCode Thread::Start() {
108 KScopedSchedulerLock lock(kernel); 71 KScopedSchedulerLock lock(kernel);
109 SetStatus(ThreadStatus::Ready); 72 SetState(ThreadState::Runnable);
110 return RESULT_SUCCESS; 73 return RESULT_SUCCESS;
111} 74}
112 75
113void Thread::CancelWait() { 76void Thread::CancelWait() {
114 KScopedSchedulerLock lock(kernel); 77 KScopedSchedulerLock lock(kernel);
115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { 78 if (GetState() != ThreadState::Waiting || !is_cancellable) {
116 is_sync_cancelled = true; 79 is_sync_cancelled = true;
117 return; 80 return;
118 } 81 }
119 // TODO(Blinkhawk): Implement cancel of server session 82 // TODO(Blinkhawk): Implement cancel of server session
120 is_sync_cancelled = false; 83 is_sync_cancelled = false;
121 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); 84 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
122 SetStatus(ThreadStatus::Ready); 85 SetState(ThreadState::Runnable);
123} 86}
124 87
125static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 88static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -183,25 +146,24 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
183 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); 146 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
184 147
185 thread->thread_id = kernel.CreateNewThreadID(); 148 thread->thread_id = kernel.CreateNewThreadID();
186 thread->status = ThreadStatus::Dormant; 149 thread->thread_state = ThreadState::Initialized;
187 thread->entry_point = entry_point; 150 thread->entry_point = entry_point;
188 thread->stack_top = stack_top; 151 thread->stack_top = stack_top;
189 thread->disable_count = 1; 152 thread->disable_count = 1;
190 thread->tpidr_el0 = 0; 153 thread->tpidr_el0 = 0;
191 thread->nominal_priority = thread->current_priority = priority; 154 thread->current_priority = priority;
155 thread->base_priority = priority;
156 thread->lock_owner = nullptr;
192 thread->schedule_count = -1; 157 thread->schedule_count = -1;
193 thread->last_scheduled_tick = 0; 158 thread->last_scheduled_tick = 0;
194 thread->processor_id = processor_id; 159 thread->processor_id = processor_id;
195 thread->ideal_core = processor_id; 160 thread->ideal_core = processor_id;
196 thread->affinity_mask.SetAffinity(processor_id, true); 161 thread->affinity_mask.SetAffinity(processor_id, true);
197 thread->wait_objects = nullptr;
198 thread->mutex_wait_address = 0;
199 thread->condvar_wait_address = 0;
200 thread->wait_handle = 0;
201 thread->name = std::move(name); 162 thread->name = std::move(name);
202 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); 163 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
203 thread->owner_process = owner_process; 164 thread->owner_process = owner_process;
204 thread->type = type_flags; 165 thread->type = type_flags;
166 thread->signaled = false;
205 if ((type_flags & THREADTYPE_IDLE) == 0) { 167 if ((type_flags & THREADTYPE_IDLE) == 0) {
206 auto& scheduler = kernel.GlobalSchedulerContext(); 168 auto& scheduler = kernel.GlobalSchedulerContext();
207 scheduler.AddThread(thread); 169 scheduler.AddThread(thread);
@@ -226,153 +188,185 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
226 return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); 188 return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
227} 189}
228 190
229void Thread::SetPriority(u32 priority) { 191void Thread::SetBasePriority(u32 priority) {
230 KScopedSchedulerLock lock(kernel);
231 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 192 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
232 "Invalid priority value."); 193 "Invalid priority value.");
233 nominal_priority = priority; 194
234 UpdatePriority(); 195 KScopedSchedulerLock lock(kernel);
196
197 // Change our base priority.
198 base_priority = priority;
199
200 // Perform a priority restoration.
201 RestorePriority(kernel, this);
235} 202}
236 203
237void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) { 204void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
238 signaling_object = object; 205 signaling_object = object;
239 signaling_result = result; 206 signaling_result = result;
240} 207}
241 208
242s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
243 ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
244 const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
245 return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
246}
247
248VAddr Thread::GetCommandBufferAddress() const { 209VAddr Thread::GetCommandBufferAddress() const {
249 // Offset from the start of TLS at which the IPC command buffer begins. 210 // Offset from the start of TLS at which the IPC command buffer begins.
250 constexpr u64 command_header_offset = 0x80; 211 constexpr u64 command_header_offset = 0x80;
251 return GetTLSAddress() + command_header_offset; 212 return GetTLSAddress() + command_header_offset;
252} 213}
253 214
254void Thread::SetStatus(ThreadStatus new_status) { 215void Thread::SetState(ThreadState state) {
255 if (new_status == status) { 216 KScopedSchedulerLock sl(kernel);
256 return;
257 }
258 217
259 switch (new_status) { 218 // Clear debugging state
260 case ThreadStatus::Ready: 219 SetMutexWaitAddressForDebugging({});
261 SetSchedulingStatus(ThreadSchedStatus::Runnable); 220 SetWaitReasonForDebugging({});
262 break;
263 case ThreadStatus::Dormant:
264 SetSchedulingStatus(ThreadSchedStatus::None);
265 break;
266 case ThreadStatus::Dead:
267 SetSchedulingStatus(ThreadSchedStatus::Exited);
268 break;
269 default:
270 SetSchedulingStatus(ThreadSchedStatus::Paused);
271 break;
272 }
273 221
274 status = new_status; 222 const ThreadState old_state = thread_state;
223 thread_state =
224 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
225 if (thread_state != old_state) {
226 KScheduler::OnThreadStateChanged(kernel, this, old_state);
227 }
275} 228}
276 229
277void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) { 230void Thread::AddWaiterImpl(Thread* thread) {
278 if (thread->lock_owner.get() == this) { 231 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
279 // If the thread is already waiting for this thread to release the mutex, ensure that the 232
280 // waiters list is consistent and return without doing anything. 233 // Find the right spot to insert the waiter.
281 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 234 auto it = waiter_list.begin();
282 ASSERT(iter != wait_mutex_threads.end()); 235 while (it != waiter_list.end()) {
283 return; 236 if (it->GetPriority() > thread->GetPriority()) {
237 break;
238 }
239 it++;
284 } 240 }
285 241
286 // A thread can't wait on two different mutexes at the same time. 242 // Keep track of how many kernel waiters we have.
287 ASSERT(thread->lock_owner == nullptr); 243 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
244 ASSERT((num_kernel_waiters++) >= 0);
245 }
288 246
289 // Ensure that the thread is not already in the list of mutex waiters 247 // Insert the waiter.
290 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 248 waiter_list.insert(it, *thread);
291 ASSERT(iter == wait_mutex_threads.end()); 249 thread->SetLockOwner(this);
+}
 
-    // Keep the list in an ordered fashion
-    const auto insertion_point = std::find_if(
-        wait_mutex_threads.begin(), wait_mutex_threads.end(),
-        [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
-    wait_mutex_threads.insert(insertion_point, thread);
-    thread->lock_owner = SharedFrom(this);
+void Thread::RemoveWaiterImpl(Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    UpdatePriority();
-}
+    // Keep track of how many kernel waiters we have.
+    if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+        ASSERT((num_kernel_waiters--) > 0);
+    }
 
-void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) {
-    ASSERT(thread->lock_owner.get() == this);
+    // Remove the waiter.
+    waiter_list.erase(waiter_list.iterator_to(*thread));
+    thread->SetLockOwner(nullptr);
+}
 
-    // Ensure that the thread is in the list of mutex waiters
-    const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread);
-    ASSERT(iter != wait_mutex_threads.end());
+void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
-    wait_mutex_threads.erase(iter);
+    while (true) {
+        // We want to inherit priority where possible.
+        s32 new_priority = thread->GetBasePriority();
+        if (thread->HasWaiters()) {
+            new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
+        }
 
-    thread->lock_owner = nullptr;
-    UpdatePriority();
-}
+        // If the priority we would inherit is not different from ours, don't do anything.
+        if (new_priority == thread->GetPriority()) {
+            return;
+        }
 
-void Thread::UpdatePriority() {
-    // If any of the threads waiting on the mutex have a higher priority
-    // (taking into account priority inheritance), then this thread inherits
-    // that thread's priority.
-    u32 new_priority = nominal_priority;
-    if (!wait_mutex_threads.empty()) {
-        if (wait_mutex_threads.front()->current_priority < new_priority) {
-            new_priority = wait_mutex_threads.front()->current_priority;
-        }
-    }
+        // Ensure we don't violate condition variable red black tree invariants.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            BeforeUpdatePriority(kernel, cv_tree, thread);
+        }
 
-    if (new_priority == current_priority) {
-        return;
-    }
+        // Change the priority.
+        const s32 old_priority = thread->GetPriority();
+        thread->SetPriority(new_priority);
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
-        owner_process->RemoveConditionVariableThread(SharedFrom(this));
-    }
+        // Restore the condition variable, if relevant.
+        if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
+            AfterUpdatePriority(kernel, cv_tree, thread);
+        }
 
-    SetCurrentPriority(new_priority);
+        // Update the scheduler.
+        KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
 
-    if (GetStatus() == ThreadStatus::WaitCondVar) {
-        owner_process->InsertConditionVariableThread(SharedFrom(this));
-    }
+        // Keep the lock owner up to date.
+        Thread* lock_owner = thread->GetLockOwner();
+        if (lock_owner == nullptr) {
+            return;
+        }
 
-    if (!lock_owner) {
-        return;
+        // Update the thread in the lock owner's sorted list, and continue inheriting.
+        lock_owner->RemoveWaiterImpl(thread);
+        lock_owner->AddWaiterImpl(thread);
+        thread = lock_owner;
     }
+}
 
-    // Ensure that the thread is within the correct location in the waiting list.
-    auto old_owner = lock_owner;
-    lock_owner->RemoveMutexWaiter(SharedFrom(this));
-    old_owner->AddMutexWaiter(SharedFrom(this));
-
-    // Recursively update the priority of the thread that depends on the priority of this one.
-    lock_owner->UpdatePriority();
+void Thread::AddWaiter(Thread* thread) {
+    AddWaiterImpl(thread);
+    RestorePriority(kernel, this);
 }
 
-bool Thread::AllSynchronizationObjectsReady() const {
-    return std::none_of(wait_objects->begin(), wait_objects->end(),
-                        [this](const std::shared_ptr<SynchronizationObject>& object) {
-                            return object->ShouldWait(this);
-                        });
+void Thread::RemoveWaiter(Thread* thread) {
+    RemoveWaiterImpl(thread);
+    RestorePriority(kernel, this);
 }
 
-bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) {
-    ASSERT(hle_callback);
-    return hle_callback(std::move(thread));
+Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
+    ASSERT(kernel.GlobalSchedulerContext().IsLocked());
+
+    s32 num_waiters{};
+    Thread* next_lock_owner{};
+    auto it = waiter_list.begin();
+    while (it != waiter_list.end()) {
+        if (it->GetAddressKey() == key) {
+            Thread* thread = std::addressof(*it);
+
+            // Keep track of how many kernel waiters we have.
+            if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
+                ASSERT((num_kernel_waiters--) > 0);
+            }
+            it = waiter_list.erase(it);
+
+            // Update the next lock owner.
+            if (next_lock_owner == nullptr) {
+                next_lock_owner = thread;
+                next_lock_owner->SetLockOwner(nullptr);
+            } else {
+                next_lock_owner->AddWaiterImpl(thread);
+            }
+            num_waiters++;
+        } else {
+            it++;
+        }
+    }
+
+    // Do priority updates, if we have a next owner.
+    if (next_lock_owner) {
+        RestorePriority(kernel, this);
+        RestorePriority(kernel, next_lock_owner);
+    }
+
+    // Return output.
+    *out_num_waiters = num_waiters;
+    return next_lock_owner;
 }
 
 ResultCode Thread::SetActivity(ThreadActivity value) {
     KScopedSchedulerLock lock(kernel);
 
-    auto sched_status = GetSchedulingStatus();
+    auto sched_status = GetState();
 
-    if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) {
+    if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
         return ERR_INVALID_STATE;
     }
 
-    if (IsPendingTermination()) {
+    if (IsTerminationRequested()) {
         return RESULT_SUCCESS;
     }
 
@@ -394,7 +388,8 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
     Handle event_handle{};
     {
         KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
-        SetStatus(ThreadStatus::WaitSleep);
+        SetState(ThreadState::Waiting);
+        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
     if (event_handle != InvalidHandle) {
@@ -405,34 +400,21 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
 }
 
 void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state |= static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
 void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
-    const u32 old_state = scheduling_state;
+    const auto old_state = GetRawState();
     pausing_state &= ~static_cast<u32>(flag);
-    const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus());
-    scheduling_state = base_scheduling | pausing_state;
+    const auto base_scheduling = GetState();
+    thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
     KScheduler::OnThreadStateChanged(kernel, this, old_state);
 }
 
-void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
-    const u32 old_state = scheduling_state;
-    scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
-                       static_cast<u32>(new_status);
-    KScheduler::OnThreadStateChanged(kernel, this, old_state);
-}
-
-void Thread::SetCurrentPriority(u32 new_priority) {
-    const u32 old_priority = std::exchange(current_priority, new_priority);
-    KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
-                                        old_priority);
-}
-
 ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     KScopedSchedulerLock lock(kernel);
     const auto HighestSetCore = [](u64 mask, u32 max_cores) {
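
Aside: the rewritten waiter machinery above replaces the old recursive UpdatePriority() with an iterative walk up the lock-owner chain. A minimal standalone sketch of that idea, using a hypothetical SimpleThread stand-in rather than the kernel's Thread (no scheduler lock, no condition-variable tree):

// Illustrative sketch only; SimpleThread and this RestorePriority are
// simplified stand-ins, not the kernel's types.
#include <algorithm>
#include <list>

struct SimpleThread {
    int base_priority;                 // priority with no inheritance applied
    int current_priority;              // effective (possibly inherited) priority
    SimpleThread* lock_owner{};        // thread holding the lock we wait on
    std::list<SimpleThread*> waiters;  // threads waiting on a lock we hold
};

// Walk up the lock-owner chain, letting each owner inherit the highest
// (numerically lowest) priority among its waiters.
void RestorePriority(SimpleThread* thread) {
    while (thread != nullptr) {
        int new_priority = thread->base_priority;
        for (const SimpleThread* waiter : thread->waiters) {
            new_priority = std::min(new_priority, waiter->current_priority);
        }
        if (new_priority == thread->current_priority) {
            return; // unchanged: nothing further up the chain can be affected
        }
        thread->current_priority = new_priority;
        thread = thread->lock_owner; // continue inheriting up the chain
    }
}

The early return is what keeps the walk cheap: as soon as one owner's effective priority is unchanged, every owner above it is already correct.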
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 11ef29888..6b66c9a0e 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,16 +6,21 @@
 
 #include <array>
 #include <functional>
+#include <span>
 #include <string>
 #include <utility>
 #include <vector>
 
+#include <boost/intrusive/list.hpp>
+
 #include "common/common_types.h"
+#include "common/intrusive_red_black_tree.h"
 #include "common/spin_lock.h"
 #include "core/arm/arm_interface.h"
 #include "core/hle/kernel/k_affinity_mask.h"
+#include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/object.h"
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/svc_common.h"
 #include "core/hle/result.h"
 
 namespace Common {
@@ -73,19 +78,24 @@ enum ThreadProcessorId : s32 {
     (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
 };
 
-enum class ThreadStatus {
-    Ready,        ///< Ready to run
-    Paused,       ///< Paused by SetThreadActivity or debug
-    WaitHLEEvent, ///< Waiting for hle event to finish
-    WaitSleep,    ///< Waiting due to a SleepThread SVC
-    WaitIPC,      ///< Waiting for the reply from an IPC request
-    WaitSynch,    ///< Waiting due to WaitSynchronization
-    WaitMutex,    ///< Waiting due to an ArbitrateLock svc
-    WaitCondVar,  ///< Waiting due to an WaitProcessWideKey svc
-    WaitArb,      ///< Waiting due to a SignalToAddress/WaitForAddress svc
-    Dormant,      ///< Created but not yet made ready
-    Dead          ///< Run to completion, or forcefully terminated
+enum class ThreadState : u16 {
+    Initialized = 0,
+    Waiting = 1,
+    Runnable = 2,
+    Terminated = 3,
+
+    SuspendShift = 4,
+    Mask = (1 << SuspendShift) - 1,
+
+    ProcessSuspended = (1 << (0 + SuspendShift)),
+    ThreadSuspended = (1 << (1 + SuspendShift)),
+    DebugSuspended = (1 << (2 + SuspendShift)),
+    BacktraceSuspended = (1 << (3 + SuspendShift)),
+    InitSuspended = (1 << (4 + SuspendShift)),
+
+    SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
 };
+DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
 
 enum class ThreadWakeupReason {
     Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
@@ -97,13 +107,6 @@ enum class ThreadActivity : u32 {
     Paused = 1,
 };
 
-enum class ThreadSchedStatus : u32 {
-    None = 0,
-    Paused = 1,
-    Runnable = 2,
-    Exited = 3,
-};
-
 enum class ThreadSchedFlags : u32 {
     ProcessPauseFlag = 1 << 4,
     ThreadPauseFlag = 1 << 5,
@@ -111,13 +114,20 @@ enum class ThreadSchedFlags : u32 {
     KernelInitPauseFlag = 1 << 8,
 };
 
-enum class ThreadSchedMasks : u32 {
-    LowMask = 0x000f,
-    HighMask = 0xfff0,
-    ForcePauseMask = 0x0070,
+enum class ThreadWaitReasonForDebugging : u32 {
+    None,            ///< Thread is not waiting
+    Sleep,           ///< Thread is waiting due to a SleepThread SVC
+    IPC,             ///< Thread is waiting for the reply from an IPC request
+    Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
+    ConditionVar,    ///< Thread is waiting due to a WaitProcessWideKey SVC
+    Arbitration,     ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
+    Suspended,       ///< Thread is waiting due to process suspension
 };
 
-class Thread final : public SynchronizationObject {
+class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
+    friend class KScheduler;
+    friend class Process;
+
 public:
     explicit Thread(KernelCore& kernel);
     ~Thread() override;
@@ -127,10 +137,6 @@ public:
     using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
     using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
 
-    using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
-
-    using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
-
     /**
      * Creates and returns a new thread. The new thread is immediately scheduled
      * @param system The instance of the whole system
@@ -186,59 +192,54 @@ public:
         return HANDLE_TYPE;
     }
 
-    bool ShouldWait(const Thread* thread) const override;
-    void Acquire(Thread* thread) override;
-    bool IsSignaled() const override;
-
     /**
      * Gets the thread's current priority
      * @return The current thread's priority
      */
-    u32 GetPriority() const {
+    [[nodiscard]] s32 GetPriority() const {
         return current_priority;
     }
 
     /**
+     * Sets the thread's current priority.
+     * @param priority The new priority.
+     */
+    void SetPriority(s32 priority) {
+        current_priority = priority;
+    }
+
+    /**
      * Gets the thread's nominal priority.
      * @return The current thread's nominal priority.
      */
-    u32 GetNominalPriority() const {
-        return nominal_priority;
+    [[nodiscard]] s32 GetBasePriority() const {
+        return base_priority;
     }
 
     /**
-     * Sets the thread's current priority
-     * @param priority The new priority
+     * Sets the thread's nominal priority.
+     * @param priority The new priority.
      */
-    void SetPriority(u32 priority);
-
-    /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
-    void AddMutexWaiter(std::shared_ptr<Thread> thread);
-
-    /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
-    void RemoveMutexWaiter(std::shared_ptr<Thread> thread);
-
-    /// Recalculates the current priority taking into account priority inheritance.
-    void UpdatePriority();
+    void SetBasePriority(u32 priority);
 
     /// Changes the core that the thread is running or scheduled to run on.
-    ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
+    [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
 
     /**
      * Gets the thread's thread ID
      * @return The thread's ID
      */
-    u64 GetThreadID() const {
+    [[nodiscard]] u64 GetThreadID() const {
         return thread_id;
     }
 
     /// Resumes a thread from waiting
-    void ResumeFromWait();
-
-    void OnWakeUp();
+    void Wakeup();
 
     ResultCode Start();
 
+    virtual bool IsSignaled() const override;
+
     /// Cancels a waiting operation that this thread may or may not be within.
     ///
     /// When the thread is within a waiting state, this will set the thread's
@@ -247,29 +248,20 @@ public:
     ///
     void CancelWait();
 
-    void SetSynchronizationResults(SynchronizationObject* object, ResultCode result);
+    void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result);
 
-    SynchronizationObject* GetSignalingObject() const {
-        return signaling_object;
+    void SetSyncedObject(KSynchronizationObject* object, ResultCode result) {
+        SetSynchronizationResults(object, result);
     }
 
-    ResultCode GetSignalingResult() const {
+    ResultCode GetWaitResult(KSynchronizationObject** out) const {
+        *out = signaling_object;
         return signaling_result;
     }
 
-    /**
-     * Retrieves the index that this particular object occupies in the list of objects
-     * that the thread passed to WaitSynchronization, starting the search from the last element.
-     *
-     * It is used to set the output index of WaitSynchronization when the thread is awakened.
-     *
-     * When a thread wakes up due to an object signal, the kernel will use the index of the last
-     * matching object in the wait objects list in case of having multiple instances of the same
-     * object in the list.
-     *
-     * @param object Object to query the index of.
-     */
-    s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;
+    ResultCode GetSignalingResult() const {
+        return signaling_result;
+    }
 
     /**
      * Stops a thread, invalidating it from further use
@@ -341,18 +333,22 @@ public:
 
     std::shared_ptr<Common::Fiber>& GetHostContext();
 
-    ThreadStatus GetStatus() const {
-        return status;
+    ThreadState GetState() const {
+        return thread_state & ThreadState::Mask;
+    }
+
+    ThreadState GetRawState() const {
+        return thread_state;
     }
 
-    void SetStatus(ThreadStatus new_status);
+    void SetState(ThreadState state);
 
     s64 GetLastScheduledTick() const {
-        return this->last_scheduled_tick;
+        return last_scheduled_tick;
     }
 
     void SetLastScheduledTick(s64 tick) {
-        this->last_scheduled_tick = tick;
+        last_scheduled_tick = tick;
     }
 
     u64 GetTotalCPUTimeTicks() const {
@@ -387,98 +383,18 @@ public:
         return owner_process;
     }
 
-    const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
-        return *wait_objects;
-    }
-
-    void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
-        wait_objects = objects;
-    }
-
-    void ClearSynchronizationObjects() {
-        for (const auto& waiting_object : *wait_objects) {
-            waiting_object->RemoveWaitingThread(SharedFrom(this));
-        }
-        wait_objects->clear();
-    }
-
-    /// Determines whether all the objects this thread is waiting on are ready.
-    bool AllSynchronizationObjectsReady() const;
-
     const MutexWaitingThreads& GetMutexWaitingThreads() const {
         return wait_mutex_threads;
     }
 
     Thread* GetLockOwner() const {
-        return lock_owner.get();
-    }
-
-    void SetLockOwner(std::shared_ptr<Thread> owner) {
-        lock_owner = std::move(owner);
-    }
-
-    VAddr GetCondVarWaitAddress() const {
-        return condvar_wait_address;
-    }
-
-    void SetCondVarWaitAddress(VAddr address) {
-        condvar_wait_address = address;
-    }
-
-    VAddr GetMutexWaitAddress() const {
-        return mutex_wait_address;
-    }
-
-    void SetMutexWaitAddress(VAddr address) {
-        mutex_wait_address = address;
-    }
-
-    Handle GetWaitHandle() const {
-        return wait_handle;
-    }
-
-    void SetWaitHandle(Handle handle) {
-        wait_handle = handle;
-    }
-
-    VAddr GetArbiterWaitAddress() const {
-        return arb_wait_address;
-    }
-
-    void SetArbiterWaitAddress(VAddr address) {
-        arb_wait_address = address;
-    }
-
-    bool HasHLECallback() const {
-        return hle_callback != nullptr;
-    }
-
-    void SetHLECallback(HLECallback callback) {
-        hle_callback = std::move(callback);
-    }
-
-    void SetHLETimeEvent(Handle time_event) {
-        hle_time_event = time_event;
-    }
-
-    void SetHLESyncObject(SynchronizationObject* object) {
-        hle_object = object;
-    }
-
-    Handle GetHLETimeEvent() const {
-        return hle_time_event;
-    }
-
-    SynchronizationObject* GetHLESyncObject() const {
-        return hle_object;
+        return lock_owner;
     }
 
-    void InvalidateHLECallback() {
-        SetHLECallback(nullptr);
+    void SetLockOwner(Thread* owner) {
+        lock_owner = owner;
     }
 
-    bool InvokeHLECallback(std::shared_ptr<Thread> thread);
-
     u32 GetIdealCore() const {
         return ideal_core;
     }
@@ -493,20 +409,11 @@ public:
     ResultCode Sleep(s64 nanoseconds);
 
     s64 GetYieldScheduleCount() const {
-        return this->schedule_count;
+        return schedule_count;
     }
 
     void SetYieldScheduleCount(s64 count) {
-        this->schedule_count = count;
-    }
-
-    ThreadSchedStatus GetSchedulingStatus() const {
-        return static_cast<ThreadSchedStatus>(scheduling_state &
-                                              static_cast<u32>(ThreadSchedMasks::LowMask));
-    }
-
-    bool IsRunnable() const {
-        return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
+        schedule_count = count;
     }
 
     bool IsRunning() const {
@@ -517,36 +424,32 @@ public:
         is_running = value;
     }
 
-    bool IsSyncCancelled() const {
+    bool IsWaitCancelled() const {
         return is_sync_cancelled;
     }
 
-    void SetSyncCancelled(bool value) {
-        is_sync_cancelled = value;
+    void ClearWaitCancelled() {
+        is_sync_cancelled = false;
     }
 
     Handle GetGlobalHandle() const {
         return global_handle;
     }
 
-    bool IsWaitingForArbitration() const {
-        return waiting_for_arbitration;
+    bool IsCancellable() const {
+        return is_cancellable;
     }
 
-    void WaitForArbitration(bool set) {
-        waiting_for_arbitration = set;
+    void SetCancellable() {
+        is_cancellable = true;
     }
 
-    bool IsWaitingSync() const {
-        return is_waiting_on_sync;
+    void ClearCancellable() {
+        is_cancellable = false;
     }
 
-    void SetWaitingSync(bool is_waiting) {
-        is_waiting_on_sync = is_waiting;
-    }
-
-    bool IsPendingTermination() const {
-        return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
+    bool IsTerminationRequested() const {
+        return will_be_terminated || GetRawState() == ThreadState::Terminated;
     }
 
     bool IsPaused() const {
@@ -578,21 +481,21 @@ public:
         constexpr QueueEntry() = default;
 
         constexpr void Initialize() {
-            this->prev = nullptr;
-            this->next = nullptr;
+            prev = nullptr;
+            next = nullptr;
         }
 
         constexpr Thread* GetPrev() const {
-            return this->prev;
+            return prev;
         }
         constexpr Thread* GetNext() const {
-            return this->next;
+            return next;
         }
         constexpr void SetPrev(Thread* thread) {
-            this->prev = thread;
+            prev = thread;
         }
         constexpr void SetNext(Thread* thread) {
-            this->next = thread;
+            next = thread;
         }
 
     private:
@@ -601,11 +504,11 @@ public:
     };
 
     QueueEntry& GetPriorityQueueEntry(s32 core) {
-        return this->per_core_priority_queue_entry[core];
+        return per_core_priority_queue_entry[core];
     }
 
     const QueueEntry& GetPriorityQueueEntry(s32 core) const {
-        return this->per_core_priority_queue_entry[core];
+        return per_core_priority_queue_entry[core];
     }
 
     s32 GetDisableDispatchCount() const {
@@ -622,24 +525,170 @@ public:
         disable_count--;
     }
 
+    void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
+        wait_reason_for_debugging = reason;
+    }
+
+    [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
+        return wait_reason_for_debugging;
+    }
+
+    void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
+        wait_objects_for_debugging.clear();
+        wait_objects_for_debugging.reserve(objects.size());
+        for (const auto& object : objects) {
+            wait_objects_for_debugging.emplace_back(object);
+        }
+    }
+
+    [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
+        return wait_objects_for_debugging;
+    }
+
+    void SetMutexWaitAddressForDebugging(VAddr address) {
+        mutex_wait_address_for_debugging = address;
+    }
+
+    [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
+        return mutex_wait_address_for_debugging;
+    }
+
+    void AddWaiter(Thread* thread);
+
+    void RemoveWaiter(Thread* thread);
+
+    [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
+
+    [[nodiscard]] VAddr GetAddressKey() const {
+        return address_key;
+    }
+
+    [[nodiscard]] u32 GetAddressKeyValue() const {
+        return address_key_value;
+    }
+
+    void SetAddressKey(VAddr key) {
+        address_key = key;
+    }
+
+    void SetAddressKey(VAddr key, u32 val) {
+        address_key = key;
+        address_key_value = val;
+    }
+
 private:
-    friend class GlobalSchedulerContext;
-    friend class KScheduler;
-    friend class Process;
+    static constexpr size_t PriorityInheritanceCountMax = 10;
+    union SyncObjectBuffer {
+        std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
+        std::array<Handle,
+                   Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
+            handles;
+        constexpr SyncObjectBuffer() {}
+    };
+    static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
+
+    struct ConditionVariableComparator {
+        struct LightCompareType {
+            u64 cv_key{};
+            s32 priority{};
+
+            [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
+                return cv_key;
+            }
+
+            [[nodiscard]] constexpr s32 GetPriority() const {
+                return priority;
+            }
+        };
+
+        template <typename T>
+        requires(
+            std::same_as<T, Thread> ||
+            std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
+                                                                            const Thread& rhs) {
+            const uintptr_t l_key = lhs.GetConditionVariableKey();
+            const uintptr_t r_key = rhs.GetConditionVariableKey();
+
+            if (l_key < r_key) {
+                // Sort first by key
+                return -1;
+            } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
+                // And then by priority.
+                return -1;
+            } else {
+                return 1;
+            }
+        }
+    };
+
+    Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
+
+    using ConditionVariableThreadTreeTraits =
+        Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
+    using ConditionVariableThreadTree =
+        ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
+
+public:
+    using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
+
+    [[nodiscard]] uintptr_t GetConditionVariableKey() const {
+        return condvar_key;
+    }
+
+    [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
+        return condvar_key;
+    }
 
-    void SetSchedulingStatus(ThreadSchedStatus new_status);
+    void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
+                              u32 value) {
+        condvar_tree = tree;
+        condvar_key = cv_key;
+        address_key = address;
+        address_key_value = value;
+    }
+
+    void ClearConditionVariable() {
+        condvar_tree = nullptr;
+    }
+
+    [[nodiscard]] bool IsWaitingForConditionVariable() const {
+        return condvar_tree != nullptr;
+    }
+
+    void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
+        condvar_tree = tree;
+        condvar_key = address;
+    }
+
+    void ClearAddressArbiter() {
+        condvar_tree = nullptr;
+    }
+
+    [[nodiscard]] bool IsWaitingForAddressArbiter() const {
+        return condvar_tree != nullptr;
+    }
+
+    [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
+        return condvar_tree;
+    }
+
+    [[nodiscard]] bool HasWaiters() const {
+        return !waiter_list.empty();
+    }
+
+private:
     void AddSchedulingFlag(ThreadSchedFlags flag);
     void RemoveSchedulingFlag(ThreadSchedFlags flag);
-
-    void SetCurrentPriority(u32 new_priority);
+    void AddWaiterImpl(Thread* thread);
+    void RemoveWaiterImpl(Thread* thread);
+    static void RestorePriority(KernelCore& kernel, Thread* thread);
 
     Common::SpinLock context_guard{};
     ThreadContext32 context_32{};
     ThreadContext64 context_64{};
     std::shared_ptr<Common::Fiber> host_context{};
 
-    ThreadStatus status = ThreadStatus::Dormant;
-    u32 scheduling_state = 0;
+    ThreadState thread_state = ThreadState::Initialized;
 
     u64 thread_id = 0;
 
@@ -652,11 +701,11 @@ private:
     /// Nominal thread priority, as set by the emulated application.
     /// The nominal priority is the thread priority without priority
     /// inheritance taken into account.
-    u32 nominal_priority = 0;
+    s32 base_priority{};
 
     /// Current thread priority. This may change over the course of the
     /// thread's lifetime in order to facilitate priority inheritance.
-    u32 current_priority = 0;
+    s32 current_priority{};
 
     u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
     s64 schedule_count{};
@@ -671,37 +720,27 @@ private:
     Process* owner_process;
 
     /// Objects that the thread is waiting on, in the same order as they were
-    /// passed to WaitSynchronization.
-    ThreadSynchronizationObjects* wait_objects;
+    /// passed to WaitSynchronization. This is used for debugging only.
+    std::vector<KSynchronizationObject*> wait_objects_for_debugging;
 
-    SynchronizationObject* signaling_object;
+    /// The current mutex wait address. This is used for debugging only.
+    VAddr mutex_wait_address_for_debugging{};
+
+    /// The reason the thread is waiting. This is used for debugging only.
+    ThreadWaitReasonForDebugging wait_reason_for_debugging{};
+
+    KSynchronizationObject* signaling_object;
     ResultCode signaling_result{RESULT_SUCCESS};
 
     /// List of threads that are waiting for a mutex that is held by this thread.
     MutexWaitingThreads wait_mutex_threads;
 
     /// Thread that owns the lock that this thread is waiting for.
-    std::shared_ptr<Thread> lock_owner;
-
-    /// If waiting on a ConditionVariable, this is the ConditionVariable address
-    VAddr condvar_wait_address = 0;
-    /// If waiting on a Mutex, this is the mutex address
-    VAddr mutex_wait_address = 0;
-    /// The handle used to wait for the mutex.
-    Handle wait_handle = 0;
-
-    /// If waiting for an AddressArbiter, this is the address being waited on.
-    VAddr arb_wait_address{0};
-    bool waiting_for_arbitration{};
+    Thread* lock_owner{};
 
     /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
     Handle global_handle = 0;
 
-    /// Callback for HLE Events
-    HLECallback hle_callback;
-    Handle hle_time_event;
-    SynchronizationObject* hle_object;
-
     KScheduler* scheduler = nullptr;
 
     std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
@@ -714,7 +753,7 @@ private:
 
     u32 pausing_state = 0;
     bool is_running = false;
-    bool is_waiting_on_sync = false;
+    bool is_cancellable = false;
     bool is_sync_cancelled = false;
 
     bool is_continuous_on_svc = false;
@@ -725,6 +764,18 @@ private:
 
     bool was_running = false;
 
+    bool signaled{};
+
+    ConditionVariableThreadTree* condvar_tree{};
+    uintptr_t condvar_key{};
+    VAddr address_key{};
+    u32 address_key_value{};
+    s32 num_kernel_waiters{};
+
+    using WaiterList = boost::intrusive::list<Thread>;
+    WaiterList waiter_list{};
+    WaiterList pinned_waiter_list{};
+
     std::string name;
 };
 
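Aside: ThreadState above packs the scheduler-visible state into the low four bits and the suspend flags above them, which is why GetState() masks with ThreadState::Mask while GetRawState() returns the value untouched. A minimal sketch of that layout with plain integers (the constants mirror the enum; the names here are illustrative, and the real code operates on the u16 enum class via DECLARE_ENUM_FLAG_OPERATORS):

// Sketch of the ThreadState bit layout, not the kernel's actual types.
#include <cassert>
#include <cstdint>

constexpr std::uint16_t StateMask = (1 << 4) - 1;       // ThreadState::Mask
constexpr std::uint16_t ThreadSuspended = 1 << (1 + 4); // ThreadState::ThreadSuspended

int main() {
    std::uint16_t raw = 2 /* Runnable */ | ThreadSuspended;
    assert((raw & StateMask) == 2); // GetState(): still Runnable underneath
    assert(raw != (raw & StateMask)); // GetRawState() also carries the suspend bit
}

Suspension therefore no longer destroys the underlying state: clearing a suspend flag restores a runnable thread without any separate bookkeeping.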
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 79628e2b4..832edd629 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -18,12 +18,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
     time_manager_event_type = Core::Timing::CreateEvent(
         "Kernel::TimeManagerCallback",
         [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
-            const KScopedSchedulerLock lock(system.Kernel());
-            const auto proper_handle = static_cast<Handle>(thread_handle);
-
             std::shared_ptr<Thread> thread;
             {
                 std::lock_guard lock{mutex};
+                const auto proper_handle = static_cast<Handle>(thread_handle);
                 if (cancelled_events[proper_handle]) {
                     return;
                 }
@@ -32,7 +30,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
 
             if (thread) {
                 // Thread can be null if process has exited
-                thread->OnWakeUp();
+                thread->Wakeup();
             }
         });
 }
@@ -42,8 +40,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
     event_handle = timetask->GetGlobalHandle();
     if (nanoseconds > 0) {
         ASSERT(timetask);
-        ASSERT(timetask->GetStatus() != ThreadStatus::Ready);
-        ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
+        ASSERT(timetask->GetState() != ThreadState::Runnable);
         system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
                                           time_manager_event_type, event_handle);
     } else {
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 5557da72e..641bcadea 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -190,12 +190,6 @@ private:
     void GetDeviceState(Kernel::HLERequestContext& ctx) {
         LOG_DEBUG(Service_NFP, "called");
 
-        auto nfc_event = nfp_interface.GetNFCEvent();
-        if (!nfc_event->ShouldWait(&ctx.GetThread()) && !has_attached_handle) {
-            device_state = DeviceState::TagFound;
-            nfc_event->Clear();
-        }
-
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
         rb.Push<u32>(static_cast<u32>(device_state));
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 4b3581949..ceaa93d28 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -38,6 +38,10 @@ void NVFlinger::SplitVSync() {
     system.RegisterHostThread();
     std::string name = "yuzu:VSyncThread";
     MicroProfileOnThreadCreate(name.c_str());
+
+    // Cleanup
+    SCOPE_EXIT({ MicroProfileOnThreadExit(); });
+
     Common::SetCurrentThreadName(name.c_str());
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
     s64 delay = 0;
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 4da69f503..2b91a89d1 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -139,9 +139,6 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
         server_port->AppendPendingSession(server);
     }
 
-    // Wake the threads waiting on the ServerPort
-    server_port->Signal();
-
     LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
     IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
     rb.Push(RESULT_SUCCESS);
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 0925c10b4..a93b5d3c2 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -14,10 +14,10 @@
 #include "core/core.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/k_scheduler.h"
-#include "core/hle/kernel/mutex.h"
+#include "core/hle/kernel/k_synchronization_object.h"
 #include "core/hle/kernel/process.h"
 #include "core/hle/kernel/readable_event.h"
-#include "core/hle/kernel/synchronization_object.h"
+#include "core/hle/kernel/svc_common.h"
 #include "core/hle/kernel/thread.h"
 #include "core/memory.h"
 
@@ -116,7 +116,7 @@ QString WaitTreeText::GetText() const {
 WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table)
     : mutex_address(mutex_address) {
     mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
-    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask);
+    owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
     owner = handle_table.Get<Kernel::Thread>(owner_handle);
 }
 
@@ -127,7 +127,7 @@ QString WaitTreeMutexInfo::GetText() const {
 }
 
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
-    const bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0;
+    const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;
 
     std::vector<std::unique_ptr<WaitTreeItem>> list;
     list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
@@ -169,7 +169,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
     return list;
 }
 
-WaitTreeSynchronizationObject::WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& o)
+WaitTreeSynchronizationObject::WaitTreeSynchronizationObject(
+    const Kernel::KSynchronizationObject& o)
     : object(o) {}
 WaitTreeSynchronizationObject::~WaitTreeSynchronizationObject() = default;
 
@@ -188,7 +189,7 @@ QString WaitTreeSynchronizationObject::GetText() const {
 }
 
 std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make(
-    const Kernel::SynchronizationObject& object) {
+    const Kernel::KSynchronizationObject& object) {
     switch (object.GetHandleType()) {
     case Kernel::HandleType::ReadableEvent:
         return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object));
@@ -202,7 +203,7 @@ std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::ma
 std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list;
 
-    const auto& threads = object.GetWaitingThreads();
+    const auto& threads = object.GetWaitingThreadsForDebugging();
     if (threads.empty()) {
         list.push_back(std::make_unique<WaitTreeText>(tr("waited by no thread")));
     } else {
@@ -211,8 +212,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChi
     return list;
 }
 
-WaitTreeObjectList::WaitTreeObjectList(
-    const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& list, bool w_all)
+WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
+                                       bool w_all)
     : object_list(list), wait_all(w_all) {}
 
 WaitTreeObjectList::~WaitTreeObjectList() = default;
@@ -237,8 +238,8 @@ WaitTreeThread::~WaitTreeThread() = default;
 QString WaitTreeThread::GetText() const {
     const auto& thread = static_cast<const Kernel::Thread&>(object);
     QString status;
-    switch (thread.GetStatus()) {
-    case Kernel::ThreadStatus::Ready:
+    switch (thread.GetState()) {
+    case Kernel::ThreadState::Runnable:
         if (!thread.IsPaused()) {
             if (thread.WasRunning()) {
                 status = tr("running");
@@ -249,35 +250,39 @@ QString WaitTreeThread::GetText() const {
             status = tr("paused");
         }
         break;
-    case Kernel::ThreadStatus::Paused:
-        status = tr("paused");
-        break;
-    case Kernel::ThreadStatus::WaitHLEEvent:
-        status = tr("waiting for HLE return");
-        break;
-    case Kernel::ThreadStatus::WaitSleep:
-        status = tr("sleeping");
-        break;
-    case Kernel::ThreadStatus::WaitIPC:
-        status = tr("waiting for IPC reply");
-        break;
-    case Kernel::ThreadStatus::WaitSynch:
-        status = tr("waiting for objects");
-        break;
-    case Kernel::ThreadStatus::WaitMutex:
-        status = tr("waiting for mutex");
-        break;
-    case Kernel::ThreadStatus::WaitCondVar:
-        status = tr("waiting for condition variable");
+    case Kernel::ThreadState::Waiting:
+        switch (thread.GetWaitReasonForDebugging()) {
+        case Kernel::ThreadWaitReasonForDebugging::Sleep:
+            status = tr("sleeping");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::IPC:
+            status = tr("waiting for IPC reply");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Synchronization:
+            status = tr("waiting for objects");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
+            status = tr("waiting for condition variable");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Arbitration:
+            status = tr("waiting for address arbiter");
+            break;
+        case Kernel::ThreadWaitReasonForDebugging::Suspended:
+            status = tr("waiting for suspend resume");
+            break;
+        default:
+            status = tr("waiting");
+            break;
+        }
         break;
-    case Kernel::ThreadStatus::WaitArb:
-        status = tr("waiting for address arbiter");
-        break;
-    case Kernel::ThreadStatus::Dormant:
-        status = tr("dormant");
+    case Kernel::ThreadState::Initialized:
+        status = tr("initialized");
         break;
-    case Kernel::ThreadStatus::Dead:
-        status = tr("dead");
+    case Kernel::ThreadState::Terminated:
+        status = tr("terminated");
+        break;
+    default:
+        status = tr("unknown");
         break;
     }
 
@@ -293,8 +298,8 @@ QColor WaitTreeThread::GetColor() const {
     const std::size_t color_index = IsDarkTheme() ? 1 : 0;
 
     const auto& thread = static_cast<const Kernel::Thread&>(object);
-    switch (thread.GetStatus()) {
-    case Kernel::ThreadStatus::Ready:
+    switch (thread.GetState()) {
+    case Kernel::ThreadState::Runnable:
         if (!thread.IsPaused()) {
             if (thread.WasRunning()) {
                 return QColor(WaitTreeColors[0][color_index]);
@@ -304,21 +309,24 @@ QColor WaitTreeThread::GetColor() const {
         } else {
             return QColor(WaitTreeColors[2][color_index]);
         }
-    case Kernel::ThreadStatus::Paused:
-        return QColor(WaitTreeColors[3][color_index]);
-    case Kernel::ThreadStatus::WaitHLEEvent:
-    case Kernel::ThreadStatus::WaitIPC:
-        return QColor(WaitTreeColors[4][color_index]);
-    case Kernel::ThreadStatus::WaitSleep:
-        return QColor(WaitTreeColors[5][color_index]);
-    case Kernel::ThreadStatus::WaitSynch:
-    case Kernel::ThreadStatus::WaitMutex:
-    case Kernel::ThreadStatus::WaitCondVar:
-    case Kernel::ThreadStatus::WaitArb:
-        return QColor(WaitTreeColors[6][color_index]);
-    case Kernel::ThreadStatus::Dormant:
+    case Kernel::ThreadState::Waiting:
+        switch (thread.GetWaitReasonForDebugging()) {
+        case Kernel::ThreadWaitReasonForDebugging::IPC:
+            return QColor(WaitTreeColors[4][color_index]);
+        case Kernel::ThreadWaitReasonForDebugging::Sleep:
+            return QColor(WaitTreeColors[5][color_index]);
+        case Kernel::ThreadWaitReasonForDebugging::Synchronization:
+        case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
+        case Kernel::ThreadWaitReasonForDebugging::Arbitration:
+        case Kernel::ThreadWaitReasonForDebugging::Suspended:
+            return QColor(WaitTreeColors[6][color_index]);
+            break;
+        default:
+            return QColor(WaitTreeColors[3][color_index]);
+        }
+    case Kernel::ThreadState::Initialized:
         return QColor(WaitTreeColors[7][color_index]);
-    case Kernel::ThreadStatus::Dead:
+    case Kernel::ThreadState::Terminated:
         return QColor(WaitTreeColors[8][color_index]);
     default:
         return WaitTreeItem::GetColor();
@@ -354,11 +362,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.GetPriority())
-                                                      .arg(thread.GetNominalPriority())));
+                                                      .arg(thread.GetBasePriority())));
     list.push_back(std::make_unique<WaitTreeText>(
         tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
 
-    const VAddr mutex_wait_address = thread.GetMutexWaitAddress();
+    const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
     if (mutex_wait_address != 0) {
         const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
         list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table));
@@ -366,9 +374,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
         list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
     }
 
-    if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) {
-        list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetSynchronizationObjects(),
-                                                            thread.IsWaitingSync()));
+    if (thread.GetState() == Kernel::ThreadState::Waiting &&
+        thread.GetWaitReasonForDebugging() ==
+            Kernel::ThreadWaitReasonForDebugging::Synchronization) {
+        list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
+                                                            thread.IsCancellable()));
     }
 
     list.push_back(std::make_unique<WaitTreeCallstack>(thread));
@@ -380,7 +390,7 @@ WaitTreeEvent::WaitTreeEvent(const Kernel::ReadableEvent& object)
     : WaitTreeSynchronizationObject(object) {}
 WaitTreeEvent::~WaitTreeEvent() = default;
 
-WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list)
+WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::Thread*>& list)
     : thread_list(list) {}
 WaitTreeThreadList::~WaitTreeThreadList() = default;
 
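Aside: with ThreadStatus gone, the wait tree derives its labels in two steps, coarse ThreadState first and the debug-only wait reason second. A minimal sketch of that dispatch, with hypothetical stand-in enums and plain strings in place of Qt's tr():

// Illustrative only; these enums abbreviate the kernel's and StatusLabel is
// a stand-in for WaitTreeThread::GetText().
#include <string>

enum class ThreadState { Initialized, Waiting, Runnable, Terminated };
enum class WaitReason { None, Sleep, IPC, Synchronization };

std::string StatusLabel(ThreadState state, WaitReason reason) {
    switch (state) {
    case ThreadState::Runnable:
        return "running";
    case ThreadState::Waiting:
        // The coarse state only says "waiting"; the reason refines it.
        switch (reason) {
        case WaitReason::Sleep:
            return "sleeping";
        case WaitReason::IPC:
            return "waiting for IPC reply";
        case WaitReason::Synchronization:
            return "waiting for objects";
        default:
            return "waiting";
        }
    case ThreadState::Initialized:
        return "initialized";
    case ThreadState::Terminated:
        return "terminated";
    default:
        return "unknown";
    }
}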
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index 8e3bc4b24..cf96911ea 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -18,8 +18,8 @@ class EmuThread;
 
 namespace Kernel {
 class HandleTable;
+class KSynchronizationObject;
 class ReadableEvent;
-class SynchronizationObject;
 class Thread;
 } // namespace Kernel
 
@@ -102,30 +102,29 @@ private:
 class WaitTreeSynchronizationObject : public WaitTreeExpandableItem {
     Q_OBJECT
 public:
-    explicit WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& object);
+    explicit WaitTreeSynchronizationObject(const Kernel::KSynchronizationObject& object);
     ~WaitTreeSynchronizationObject() override;
 
     static std::unique_ptr<WaitTreeSynchronizationObject> make(
-        const Kernel::SynchronizationObject& object);
+        const Kernel::KSynchronizationObject& object);
     QString GetText() const override;
     std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
 
 protected:
-    const Kernel::SynchronizationObject& object;
+    const Kernel::KSynchronizationObject& object;
 };
 
 class WaitTreeObjectList : public WaitTreeExpandableItem {
     Q_OBJECT
 public:
-    WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& list,
-                       bool wait_all);
+    WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all);
     ~WaitTreeObjectList() override;
 
     QString GetText() const override;
     std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
 
 private:
-    const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& object_list;
+    const std::vector<Kernel::KSynchronizationObject*>& object_list;
     bool wait_all;
 };
 
@@ -150,14 +149,14 @@ public:
 class WaitTreeThreadList : public WaitTreeExpandableItem {
     Q_OBJECT
 public:
-    explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list);
+    explicit WaitTreeThreadList(const std::vector<Kernel::Thread*>& list);
     ~WaitTreeThreadList() override;
 
     QString GetText() const override;
     std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
 
 private:
-    const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list;
+    const std::vector<Kernel::Thread*>& thread_list;
 };
 
 class WaitTreeModel : public QAbstractItemModel {