author     Liam  2023-03-06 22:23:56 -0500
committer  Liam  2023-03-12 22:06:53 -0400
commit     467adc1acde8316dce83d7f293217127412e18e9 (patch)
tree       e202b529ef7787eac6558b3505ff96214204bdb1 /src/core/hle/kernel
parent     kernel: move KMemoryLayout for NX board (diff)
kernel: convert KMemoryLayout, KMemoryRegion*, KPageTableSlabHeap, KPriorityQueue
Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/k_memory_layout.cpp         19
-rw-r--r--  src/core/hle/kernel/k_memory_layout.h           34
-rw-r--r--  src/core/hle/kernel/k_memory_region.h           70
-rw-r--r--  src/core/hle/kernel/k_object_name.h              2
-rw-r--r--  src/core/hle/kernel/k_page_table_slab_heap.h     3
-rw-r--r--  src/core/hle/kernel/k_priority_queue.h         114
6 files changed, 121 insertions, 121 deletions
diff --git a/src/core/hle/kernel/k_memory_layout.cpp b/src/core/hle/kernel/k_memory_layout.cpp
index 72c3ee4b7..9ff751119 100644
--- a/src/core/hle/kernel/k_memory_layout.cpp
+++ b/src/core/hle/kernel/k_memory_layout.cpp
@@ -18,11 +18,11 @@ KMemoryRegion* AllocateRegion(KMemoryRegionAllocator& memory_region_allocator, A
 
 } // namespace
 
-KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_)
-    : memory_region_allocator{memory_region_allocator_} {}
+KMemoryRegionTree::KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator)
+    : m_memory_region_allocator{memory_region_allocator} {}
 
 void KMemoryRegionTree::InsertDirectly(u64 address, u64 last_address, u32 attr, u32 type_id) {
-    this->insert(*AllocateRegion(memory_region_allocator, address, last_address, attr, type_id));
+    this->insert(*AllocateRegion(m_memory_region_allocator, address, last_address, attr, type_id));
 }
 
 bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
@@ -69,7 +69,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
         const u64 new_pair = (old_pair != std::numeric_limits<u64>::max())
                                  ? old_pair + (address - old_address)
                                  : old_pair;
-        this->insert(*AllocateRegion(memory_region_allocator, address, inserted_region_last,
+        this->insert(*AllocateRegion(m_memory_region_allocator, address, inserted_region_last,
                                      new_pair, new_attr, type_id));
     }
 
@@ -78,7 +78,7 @@ bool KMemoryRegionTree::Insert(u64 address, size_t size, u32 type_id, u32 new_at
         const u64 after_pair = (old_pair != std::numeric_limits<u64>::max())
                                    ? old_pair + (inserted_region_end - old_address)
                                    : old_pair;
-        this->insert(*AllocateRegion(memory_region_allocator, inserted_region_end, old_last,
+        this->insert(*AllocateRegion(m_memory_region_allocator, inserted_region_end, old_last,
                                      after_pair, old_attr, old_type));
     }
 
@@ -126,14 +126,15 @@ VAddr KMemoryRegionTree::GetRandomAlignedRegion(size_t size, size_t alignment, u
 }
 
 KMemoryLayout::KMemoryLayout()
-    : virtual_tree{memory_region_allocator}, physical_tree{memory_region_allocator},
-      virtual_linear_tree{memory_region_allocator}, physical_linear_tree{memory_region_allocator} {}
+    : m_virtual_tree{m_memory_region_allocator}, m_physical_tree{m_memory_region_allocator},
+      m_virtual_linear_tree{m_memory_region_allocator}, m_physical_linear_tree{
+                                                            m_memory_region_allocator} {}
 
 void KMemoryLayout::InitializeLinearMemoryRegionTrees(PAddr aligned_linear_phys_start,
                                                       VAddr linear_virtual_start) {
     // Set static differences.
-    linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
-    linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
+    m_linear_phys_to_virt_diff = linear_virtual_start - aligned_linear_phys_start;
+    m_linear_virt_to_phys_diff = aligned_linear_phys_start - linear_virtual_start;
 
     // Initialize linear trees.
     for (auto& region : GetPhysicalMemoryRegionTree()) {
diff --git a/src/core/hle/kernel/k_memory_layout.h b/src/core/hle/kernel/k_memory_layout.h
index 17fa1a6ed..551b7a0e4 100644
--- a/src/core/hle/kernel/k_memory_layout.h
+++ b/src/core/hle/kernel/k_memory_layout.h
@@ -80,35 +80,35 @@ public:
     KMemoryLayout();
 
     KMemoryRegionTree& GetVirtualMemoryRegionTree() {
-        return virtual_tree;
+        return m_virtual_tree;
     }
     const KMemoryRegionTree& GetVirtualMemoryRegionTree() const {
-        return virtual_tree;
+        return m_virtual_tree;
     }
     KMemoryRegionTree& GetPhysicalMemoryRegionTree() {
-        return physical_tree;
+        return m_physical_tree;
     }
     const KMemoryRegionTree& GetPhysicalMemoryRegionTree() const {
-        return physical_tree;
+        return m_physical_tree;
     }
     KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() {
-        return virtual_linear_tree;
+        return m_virtual_linear_tree;
     }
     const KMemoryRegionTree& GetVirtualLinearMemoryRegionTree() const {
-        return virtual_linear_tree;
+        return m_virtual_linear_tree;
    }
     KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() {
-        return physical_linear_tree;
+        return m_physical_linear_tree;
     }
     const KMemoryRegionTree& GetPhysicalLinearMemoryRegionTree() const {
-        return physical_linear_tree;
+        return m_physical_linear_tree;
     }
 
     VAddr GetLinearVirtualAddress(PAddr address) const {
-        return address + linear_phys_to_virt_diff;
+        return address + m_linear_phys_to_virt_diff;
     }
     PAddr GetLinearPhysicalAddress(VAddr address) const {
-        return address + linear_virt_to_phys_diff;
+        return address + m_linear_virt_to_phys_diff;
     }
 
     const KMemoryRegion* FindVirtual(VAddr address) const {
@@ -391,13 +391,13 @@ private:
     }
 
 private:
-    u64 linear_phys_to_virt_diff{};
-    u64 linear_virt_to_phys_diff{};
-    KMemoryRegionAllocator memory_region_allocator;
-    KMemoryRegionTree virtual_tree;
-    KMemoryRegionTree physical_tree;
-    KMemoryRegionTree virtual_linear_tree;
-    KMemoryRegionTree physical_linear_tree;
+    u64 m_linear_phys_to_virt_diff{};
+    u64 m_linear_virt_to_phys_diff{};
+    KMemoryRegionAllocator m_memory_region_allocator;
+    KMemoryRegionTree m_virtual_tree;
+    KMemoryRegionTree m_physical_tree;
+    KMemoryRegionTree m_virtual_linear_tree;
+    KMemoryRegionTree m_physical_linear_tree;
 };
 
 namespace Init {
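
Note on the two difference fields renamed above: they exist so that GetLinearVirtualAddress and GetLinearPhysicalAddress each stay a single addition, relying on unsigned wrap-around when one base address is larger than the other. A minimal standalone sketch of the same scheme (type and member names here are illustrative, not yuzu's):

#include <cstdint>

// Sketch of the linear phys<->virt translation used by KMemoryLayout:
// both offsets are stored as u64 and deliberately rely on modular
// (wrap-around) arithmetic, so each direction is one addition.
struct LinearMap {
    std::uint64_t phys_to_virt_diff{};
    std::uint64_t virt_to_phys_diff{};

    void Initialize(std::uint64_t linear_phys_start, std::uint64_t linear_virt_start) {
        phys_to_virt_diff = linear_virt_start - linear_phys_start;
        virt_to_phys_diff = linear_phys_start - linear_virt_start;
    }

    std::uint64_t ToVirtual(std::uint64_t phys) const {
        return phys + phys_to_virt_diff;
    }
    std::uint64_t ToPhysical(std::uint64_t virt) const {
        return virt + virt_to_phys_diff;
    }
};

Storing both directions avoids any signed subtraction on the hot translation paths.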
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index 5037e657f..cfe86fb82 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -21,15 +21,15 @@ public:
     YUZU_NON_MOVEABLE(KMemoryRegion);
 
     constexpr KMemoryRegion() = default;
-    constexpr KMemoryRegion(u64 address_, u64 last_address_)
-        : address{address_}, last_address{last_address_} {}
-    constexpr KMemoryRegion(u64 address_, u64 last_address_, u64 pair_address_, u32 attributes_,
-                            u32 type_id_)
-        : address(address_), last_address(last_address_), pair_address(pair_address_),
-          attributes(attributes_), type_id(type_id_) {}
-    constexpr KMemoryRegion(u64 address_, u64 last_address_, u32 attributes_, u32 type_id_)
-        : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
-                        type_id_) {}
+    constexpr KMemoryRegion(u64 address, u64 last_address)
+        : m_address{address}, m_last_address{last_address} {}
+    constexpr KMemoryRegion(u64 address, u64 last_address, u64 pair_address, u32 attributes,
+                            u32 type_id)
+        : m_address(address), m_last_address(last_address), m_pair_address(pair_address),
+          m_attributes(attributes), m_type_id(type_id) {}
+    constexpr KMemoryRegion(u64 address, u64 last_address, u32 attributes, u32 type_id)
+        : KMemoryRegion(address, last_address, std::numeric_limits<u64>::max(), attributes,
+                        type_id) {}
 
     ~KMemoryRegion() = default;
 
@@ -44,15 +44,15 @@ public:
     }
 
     constexpr u64 GetAddress() const {
-        return address;
+        return m_address;
     }
 
     constexpr u64 GetPairAddress() const {
-        return pair_address;
+        return m_pair_address;
     }
 
     constexpr u64 GetLastAddress() const {
-        return last_address;
+        return m_last_address;
     }
 
     constexpr u64 GetEndAddress() const {
@@ -64,16 +64,16 @@ public:
     }
 
     constexpr u32 GetAttributes() const {
-        return attributes;
+        return m_attributes;
     }
 
     constexpr u32 GetType() const {
-        return type_id;
+        return m_type_id;
     }
 
     constexpr void SetType(u32 type) {
         ASSERT(this->CanDerive(type));
-        type_id = type;
+        m_type_id = type;
     }
 
     constexpr bool Contains(u64 addr) const {
@@ -94,27 +94,27 @@ public:
     }
 
     constexpr void SetPairAddress(u64 a) {
-        pair_address = a;
+        m_pair_address = a;
     }
 
     constexpr void SetTypeAttribute(u32 attr) {
-        type_id |= attr;
+        m_type_id |= attr;
     }
 
 private:
     constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
-        address = a;
-        pair_address = p;
-        last_address = la;
-        attributes = r;
-        type_id = t;
+        m_address = a;
+        m_pair_address = p;
+        m_last_address = la;
+        m_attributes = r;
+        m_type_id = t;
     }
 
-    u64 address{};
-    u64 last_address{};
-    u64 pair_address{};
-    u32 attributes{};
-    u32 type_id{};
+    u64 m_address{};
+    u64 m_last_address{};
+    u64 m_pair_address{};
+    u32 m_attributes{};
+    u32 m_type_id{};
 };
 
 class KMemoryRegionTree final {
@@ -322,7 +322,7 @@ public:
 
 private:
     TreeType m_tree{};
-    KMemoryRegionAllocator& memory_region_allocator;
+    KMemoryRegionAllocator& m_memory_region_allocator;
 };
 
 class KMemoryRegionAllocator final {
@@ -338,18 +338,18 @@ public:
     template <typename... Args>
     KMemoryRegion* Allocate(Args&&... args) {
         // Ensure we stay within the bounds of our heap.
-        ASSERT(this->num_regions < MaxMemoryRegions);
+        ASSERT(m_num_regions < MaxMemoryRegions);
 
         // Create the new region.
-        KMemoryRegion* region = std::addressof(this->region_heap[this->num_regions++]);
-        new (region) KMemoryRegion(std::forward<Args>(args)...);
+        KMemoryRegion* region = std::addressof(m_region_heap[m_num_regions++]);
+        std::construct_at(region, std::forward<Args>(args)...);
 
         return region;
     }
 
 private:
-    std::array<KMemoryRegion, MaxMemoryRegions> region_heap{};
-    size_t num_regions{};
+    std::array<KMemoryRegion, MaxMemoryRegions> m_region_heap{};
+    size_t m_num_regions{};
 };
 
 } // namespace Kernel
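
Besides the renames, this file swaps the placement-new expression for std::construct_at; for this allocator the two are equivalent, but construct_at (C++20) is usable in constant evaluation and avoids the placement-new syntax. A standalone sketch of the bump-allocation pattern that KMemoryRegionAllocator::Allocate uses (names here are illustrative, not yuzu's):

#include <array>
#include <cstddef>
#include <memory>
#include <utility>

// Fixed-capacity bump allocator: hands out the next unused slot and
// constructs the object in place. Assumes T is trivially destructible
// (as KMemoryRegion is), since the default-constructed slot is simply
// overwritten without an explicit destroy.
template <typename T, std::size_t Capacity>
class BumpAllocator {
public:
    template <typename... Args>
    T* Allocate(Args&&... args) {
        if (num_used >= Capacity) {
            return nullptr; // the real code asserts instead of failing softly
        }
        T* slot = std::addressof(storage[num_used++]);
        return std::construct_at(slot, std::forward<Args>(args)...);
    }

private:
    std::array<T, Capacity> storage{};
    std::size_t num_used{};
};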
diff --git a/src/core/hle/kernel/k_object_name.h b/src/core/hle/kernel/k_object_name.h
index b7f943134..2d97fc777 100644
--- a/src/core/hle/kernel/k_object_name.h
+++ b/src/core/hle/kernel/k_object_name.h
@@ -41,7 +41,7 @@ public:
         // Check that the object is closed.
         R_UNLESS(derived->IsServerClosed(), ResultInvalidState);
 
-        return Delete(kernel, obj.GetPointerUnsafe(), name);
+        R_RETURN(Delete(kernel, obj.GetPointerUnsafe(), name));
     }
 
     template <typename Derived>
diff --git a/src/core/hle/kernel/k_page_table_slab_heap.h b/src/core/hle/kernel/k_page_table_slab_heap.h
index a9543cbd0..9a8d77316 100644
--- a/src/core/hle/kernel/k_page_table_slab_heap.h
+++ b/src/core/hle/kernel/k_page_table_slab_heap.h
@@ -20,7 +20,8 @@ public:
     PageTablePage() = default;
 
 private:
-    std::array<u8, PageSize> m_buffer{};
+    // Initializer intentionally skipped
+    std::array<u8, PageSize> m_buffer;
 };
 static_assert(sizeof(PageTablePage) == PageSize);
 
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 645c5b531..26677ec65 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -77,11 +77,11 @@ private:
 public:
     class KPerCoreQueue {
     private:
-        std::array<Entry, NumCores> root{};
+        std::array<Entry, NumCores> m_root{};
 
     public:
         constexpr KPerCoreQueue() {
-            for (auto& per_core_root : root) {
+            for (auto& per_core_root : m_root) {
                 per_core_root.Initialize();
             }
         }
@@ -91,15 +91,15 @@ public:
             Entry& member_entry = member->GetPriorityQueueEntry(core);
 
             // Get the entry associated with the end of the queue.
-            Member* tail = this->root[core].GetPrev();
+            Member* tail = m_root[core].GetPrev();
             Entry& tail_entry =
-                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : this->root[core];
+                (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];
 
             // Link the entries.
             member_entry.SetPrev(tail);
             member_entry.SetNext(nullptr);
             tail_entry.SetNext(member);
-            this->root[core].SetPrev(member);
+            m_root[core].SetPrev(member);
 
             return tail == nullptr;
         }
@@ -109,15 +109,15 @@ public:
             Entry& member_entry = member->GetPriorityQueueEntry(core);
 
             // Get the entry associated with the front of the queue.
-            Member* head = this->root[core].GetNext();
+            Member* head = m_root[core].GetNext();
             Entry& head_entry =
-                (head != nullptr) ? head->GetPriorityQueueEntry(core) : this->root[core];
+                (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];
 
             // Link the entries.
             member_entry.SetPrev(nullptr);
             member_entry.SetNext(head);
             head_entry.SetPrev(member);
-            this->root[core].SetNext(member);
+            m_root[core].SetNext(member);
 
             return (head == nullptr);
         }
@@ -130,9 +130,9 @@ public:
             Member* prev = member_entry.GetPrev();
             Member* next = member_entry.GetNext();
             Entry& prev_entry =
-                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : this->root[core];
+                (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
             Entry& next_entry =
-                (next != nullptr) ? next->GetPriorityQueueEntry(core) : this->root[core];
+                (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];
 
             // Unlink.
             prev_entry.SetNext(next);
@@ -142,7 +142,7 @@ public:
         }
 
         constexpr Member* GetFront(s32 core) const {
-            return this->root[core].GetNext();
+            return m_root[core].GetNext();
         }
     };
 
@@ -158,8 +158,8 @@ public:
                 return;
             }
 
-            if (this->queues[priority].PushBack(core, member)) {
-                this->available_priorities[core].SetBit(priority);
+            if (m_queues[priority].PushBack(core, member)) {
+                m_available_priorities[core].SetBit(priority);
             }
         }
 
@@ -171,8 +171,8 @@ public:
                 return;
             }
 
-            if (this->queues[priority].PushFront(core, member)) {
-                this->available_priorities[core].SetBit(priority);
+            if (m_queues[priority].PushFront(core, member)) {
+                m_available_priorities[core].SetBit(priority);
             }
         }
 
@@ -184,18 +184,17 @@ public:
                 return;
             }
 
-            if (this->queues[priority].Remove(core, member)) {
-                this->available_priorities[core].ClearBit(priority);
+            if (m_queues[priority].Remove(core, member)) {
+                m_available_priorities[core].ClearBit(priority);
             }
         }
 
         constexpr Member* GetFront(s32 core) const {
             ASSERT(IsValidCore(core));
 
-            const s32 priority =
-                static_cast<s32>(this->available_priorities[core].CountLeadingZero());
+            const s32 priority = static_cast<s32>(m_available_priorities[core].CountLeadingZero());
             if (priority <= LowestPriority) {
-                return this->queues[priority].GetFront(core);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
@@ -206,7 +205,7 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                return this->queues[priority].GetFront(core);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
@@ -218,9 +217,9 @@ public:
             Member* next = member->GetPriorityQueueEntry(core).GetNext();
             if (next == nullptr) {
                 const s32 priority = static_cast<s32>(
-                    this->available_priorities[core].GetNextSet(member->GetPriority()));
+                    m_available_priorities[core].GetNextSet(member->GetPriority()));
                 if (priority <= LowestPriority) {
-                    next = this->queues[priority].GetFront(core);
+                    next = m_queues[priority].GetFront(core);
                 }
             }
             return next;
@@ -231,8 +230,8 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                this->queues[priority].Remove(core, member);
-                this->queues[priority].PushFront(core, member);
+                m_queues[priority].Remove(core, member);
+                m_queues[priority].PushFront(core, member);
             }
         }
 
@@ -241,29 +240,29 @@ public:
             ASSERT(IsValidPriority(priority));
 
             if (priority <= LowestPriority) {
-                this->queues[priority].Remove(core, member);
-                this->queues[priority].PushBack(core, member);
-                return this->queues[priority].GetFront(core);
+                m_queues[priority].Remove(core, member);
+                m_queues[priority].PushBack(core, member);
+                return m_queues[priority].GetFront(core);
             } else {
                 return nullptr;
             }
         }
 
     private:
-        std::array<KPerCoreQueue, NumPriority> queues{};
-        std::array<Common::BitSet64<NumPriority>, NumCores> available_priorities{};
+        std::array<KPerCoreQueue, NumPriority> m_queues{};
+        std::array<Common::BitSet64<NumPriority>, NumCores> m_available_priorities{};
     };
 
 private:
-    KPriorityQueueImpl scheduled_queue;
-    KPriorityQueueImpl suggested_queue;
+    KPriorityQueueImpl m_scheduled_queue;
+    KPriorityQueueImpl m_suggested_queue;
 
 private:
-    constexpr void ClearAffinityBit(u64& affinity, s32 core) {
+    static constexpr void ClearAffinityBit(u64& affinity, s32 core) {
         affinity &= ~(UINT64_C(1) << core);
     }
 
-    constexpr s32 GetNextCore(u64& affinity) {
+    static constexpr s32 GetNextCore(u64& affinity) {
         const s32 core = std::countr_zero(affinity);
         ClearAffinityBit(affinity, core);
         return core;
@@ -275,13 +274,13 @@ private:
         // Push onto the scheduled queue for its core, if we can.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.PushBack(priority, core, member);
+            m_scheduled_queue.PushBack(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // And suggest the thread for all other cores.
         while (affinity) {
-            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+            m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -291,14 +290,14 @@ private:
         // Push onto the scheduled queue for its core, if we can.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.PushFront(priority, core, member);
+            m_scheduled_queue.PushFront(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // And suggest the thread for all other cores.
         // Note: Nintendo pushes onto the back of the suggested queue, not the front.
         while (affinity) {
-            this->suggested_queue.PushBack(priority, GetNextCore(affinity), member);
+            m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -308,13 +307,13 @@ private:
         // Remove from the scheduled queue for its core.
         u64 affinity = member->GetAffinityMask().GetAffinityMask();
         if (const s32 core = member->GetActiveCore(); core >= 0) {
-            this->scheduled_queue.Remove(priority, core, member);
+            m_scheduled_queue.Remove(priority, core, member);
             ClearAffinityBit(affinity, core);
         }
 
         // Remove from the suggested queue for all other cores.
         while (affinity) {
-            this->suggested_queue.Remove(priority, GetNextCore(affinity), member);
+            m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
         }
     }
 
@@ -323,27 +322,27 @@ public:
 
     // Getters.
     constexpr Member* GetScheduledFront(s32 core) const {
-        return this->scheduled_queue.GetFront(core);
+        return m_scheduled_queue.GetFront(core);
     }
 
     constexpr Member* GetScheduledFront(s32 core, s32 priority) const {
-        return this->scheduled_queue.GetFront(priority, core);
+        return m_scheduled_queue.GetFront(priority, core);
     }
 
     constexpr Member* GetSuggestedFront(s32 core) const {
-        return this->suggested_queue.GetFront(core);
+        return m_suggested_queue.GetFront(core);
     }
 
     constexpr Member* GetSuggestedFront(s32 core, s32 priority) const {
-        return this->suggested_queue.GetFront(priority, core);
+        return m_suggested_queue.GetFront(priority, core);
     }
 
     constexpr Member* GetScheduledNext(s32 core, const Member* member) const {
-        return this->scheduled_queue.GetNext(core, member);
+        return m_scheduled_queue.GetNext(core, member);
     }
 
     constexpr Member* GetSuggestedNext(s32 core, const Member* member) const {
-        return this->suggested_queue.GetNext(core, member);
+        return m_suggested_queue.GetNext(core, member);
     }
 
     constexpr Member* GetSamePriorityNext(s32 core, const Member* member) const {
@@ -375,7 +374,7 @@ public:
             return;
         }
 
-        this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
+        m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
     }
 
     constexpr KThread* MoveToScheduledBack(Member* member) {
@@ -384,8 +383,7 @@ public:
             return {};
         }
 
-        return this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(),
-                                                member);
+        return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
     }
 
     // First class fancy operations.
@@ -425,9 +423,9 @@ public:
         for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
             if (prev_affinity.GetAffinity(core)) {
                 if (core == prev_core) {
-                    this->scheduled_queue.Remove(priority, core, member);
+                    m_scheduled_queue.Remove(priority, core, member);
                 } else {
-                    this->suggested_queue.Remove(priority, core, member);
+                    m_suggested_queue.Remove(priority, core, member);
                 }
             }
         }
@@ -436,9 +434,9 @@ public:
         for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
             if (new_affinity.GetAffinity(core)) {
                 if (core == new_core) {
-                    this->scheduled_queue.PushBack(priority, core, member);
+                    m_scheduled_queue.PushBack(priority, core, member);
                 } else {
-                    this->suggested_queue.PushBack(priority, core, member);
+                    m_suggested_queue.PushBack(priority, core, member);
                 }
             }
         }
@@ -458,22 +456,22 @@ public:
         if (prev_core != new_core) {
             // Remove from the scheduled queue for the previous core.
             if (prev_core >= 0) {
-                this->scheduled_queue.Remove(priority, prev_core, member);
+                m_scheduled_queue.Remove(priority, prev_core, member);
             }
 
             // Remove from the suggested queue and add to the scheduled queue for the new core.
             if (new_core >= 0) {
-                this->suggested_queue.Remove(priority, new_core, member);
+                m_suggested_queue.Remove(priority, new_core, member);
                 if (to_front) {
-                    this->scheduled_queue.PushFront(priority, new_core, member);
+                    m_scheduled_queue.PushFront(priority, new_core, member);
                 } else {
-                    this->scheduled_queue.PushBack(priority, new_core, member);
+                    m_scheduled_queue.PushBack(priority, new_core, member);
                 }
             }
 
             // Add to the suggested queue for the previous core.
             if (prev_core >= 0) {
-                this->suggested_queue.PushBack(priority, prev_core, member);
+                m_suggested_queue.PushBack(priority, prev_core, member);
             }
         }
     }
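
For context on the m_available_priorities rename: KPriorityQueueImpl keeps one bit per priority level per core, set while that level's per-core queue is non-empty, and GetFront picks the most urgent level with a single CountLeadingZero. An illustrative sketch of that lookup using a plain u64 (not yuzu's Common::BitSet64):

#include <bit>
#include <cstdint>

// One bit per priority level: bit for priority p is stored MSB-relative,
// so counting leading zeros yields the numerically smallest (most urgent)
// priority that currently has a queued member. When nothing is queued,
// the count is 64, which is above LowestPriority, and the caller returns
// nullptr, mirroring KPriorityQueueImpl::GetFront.
struct PriorityBitmap {
    std::uint64_t bits{};

    void Set(int priority) { bits |= (UINT64_C(1) << (63 - priority)); }
    void Clear(int priority) { bits &= ~(UINT64_C(1) << (63 - priority)); }

    int HighestSetPriority() const { return std::countl_zero(bits); }
};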