Diffstat (limited to 'src')
-rw-r--r--  src/common/CMakeLists.txt | 1
-rw-r--r--  src/common/bit_util.h | 39
-rw-r--r--  src/common/multi_level_queue.h | 337
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 6
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 3
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 60
-rw-r--r--  src/core/hle/kernel/scheduler.h | 6
-rw-r--r--  src/core/hle/kernel/svc.cpp | 28
-rw-r--r--  src/core/hle/kernel/thread.cpp | 5
-rw-r--r--  src/core/hle/kernel/thread.h | 3
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 76
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 55
-rw-r--r--  src/core/hle/service/fatal/fatal.cpp | 89
-rw-r--r--  src/tests/CMakeLists.txt | 2
-rw-r--r--  src/tests/common/bit_utils.cpp | 23
-rw-r--r--  src/tests/common/multi_level_queue.cpp | 55
-rw-r--r--  src/video_core/gpu.cpp | 9
-rw-r--r--  src/video_core/gpu.h | 6
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h | 2
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp | 4
22 files changed, 662 insertions, 153 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 43ae8a9e7..850ce8006 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,6 +98,7 @@ add_library(common STATIC
98 microprofile.h 98 microprofile.h
99 microprofileui.h 99 microprofileui.h
100 misc.cpp 100 misc.cpp
101 multi_level_queue.h
101 page_table.cpp 102 page_table.cpp
102 page_table.h 103 page_table.h
103 param_package.cpp 104 param_package.cpp
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 1eea17ba1..a4f9ed4aa 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) {
58 return __builtin_clzll(value); 58 return __builtin_clzll(value);
59} 59}
60#endif 60#endif
61
62#ifdef _MSC_VER
63inline u32 CountTrailingZeroes32(u32 value) {
64 unsigned long trailing_zero = 0;
65
66 if (_BitScanForward(&trailing_zero, value) != 0) {
67 return trailing_zero;
68 }
69
70 return 32;
71}
72
73inline u64 CountTrailingZeroes64(u64 value) {
74 unsigned long trailing_zero = 0;
75
76 if (_BitScanForward64(&trailing_zero, value) != 0) {
77 return trailing_zero;
78 }
79
80 return 64;
81}
82#else
83inline u32 CountTrailingZeroes32(u32 value) {
84 if (value == 0) {
85 return 32;
86 }
87
88 return __builtin_ctz(value);
89}
90
91inline u64 CountTrailingZeroes64(u64 value) {
92 if (value == 0) {
93 return 64;
94 }
95
96 return __builtin_ctzll(value);
97}
98#endif
99
61} // namespace Common 100} // namespace Common
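
A minimal sketch (not part of the patch) of how these trailing-zero helpers are typically consumed: scanning a 64-bit bitmap for its set bits, lowest index first, which is how the MultiLevelQueue introduced below walks its priority mask. The bitmap value and function name are illustrative only.

```cpp
#include <cstdio>

#include "common/bit_util.h"
#include "common/common_types.h"

// Visit every set bit of a 64-bit bitmap, lowest bit first.
// Relies on CountTrailingZeroes64(0) == 64 to terminate cleanly.
void VisitSetBits(u64 bitmap) {
    while (bitmap != 0) {
        const u64 bit = Common::CountTrailingZeroes64(bitmap);
        std::printf("bit %llu is set\n", static_cast<unsigned long long>(bit));
        bitmap &= ~(1ULL << bit); // clear the bit we just handled
    }
}
```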
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
new file mode 100644
index 000000000..2b61b91e0
--- /dev/null
+++ b/src/common/multi_level_queue.h
@@ -0,0 +1,337 @@
1// Copyright 2019 TuxSH
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <iterator>
9#include <list>
10#include <utility>
11
12#include "common/bit_util.h"
13#include "common/common_types.h"
14
15namespace Common {
16
17/**
18 * A MultiLevelQueue is a type of priority queue with the following characteristics:
19 * - iterable through each of its elements.
20 * - back can be obtained.
21 * - O(1) add and lookup (both front and back).
22 * - discrete priorities, with a maximum of 64 priority levels (limited domain).
23 * This type of priority queue is normally used for managing threads within a scheduler.
24 */
25template <typename T, std::size_t Depth>
26class MultiLevelQueue {
27public:
28 using value_type = T;
29 using reference = value_type&;
30 using const_reference = const value_type&;
31 using pointer = value_type*;
32 using const_pointer = const value_type*;
33
34 using difference_type = typename std::pointer_traits<pointer>::difference_type;
35 using size_type = std::size_t;
36
37 template <bool is_constant>
38 class iterator_impl {
39 public:
40 using iterator_category = std::bidirectional_iterator_tag;
41 using value_type = T;
42 using pointer = std::conditional_t<is_constant, const T*, T*>;
43 using reference = std::conditional_t<is_constant, const T&, T&>;
44 using difference_type = typename std::pointer_traits<pointer>::difference_type;
45
46 friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
47 if (lhs.IsEnd() && rhs.IsEnd())
48 return true;
49 return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
50 }
51
52 friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
53 return !operator==(lhs, rhs);
54 }
55
56 reference operator*() const {
57 return *it;
58 }
59
60 pointer operator->() const {
61 return it.operator->();
62 }
63
64 iterator_impl& operator++() {
65 if (IsEnd()) {
66 return *this;
67 }
68
69 ++it;
70
71 if (it == GetEndItForPrio()) {
72 u64 prios = mlq.used_priorities;
73 prios &= ~((1ULL << (current_priority + 1)) - 1);
74 if (prios == 0) {
75 current_priority = mlq.depth();
76 } else {
77 current_priority = CountTrailingZeroes64(prios);
78 it = GetBeginItForPrio();
79 }
80 }
81 return *this;
82 }
83
84 iterator_impl& operator--() {
85 if (IsEnd()) {
86 if (mlq.used_priorities != 0) {
87 current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
88 it = GetEndItForPrio();
89 --it;
90 }
91 } else if (it == GetBeginItForPrio()) {
92 u64 prios = mlq.used_priorities;
93 prios &= (1ULL << current_priority) - 1;
94 if (prios != 0) {
95 current_priority = CountTrailingZeroes64(prios);
96 it = GetEndItForPrio();
97 --it;
98 }
99 } else {
100 --it;
101 }
102 return *this;
103 }
104
105 iterator_impl operator++(int) {
106 const iterator_impl v{*this};
107 ++(*this);
108 return v;
109 }
110
111 iterator_impl operator--(int) {
112 const iterator_impl v{*this};
113 --(*this);
114 return v;
115 }
116
117 // allow implicit conversion from a non-const iterator to a const iterator
118 iterator_impl(const iterator_impl<false>& other)
119 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
120
121 iterator_impl(const iterator_impl<true>& other)
122 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
123
124 iterator_impl& operator=(const iterator_impl<false>& other) {
125 mlq = other.mlq;
126 it = other.it;
127 current_priority = other.current_priority;
128 return *this;
129 }
130
131 friend class iterator_impl<true>;
132 iterator_impl() = default;
133
134 private:
135 friend class MultiLevelQueue;
136 using container_ref =
137 std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
138 using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
139 typename std::list<T>::iterator>;
140
141 explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
142 : mlq(mlq), it(it), current_priority(current_priority) {}
143 explicit iterator_impl(container_ref mlq, u32 current_priority)
144 : mlq(mlq), it(), current_priority(current_priority) {}
145
146 bool IsEnd() const {
147 return current_priority == mlq.depth();
148 }
149
150 list_iterator GetBeginItForPrio() const {
151 return mlq.levels[current_priority].begin();
152 }
153
154 list_iterator GetEndItForPrio() const {
155 return mlq.levels[current_priority].end();
156 }
157
158 container_ref mlq;
159 list_iterator it;
160 u32 current_priority;
161 };
162
163 using iterator = iterator_impl<false>;
164 using const_iterator = iterator_impl<true>;
165
166 void add(const T& element, u32 priority, bool send_back = true) {
167 if (send_back)
168 levels[priority].push_back(element);
169 else
170 levels[priority].push_front(element);
171 used_priorities |= 1ULL << priority;
172 }
173
174 void remove(const T& element, u32 priority) {
175 auto it = ListIterateTo(levels[priority], element);
176 if (it == levels[priority].end())
177 return;
178 levels[priority].erase(it);
179 if (levels[priority].empty()) {
180 used_priorities &= ~(1ULL << priority);
181 }
182 }
183
184 void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
185 remove(element, old_priority);
186 add(element, new_priority, !adjust_front);
187 }
188 void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
189 adjust(*it, old_priority, new_priority, adjust_front);
190 }
191
192 void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
193 ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
194 ListIterateTo(levels[priority], element));
195
196 other.used_priorities |= 1ULL << priority;
197
198 if (levels[priority].empty()) {
199 used_priorities &= ~(1ULL << priority);
200 }
201 }
202
203 void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
204 transfer_to_front(*it, priority, other);
205 }
206
207 void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
208 ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
209 ListIterateTo(levels[priority], element));
210
211 other.used_priorities |= 1ULL << priority;
212
213 if (levels[priority].empty()) {
214 used_priorities &= ~(1ULL << priority);
215 }
216 }
217
218 void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
219 transfer_to_back(*it, priority, other);
220 }
221
222 void yield(u32 priority, std::size_t n = 1) {
223 ListShiftForward(levels[priority], n);
224 }
225
226 std::size_t depth() const {
227 return Depth;
228 }
229
230 std::size_t size(u32 priority) const {
231 return levels[priority].size();
232 }
233
234 std::size_t size() const {
235 u64 priorities = used_priorities;
236 std::size_t size = 0;
237 while (priorities != 0) {
238 const u64 current_priority = CountTrailingZeroes64(priorities);
239 size += levels[current_priority].size();
240 priorities &= ~(1ULL << current_priority);
241 }
242 return size;
243 }
244
245 bool empty() const {
246 return used_priorities == 0;
247 }
248
249 bool empty(u32 priority) const {
250 return (used_priorities & (1ULL << priority)) == 0;
251 }
252
253 u32 highest_priority_set(u32 max_priority = 0) const {
254 const u64 priorities =
255 max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
256 return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
257 }
258
259 u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
260 const u64 priorities = min_priority >= Depth - 1
261 ? used_priorities
262 : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
263 return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
264 }
265
266 const_iterator cbegin(u32 max_prio = 0) const {
267 const u32 priority = highest_priority_set(max_prio);
268 return priority == Depth ? cend()
269 : const_iterator{*this, levels[priority].cbegin(), priority};
270 }
271 const_iterator begin(u32 max_prio = 0) const {
272 return cbegin(max_prio);
273 }
274 iterator begin(u32 max_prio = 0) {
275 const u32 priority = highest_priority_set(max_prio);
276 return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
277 }
278
279 const_iterator cend(u32 min_prio = Depth - 1) const {
280 return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
281 }
282 const_iterator end(u32 min_prio = Depth - 1) const {
283 return cend(min_prio);
284 }
285 iterator end(u32 min_prio = Depth - 1) {
286 return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
287 }
288
289 T& front(u32 max_priority = 0) {
290 const u32 priority = highest_priority_set(max_priority);
291 return levels[priority == Depth ? 0 : priority].front();
292 }
293 const T& front(u32 max_priority = 0) const {
294 const u32 priority = highest_priority_set(max_priority);
295 return levels[priority == Depth ? 0 : priority].front();
296 }
297
298 T back(u32 min_priority = Depth - 1) {
299 const u32 priority = lowest_priority_set(min_priority); // intended
300 return levels[priority == Depth ? 63 : priority].back();
301 }
302 const T& back(u32 min_priority = Depth - 1) const {
303 const u32 priority = lowest_priority_set(min_priority); // intended
304 return levels[priority == Depth ? 63 : priority].back();
305 }
306
307private:
308 using const_list_iterator = typename std::list<T>::const_iterator;
309
310 static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
311 if (shift >= list.size()) {
312 return;
313 }
314
315 const auto begin_range = list.begin();
316 const auto end_range = std::next(begin_range, shift);
317 list.splice(list.end(), list, begin_range, end_range);
318 }
319
320 static void ListSplice(std::list<T>& in_list, const_list_iterator position,
321 std::list<T>& out_list, const_list_iterator element) {
322 in_list.splice(position, out_list, element);
323 }
324
325 static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
326 auto it = list.cbegin();
327 while (it != list.cend() && *it != element) {
328 ++it;
329 }
330 return it;
331 }
332
333 std::array<std::list<T>, Depth> levels;
334 u64 used_priorities = 0;
335};
336
337} // namespace Common
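
A short usage sketch, based on the interface added above (add, front, yield, adjust, and the priority-ordered iteration); the element values and priority numbers are made up for illustration.

```cpp
#include "common/multi_level_queue.h"

void MultiLevelQueueExample() {
    Common::MultiLevelQueue<int, 64> queue;

    queue.add(10, 3);               // appended at priority 3
    queue.add(20, 3);
    queue.add(30, 1);               // priority 1 outranks priority 3 (lower index wins)

    const int best = queue.front(); // 30: front() starts at the highest priority in use
    queue.yield(3);                 // rotate level 3, so 20 now precedes 10
    queue.adjust(30, 1, 5);         // move element 30 from priority 1 to priority 5

    // Iteration visits levels from the highest priority (lowest index) downwards.
    int sum = best;
    for (const int value : queue) {
        sum += value;
    }
    (void)sum;
}
```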
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
26 // them all. 26 // them all.
27 std::size_t last = waiting_threads.size(); 27 std::size_t last = waiting_threads.size();
28 if (num_to_wake > 0) { 28 if (num_to_wake > 0) {
29 last = num_to_wake; 29 last = std::min(last, static_cast<std::size_t>(num_to_wake));
30 } 30 }
31 31
32 // Signal the waiting threads. 32 // Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
90 // Determine the modified value depending on the waiting count. 90 // Determine the modified value depending on the waiting count.
91 s32 updated_value; 91 s32 updated_value;
92 if (waiting_threads.empty()) { 92 if (waiting_threads.empty()) {
93 updated_value = value - 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value + 1; 93 updated_value = value + 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value - 1;
96 } else { 96 } else {
97 updated_value = value; 97 updated_value = value;
98 } 98 }
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index a7e4ddc05..3b73be67b 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
62 62
63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || 63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
64 thread->GetWaitHandle() != 0) { 64 thread->GetWaitHandle() != 0) {
65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
66 thread->GetStatus() == ThreadStatus::WaitCondVar);
66 thread->SetMutexWaitAddress(0); 67 thread->SetMutexWaitAddress(0);
67 thread->SetCondVarWaitAddress(0); 68 thread->SetCondVarWaitAddress(0);
68 thread->SetWaitHandle(0); 69 thread->SetWaitHandle(0);
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
29} 29}
30 30
31bool Scheduler::HaveReadyThreads() const { 31bool Scheduler::HaveReadyThreads() const {
32 std::lock_guard<std::mutex> lock(scheduler_mutex); 32 std::lock_guard lock{scheduler_mutex};
33 return ready_queue.get_first() != nullptr; 33 return !ready_queue.empty();
34} 34}
35 35
36Thread* Scheduler::GetCurrentThread() const { 36Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
46 Thread* thread = GetCurrentThread(); 46 Thread* thread = GetCurrentThread();
47 47
48 if (thread && thread->GetStatus() == ThreadStatus::Running) { 48 if (thread && thread->GetStatus() == ThreadStatus::Running) {
49 if (ready_queue.empty()) {
50 return thread;
51 }
49 // We have to do better than the current thread. 52 // We have to do better than the current thread.
50 // This call returns null when that's not possible. 53 // This call returns null when that's not possible.
51 next = ready_queue.pop_first_better(thread->GetPriority()); 54 next = ready_queue.front();
52 if (!next) { 55 if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
53 // Otherwise just keep going with the current thread
54 next = thread; 56 next = thread;
55 } 57 }
56 } else { 58 } else {
57 next = ready_queue.pop_first(); 59 if (ready_queue.empty()) {
60 return nullptr;
61 }
62 next = ready_queue.front();
58 } 63 }
59 64
60 return next; 65 return next;
61} 66}
62 67
63void Scheduler::SwitchContext(Thread* new_thread) { 68void Scheduler::SwitchContext(Thread* new_thread) {
64 Thread* const previous_thread = GetCurrentThread(); 69 Thread* previous_thread = GetCurrentThread();
65 Process* const previous_process = system.Kernel().CurrentProcess(); 70 Process* const previous_process = system.Kernel().CurrentProcess();
66 71
67 UpdateLastContextSwitchTime(previous_thread, previous_process); 72 UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
75 if (previous_thread->GetStatus() == ThreadStatus::Running) { 80 if (previous_thread->GetStatus() == ThreadStatus::Running) {
76 // This is only the case when a reschedule is triggered without the current thread 81 // This is only the case when a reschedule is triggered without the current thread
77 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 82 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
78 ready_queue.push_front(previous_thread->GetPriority(), previous_thread); 83 ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
79 previous_thread->SetStatus(ThreadStatus::Ready); 84 previous_thread->SetStatus(ThreadStatus::Ready);
80 } 85 }
81 } 86 }
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
90 95
91 current_thread = new_thread; 96 current_thread = new_thread;
92 97
93 ready_queue.remove(new_thread->GetPriority(), new_thread); 98 ready_queue.remove(new_thread, new_thread->GetPriority());
94 new_thread->SetStatus(ThreadStatus::Running); 99 new_thread->SetStatus(ThreadStatus::Running);
95 100
96 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 101 auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
127} 132}
128 133
129void Scheduler::Reschedule() { 134void Scheduler::Reschedule() {
130 std::lock_guard<std::mutex> lock(scheduler_mutex); 135 std::lock_guard lock{scheduler_mutex};
131 136
132 Thread* cur = GetCurrentThread(); 137 Thread* cur = GetCurrentThread();
133 Thread* next = PopNextReadyThread(); 138 Thread* next = PopNextReadyThread();
@@ -143,51 +148,54 @@ void Scheduler::Reschedule() {
143 SwitchContext(next); 148 SwitchContext(next);
144} 149}
145 150
146void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { 151void Scheduler::AddThread(SharedPtr<Thread> thread) {
147 std::lock_guard<std::mutex> lock(scheduler_mutex); 152 std::lock_guard lock{scheduler_mutex};
148 153
149 thread_list.push_back(std::move(thread)); 154 thread_list.push_back(std::move(thread));
150 ready_queue.prepare(priority);
151} 155}
152 156
153void Scheduler::RemoveThread(Thread* thread) { 157void Scheduler::RemoveThread(Thread* thread) {
154 std::lock_guard<std::mutex> lock(scheduler_mutex); 158 std::lock_guard lock{scheduler_mutex};
155 159
156 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 160 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
157 thread_list.end()); 161 thread_list.end());
158} 162}
159 163
160void Scheduler::ScheduleThread(Thread* thread, u32 priority) { 164void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
161 std::lock_guard<std::mutex> lock(scheduler_mutex); 165 std::lock_guard lock{scheduler_mutex};
162 166
163 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 167 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
164 ready_queue.push_back(priority, thread); 168 ready_queue.add(thread, priority);
165} 169}
166 170
167void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { 171void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
168 std::lock_guard<std::mutex> lock(scheduler_mutex); 172 std::lock_guard lock{scheduler_mutex};
169 173
170 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 174 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
171 ready_queue.remove(priority, thread); 175 ready_queue.remove(thread, priority);
172} 176}
173 177
174void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { 178void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
175 std::lock_guard<std::mutex> lock(scheduler_mutex); 179 std::lock_guard lock{scheduler_mutex};
180 if (thread->GetPriority() == priority) {
181 return;
182 }
176 183
177 // If thread was ready, adjust queues 184 // If thread was ready, adjust queues
178 if (thread->GetStatus() == ThreadStatus::Ready) 185 if (thread->GetStatus() == ThreadStatus::Ready)
179 ready_queue.move(thread, thread->GetPriority(), priority); 186 ready_queue.adjust(thread, thread->GetPriority(), priority);
180 else
181 ready_queue.prepare(priority);
182} 187}
183 188
184Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { 189Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
185 std::lock_guard<std::mutex> lock(scheduler_mutex); 190 std::lock_guard lock{scheduler_mutex};
186 191
187 const u32 mask = 1U << core; 192 const u32 mask = 1U << core;
188 return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { 193 for (auto* thread : ready_queue) {
189 return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; 194 if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
190 }); 195 return thread;
196 }
197 }
198 return nullptr;
191} 199}
192 200
193void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { 201void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
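
A condensed sketch of the selection rule the reworked PopNextReadyThread implements on top of the new queue: a lower numeric priority means more urgent, and the running thread is preempted only when a strictly more urgent thread is ready. The sketch assumes the current thread is in the Running state; the real function also handles the non-running case shown in the hunk above.

```cpp
#include "common/multi_level_queue.h"
#include "core/hle/kernel/thread.h"

namespace Kernel {

// Sketch only: pick which thread should run next given the currently running
// thread and the scheduler's ready queue (lower priority value == more urgent).
Thread* SelectNextThread(Thread* running,
                         Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1>& ready_queue) {
    if (ready_queue.empty()) {
        return running; // nothing else is ready; keep the current thread
    }
    Thread* const candidate = ready_queue.front();
    if (running != nullptr && candidate->GetPriority() >= running->GetPriority()) {
        return running; // candidate is not strictly more urgent
    }
    return candidate;
}

} // namespace Kernel
```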
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
7#include <mutex> 7#include <mutex>
8#include <vector> 8#include <vector>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/thread_queue_list.h" 10#include "common/multi_level_queue.h"
11#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13 13
@@ -38,7 +38,7 @@ public:
38 u64 GetLastContextSwitchTicks() const; 38 u64 GetLastContextSwitchTicks() const;
39 39
40 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
41 void AddThread(SharedPtr<Thread> thread, u32 priority); 41 void AddThread(SharedPtr<Thread> thread);
42 42
43 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
44 void RemoveThread(Thread* thread); 44 void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
156 std::vector<SharedPtr<Thread>> thread_list; 156 std::vector<SharedPtr<Thread>> thread_list;
157 157
158 /// Lists only ready thread ids. 158 /// Lists only ready thread ids.
159 Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; 159 Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
160 160
161 SharedPtr<Thread> current_thread = nullptr; 161 SharedPtr<Thread> current_thread = nullptr;
162 162
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index e5e7f99e1..11796e5e5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -175,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
175 return ERR_INVALID_SIZE; 175 return ERR_INVALID_SIZE;
176 } 176 }
177 177
178 auto& vm_manager = Core::CurrentProcess()->VMManager(); 178 auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
179 const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); 179 const auto alloc_result = vm_manager.SetHeapSize(heap_size);
180 const auto alloc_result =
181 vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
182
183 if (alloc_result.Failed()) { 180 if (alloc_result.Failed()) {
184 return alloc_result.Code(); 181 return alloc_result.Code();
185 } 182 }
@@ -809,7 +806,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
809 return RESULT_SUCCESS; 806 return RESULT_SUCCESS;
810 807
811 case GetInfoType::TotalHeapUsage: 808 case GetInfoType::TotalHeapUsage:
812 *result = process->VMManager().GetTotalHeapUsage(); 809 *result = process->VMManager().GetCurrentHeapSize();
813 return RESULT_SUCCESS; 810 return RESULT_SUCCESS;
814 811
815 case GetInfoType::IsVirtualAddressMemoryEnabled: 812 case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1356,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
1356 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1353 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1357 current_thread->SetMutexWaitAddress(mutex_addr); 1354 current_thread->SetMutexWaitAddress(mutex_addr);
1358 current_thread->SetWaitHandle(thread_handle); 1355 current_thread->SetWaitHandle(thread_handle);
1359 current_thread->SetStatus(ThreadStatus::WaitMutex); 1356 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1360 current_thread->InvalidateWakeupCallback(); 1357 current_thread->InvalidateWakeupCallback();
1361 1358
1362 current_thread->WakeAfterDelay(nano_seconds); 1359 current_thread->WakeAfterDelay(nano_seconds);
@@ -1400,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1400 // them all. 1397 // them all.
1401 std::size_t last = waiting_threads.size(); 1398 std::size_t last = waiting_threads.size();
1402 if (target != -1) 1399 if (target != -1)
1403 last = target; 1400 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1404 1401
1405 // If there are no threads waiting on this condition variable, just exit 1402 // If there are no threads waiting on this condition variable, just exit
1406 if (last > waiting_threads.size()) 1403 if (last == 0)
1407 return RESULT_SUCCESS; 1404 return RESULT_SUCCESS;
1408 1405
1409 for (std::size_t index = 0; index < last; ++index) { 1406 for (std::size_t index = 0; index < last; ++index) {
@@ -1411,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1411 1408
1412 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); 1409 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1413 1410
1411 // Release the thread from the condition variable.
1412 thread->SetCondVarWaitAddress(0);
1413
1414 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); 1414 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
1415 1415
1416 auto& monitor = Core::System::GetInstance().Monitor(); 1416 auto& monitor = Core::System::GetInstance().Monitor();
@@ -1429,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1429 } 1429 }
1430 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), 1430 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
1431 thread->GetWaitHandle())); 1431 thread->GetWaitHandle()));
1432
1433 if (mutex_val == 0) { 1432 if (mutex_val == 0) {
1434 // We were able to acquire the mutex, resume this thread. 1433 // We were able to acquire the mutex, resume this thread.
1435 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1434 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1436 thread->ResumeFromWait(); 1435 thread->ResumeFromWait();
1437 1436
1438 auto* const lock_owner = thread->GetLockOwner(); 1437 auto* const lock_owner = thread->GetLockOwner();
@@ -1442,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1442 1441
1443 thread->SetLockOwner(nullptr); 1442 thread->SetLockOwner(nullptr);
1444 thread->SetMutexWaitAddress(0); 1443 thread->SetMutexWaitAddress(0);
1445 thread->SetCondVarWaitAddress(0);
1446 thread->SetWaitHandle(0); 1444 thread->SetWaitHandle(0);
1445 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1447 } else { 1446 } else {
1448 // Atomically signal that the mutex now has a waiting thread. 1447 // Atomically signal that the mutex now has a waiting thread.
1449 do { 1448 do {
@@ -1462,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1462 const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 1461 const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
1463 auto owner = handle_table.Get<Thread>(owner_handle); 1462 auto owner = handle_table.Get<Thread>(owner_handle);
1464 ASSERT(owner); 1463 ASSERT(owner);
1465 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1464 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1466 thread->InvalidateWakeupCallback(); 1465 thread->InvalidateWakeupCallback();
1466 thread->SetStatus(ThreadStatus::WaitMutex);
1467 1467
1468 owner->AddMutexWaiter(thread); 1468 owner->AddMutexWaiter(thread);
1469
1470 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1471 } 1469 }
1472 } 1470 }
1473 1471
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3b22e8e0d..fa3ac3abc 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -105,6 +105,7 @@ void Thread::ResumeFromWait() {
105 case ThreadStatus::WaitSleep: 105 case ThreadStatus::WaitSleep:
106 case ThreadStatus::WaitIPC: 106 case ThreadStatus::WaitIPC:
107 case ThreadStatus::WaitMutex: 107 case ThreadStatus::WaitMutex:
108 case ThreadStatus::WaitCondVar:
108 case ThreadStatus::WaitArb: 109 case ThreadStatus::WaitArb:
109 break; 110 break;
110 111
@@ -198,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
198 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); 199 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
199 thread->owner_process = &owner_process; 200 thread->owner_process = &owner_process;
200 thread->scheduler = &system.Scheduler(processor_id); 201 thread->scheduler = &system.Scheduler(processor_id);
201 thread->scheduler->AddThread(thread, priority); 202 thread->scheduler->AddThread(thread);
202 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); 203 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
203 204
204 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 205 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
@@ -351,7 +352,7 @@ void Thread::ChangeScheduler() {
351 if (*new_processor_id != processor_id) { 352 if (*new_processor_id != processor_id) {
352 // Remove thread from previous core's scheduler 353 // Remove thread from previous core's scheduler
353 scheduler->RemoveThread(this); 354 scheduler->RemoveThread(this);
354 next_scheduler.AddThread(this, current_priority); 355 next_scheduler.AddThread(this);
355 } 356 }
356 357
357 processor_id = *new_processor_id; 358 processor_id = *new_processor_id;
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index faad5f391..9c684758c 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
51 WaitIPC, ///< Waiting for the reply from an IPC request 51 WaitIPC, ///< Waiting for the reply from an IPC request
52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false 52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true 53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
54 WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc 54 WaitMutex, ///< Waiting due to an ArbitrateLock svc
55 WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc
55 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc 56 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
56 Dormant, ///< Created but not yet made ready 57 Dormant, ///< Created but not yet made ready
57 Dead ///< Run to completion, or forcefully terminated 58 Dead ///< Run to completion, or forcefully terminated
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 22bf55ce7..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
256 return RESULT_SUCCESS; 256 return RESULT_SUCCESS;
257} 257}
258 258
259ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { 259ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
260 if (!IsWithinHeapRegion(target, size)) { 260 if (size > GetHeapRegionSize()) {
261 return ERR_INVALID_ADDRESS; 261 return ERR_OUT_OF_MEMORY;
262 }
263
264 // No need to do any additional work if the heap is already the given size.
265 if (size == GetCurrentHeapSize()) {
266 return MakeResult(heap_region_base);
262 } 267 }
263 268
264 if (heap_memory == nullptr) { 269 if (heap_memory == nullptr) {
265 // Initialize heap 270 // Initialize heap
266 heap_memory = std::make_shared<std::vector<u8>>(); 271 heap_memory = std::make_shared<std::vector<u8>>(size);
267 heap_start = heap_end = target; 272 heap_end = heap_region_base + size;
268 } else { 273 } else {
269 UnmapRange(heap_start, heap_end - heap_start); 274 UnmapRange(heap_region_base, GetCurrentHeapSize());
270 }
271
272 // If necessary, expand backing vector to cover new heap extents.
273 if (target < heap_start) {
274 heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
275 heap_start = target;
276 RefreshMemoryBlockMappings(heap_memory.get());
277 }
278 if (target + size > heap_end) {
279 heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
280 heap_end = target + size;
281 RefreshMemoryBlockMappings(heap_memory.get());
282 } 275 }
283 ASSERT(heap_end - heap_start == heap_memory->size());
284 276
285 CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, 277 // If necessary, expand backing vector to cover new heap extents in
286 MemoryState::Heap)); 278 // the case of allocating. Otherwise, shrink the backing memory,
287 Reprotect(vma, perms); 279 // if a smaller heap has been requested.
280 const u64 old_heap_size = GetCurrentHeapSize();
281 if (size > old_heap_size) {
282 const u64 alloc_size = size - old_heap_size;
288 283
289 heap_used = size; 284 heap_memory->insert(heap_memory->end(), alloc_size, 0);
290 285 RefreshMemoryBlockMappings(heap_memory.get());
291 return MakeResult<VAddr>(heap_end - size); 286 } else if (size < old_heap_size) {
292} 287 heap_memory->resize(size);
288 heap_memory->shrink_to_fit();
293 289
294ResultCode VMManager::HeapFree(VAddr target, u64 size) { 290 RefreshMemoryBlockMappings(heap_memory.get());
295 if (!IsWithinHeapRegion(target, size)) {
296 return ERR_INVALID_ADDRESS;
297 } 291 }
298 292
299 if (size == 0) { 293 heap_end = heap_region_base + size;
300 return RESULT_SUCCESS; 294 ASSERT(GetCurrentHeapSize() == heap_memory->size());
301 }
302 295
303 const ResultCode result = UnmapRange(target, size); 296 const auto mapping_result =
304 if (result.IsError()) { 297 MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
305 return result; 298 if (mapping_result.Failed()) {
299 return mapping_result.Code();
306 } 300 }
307 301
308 heap_used -= size; 302 return MakeResult<VAddr>(heap_region_base);
309 return RESULT_SUCCESS;
310} 303}
311 304
312MemoryInfo VMManager::QueryMemory(VAddr address) const { 305MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
598 591
599 heap_region_base = map_region_end; 592 heap_region_base = map_region_end;
600 heap_region_end = heap_region_base + heap_region_size; 593 heap_region_end = heap_region_base + heap_region_size;
594 heap_end = heap_region_base;
601 595
602 new_map_region_base = heap_region_end; 596 new_map_region_base = heap_region_end;
603 new_map_region_end = new_map_region_base + new_map_region_size; 597 new_map_region_end = new_map_region_base + new_map_region_size;
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
692 return 0xF8000000; 686 return 0xF8000000;
693} 687}
694 688
695u64 VMManager::GetTotalHeapUsage() const {
696 return heap_used;
697}
698
699VAddr VMManager::GetAddressSpaceBaseAddress() const { 689VAddr VMManager::GetAddressSpaceBaseAddress() const {
700 return address_space_base; 690 return address_space_base;
701} 691}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
778 return heap_region_end - heap_region_base; 768 return heap_region_end - heap_region_base;
779} 769}
780 770
771u64 VMManager::GetCurrentHeapSize() const {
772 return heap_end - heap_region_base;
773}
774
781bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { 775bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
782 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), 776 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
783 GetHeapRegionEndAddress()); 777 GetHeapRegionEndAddress());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 7cdff6094..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -380,11 +380,41 @@ public:
380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary. 380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); 381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
382 382
383 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
384 ResultCode HeapFree(VAddr target, u64 size);
385
386 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); 383 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
387 384
385 /// Attempts to allocate a heap with the given size.
386 ///
387 /// @param size The size of the heap to allocate in bytes.
388 ///
389 /// @note If a heap is currently allocated, and this is called
390 /// with a size that is equal to the size of the current heap,
391 /// then this function will do nothing and return the current
392 /// heap's starting address, as there's no need to perform
393 /// any additional heap allocation work.
394 ///
395 /// @note If a heap is currently allocated, and this is called
396 /// with a size less than the current heap's size, then
397 /// this function will attempt to shrink the heap.
398 ///
399 /// @note If a heap is currently allocated, and this is called
400 /// with a size larger than the current heap's size, then
401 /// this function will attempt to extend the size of the heap.
402 ///
403 /// @returns A result indicating either success or failure.
404 /// <p>
405 /// If successful, this function will return a result
406 /// containing the starting address to the allocated heap.
407 /// <p>
408 /// If unsuccessful, this function will return a result
409 /// containing an error code.
410 ///
411 /// @pre The given size must lie within the allowable heap
412 /// memory region managed by this VMManager instance.
413 /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
414 /// being returned as the result.
415 ///
416 ResultVal<VAddr> SetHeapSize(u64 size);
417
388 /// Queries the memory manager for information about the given address. 418 /// Queries the memory manager for information about the given address.
389 /// 419 ///
390 /// @param address The address to query the memory manager about for information. 420 /// @param address The address to query the memory manager about for information.
@@ -418,9 +448,6 @@ public:
418 /// Gets the total memory usage, used by svcGetInfo 448 /// Gets the total memory usage, used by svcGetInfo
419 u64 GetTotalMemoryUsage() const; 449 u64 GetTotalMemoryUsage() const;
420 450
421 /// Gets the total heap usage, used by svcGetInfo
422 u64 GetTotalHeapUsage() const;
423
424 /// Gets the address space base address 451 /// Gets the address space base address
425 VAddr GetAddressSpaceBaseAddress() const; 452 VAddr GetAddressSpaceBaseAddress() const;
426 453
@@ -469,6 +496,13 @@ public:
469 /// Gets the total size of the heap region in bytes. 496 /// Gets the total size of the heap region in bytes.
470 u64 GetHeapRegionSize() const; 497 u64 GetHeapRegionSize() const;
471 498
499 /// Gets the total size of the current heap in bytes.
500 ///
501 /// @note This is the current allocated heap size, not the size
502 /// of the region it's allowed to exist within.
503 ///
504 u64 GetCurrentHeapSize() const;
505
472 /// Determines whether or not the specified range is within the heap region. 506 /// Determines whether or not the specified range is within the heap region.
473 bool IsWithinHeapRegion(VAddr address, u64 size) const; 507 bool IsWithinHeapRegion(VAddr address, u64 size) const;
474 508
@@ -617,9 +651,6 @@ private:
617 VAddr new_map_region_base = 0; 651 VAddr new_map_region_base = 0;
618 VAddr new_map_region_end = 0; 652 VAddr new_map_region_end = 0;
619 653
620 VAddr main_code_region_base = 0;
621 VAddr main_code_region_end = 0;
622
623 VAddr tls_io_region_base = 0; 654 VAddr tls_io_region_base = 0;
624 VAddr tls_io_region_end = 0; 655 VAddr tls_io_region_end = 0;
625 656
@@ -628,9 +659,9 @@ private:
628 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous 659 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
629 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. 660 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
630 std::shared_ptr<std::vector<u8>> heap_memory; 661 std::shared_ptr<std::vector<u8>> heap_memory;
631 // The left/right bounds of the address space covered by heap_memory. 662
632 VAddr heap_start = 0; 663 // The end of the currently allocated heap. This is not an inclusive
664 // end of the range. This is essentially 'base_address + current_size'.
633 VAddr heap_end = 0; 665 VAddr heap_end = 0;
634 u64 heap_used = 0;
635}; 666};
636} // namespace Kernel 667} // namespace Kernel
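
A small sketch of the SetHeapSize() contract documented above, assuming a VMManager instance is at hand; the byte counts are arbitrary and error handling is reduced to a Failed() check (the same check the svc handler in this patch uses).

```cpp
#include "core/hle/kernel/vm_manager.h"

void HeapSizeExample(Kernel::VMManager& vm_manager) {
    // First call allocates a heap at the heap region base address.
    const auto grown = vm_manager.SetHeapSize(0x200000); // 2 MiB
    if (grown.Failed()) {
        return;
    }

    // Calling again with the same size is a no-op and returns the same base.
    const auto unchanged = vm_manager.SetHeapSize(0x200000);

    // A smaller size shrinks both the mapping and the backing vector.
    const auto shrunk = vm_manager.SetHeapSize(0x100000); // 1 MiB

    // Anything larger than GetHeapRegionSize() fails with ERR_OUT_OF_MEMORY.
    const auto too_big = vm_manager.SetHeapSize(vm_manager.GetHeapRegionSize() + 1);
    (void)unchanged;
    (void)shrunk;
    (void)too_big;
}
```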
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp
index 770590d0b..2c229bcad 100644
--- a/src/core/hle/service/fatal/fatal.cpp
+++ b/src/core/hle/service/fatal/fatal.cpp
@@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
25Module::Interface::~Interface() = default; 25Module::Interface::~Interface() = default;
26 26
27struct FatalInfo { 27struct FatalInfo {
28 std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or 28 enum class Architecture : s32 {
29 // not(find a game which has non zero valeus) 29 AArch64,
30 u64_le unk0{}; 30 AArch32,
31 u64_le unk1{}; 31 };
32 u64_le unk2{}; 32
33 u64_le unk3{}; 33 const char* ArchAsString() const {
34 u64_le unk4{}; 34 return arch == Architecture::AArch64 ? "AArch64" : "AArch32";
35 u64_le unk5{}; 35 }
36 u64_le unk6{}; 36
37 std::array<u64_le, 31> registers{};
38 u64_le sp{};
39 u64_le pc{};
40 u64_le pstate{};
41 u64_le afsr0{};
42 u64_le afsr1{};
43 u64_le esr{};
44 u64_le far{};
37 45
38 std::array<u64_le, 32> backtrace{}; 46 std::array<u64_le, 32> backtrace{};
39 u64_le unk7{}; 47 u64_le program_entry_point{};
40 u64_le unk8{}; 48
49 // Bit flags that indicate which registers have been set with values
50 // for this context. The service itself uses these to determine which
51 // registers to specifically print out.
52 u64_le set_flags{};
53
41 u32_le backtrace_size{}; 54 u32_le backtrace_size{};
42 u32_le unk9{}; 55 Architecture arch{};
43 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? 56 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding?
44}; 57};
45static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); 58static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size");
@@ -52,36 +65,36 @@ enum class FatalType : u32 {
52 65
53static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { 66static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) {
54 const auto title_id = Core::CurrentProcess()->GetTitleID(); 67 const auto title_id = Core::CurrentProcess()->GetTitleID();
55 std::string crash_report = 68 std::string crash_report = fmt::format(
56 fmt::format("Yuzu {}-{} crash report\n" 69 "Yuzu {}-{} crash report\n"
57 "Title ID: {:016x}\n" 70 "Title ID: {:016x}\n"
58 "Result: 0x{:X} ({:04}-{:04d})\n" 71 "Result: 0x{:X} ({:04}-{:04d})\n"
59 "\n", 72 "Set flags: 0x{:16X}\n"
60 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, 73 "Program entry point: 0x{:16X}\n"
61 2000 + static_cast<u32>(error_code.module.Value()), 74 "\n",
62 static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); 75 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
76 2000 + static_cast<u32>(error_code.module.Value()),
77 static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point);
63 if (info.backtrace_size != 0x0) { 78 if (info.backtrace_size != 0x0) {
64 crash_report += "Registers:\n"; 79 crash_report += "Registers:\n";
65 // TODO(ogniK): This is just a guess, find a game which actually has non zero values
66 for (size_t i = 0; i < info.registers.size(); i++) { 80 for (size_t i = 0; i < info.registers.size(); i++) {
67 crash_report += 81 crash_report +=
68 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); 82 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]);
69 } 83 }
70 crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); 84 crash_report += fmt::format(" SP: {:016x}\n", info.sp);
71 crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); 85 crash_report += fmt::format(" PC: {:016x}\n", info.pc);
72 crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); 86 crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate);
73 crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); 87 crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0);
74 crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); 88 crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1);
75 crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); 89 crash_report += fmt::format(" ESR: {:016x}\n", info.esr);
76 crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); 90 crash_report += fmt::format(" FAR: {:016x}\n", info.far);
77 crash_report += "\nBacktrace:\n"; 91 crash_report += "\nBacktrace:\n";
78 for (size_t i = 0; i < info.backtrace_size; i++) { 92 for (size_t i = 0; i < info.backtrace_size; i++) {
79 crash_report += 93 crash_report +=
80 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); 94 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
81 } 95 }
82 crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); 96
83 crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); 97 crash_report += fmt::format("Architecture: {}\n", info.ArchAsString());
84 crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9);
85 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); 98 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10);
86 } 99 }
87 100
@@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F
125 case FatalType::ErrorReport: 138 case FatalType::ErrorReport:
126 GenerateErrorReport(error_code, info); 139 GenerateErrorReport(error_code, info);
127 break; 140 break;
128 }; 141 }
129} 142}
130 143
131void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { 144void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
132 LOG_ERROR(Service_Fatal, "called"); 145 LOG_ERROR(Service_Fatal, "called");
133 IPC::RequestParser rp{ctx}; 146 IPC::RequestParser rp{ctx};
134 auto error_code = rp.Pop<ResultCode>(); 147 const auto error_code = rp.Pop<ResultCode>();
135 148
136 ThrowFatalError(error_code, FatalType::ErrorScreen, {}); 149 ThrowFatalError(error_code, FatalType::ErrorScreen, {});
137 IPC::ResponseBuilder rb{ctx, 2}; 150 IPC::ResponseBuilder rb{ctx, 2};
@@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
141void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { 154void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
142 LOG_ERROR(Service_Fatal, "called"); 155 LOG_ERROR(Service_Fatal, "called");
143 IPC::RequestParser rp(ctx); 156 IPC::RequestParser rp(ctx);
144 auto error_code = rp.Pop<ResultCode>(); 157 const auto error_code = rp.Pop<ResultCode>();
145 auto fatal_type = rp.PopEnum<FatalType>(); 158 const auto fatal_type = rp.PopEnum<FatalType>();
146 159
147 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy 160 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy
148 IPC::ResponseBuilder rb{ctx, 2}; 161 IPC::ResponseBuilder rb{ctx, 2};
@@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
152void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { 165void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) {
153 LOG_ERROR(Service_Fatal, "called"); 166 LOG_ERROR(Service_Fatal, "called");
154 IPC::RequestParser rp(ctx); 167 IPC::RequestParser rp(ctx);
155 auto error_code = rp.Pop<ResultCode>(); 168 const auto error_code = rp.Pop<ResultCode>();
156 auto fatal_type = rp.PopEnum<FatalType>(); 169 const auto fatal_type = rp.PopEnum<FatalType>();
157 auto fatal_info = ctx.ReadBuffer(); 170 const auto fatal_info = ctx.ReadBuffer();
158 FatalInfo info{}; 171 FatalInfo info{};
159 172
160 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); 173 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!");
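
A hedged sketch of how the set_flags member of the reworked FatalInfo struct could be consulted when printing registers. The patch only documents that these are "bit flags that indicate which registers have been set"; the bit-to-register mapping below (bit i corresponds to X[i]) is an assumption for illustration, not something this diff specifies.

```cpp
#include <cstddef>
#include <string>

#include <fmt/format.h>

#include "common/common_types.h"

// Assumption: bit i of set_flags marks general-purpose register X[i] as valid.
// The real fatal service may use a different layout; see the note in FatalInfo.
std::string DumpSetRegisters(const FatalInfo& info) {
    std::string out;
    const u64 flags = info.set_flags;
    for (std::size_t i = 0; i < info.registers.size(); ++i) {
        if ((flags >> i) & 1) {
            out += fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]);
        }
    }
    return out;
}
```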
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index d0284bdf4..c7038b217 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,5 +1,7 @@
1add_executable(tests 1add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp
4 common/multi_level_queue.cpp
3 common/param_package.cpp 5 common/param_package.cpp
4 common/ring_buffer.cpp 6 common/ring_buffer.cpp
5 core/arm/arm_test_common.cpp 7 core/arm/arm_test_common.cpp
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp
new file mode 100644
index 000000000..479b5995a
--- /dev/null
+++ b/src/tests/common/bit_utils.cpp
@@ -0,0 +1,23 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/bit_util.h"
8
9namespace Common {
10
11TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") {
12 REQUIRE(Common::CountTrailingZeroes32(0) == 32);
13 REQUIRE(Common::CountTrailingZeroes64(0) == 64);
14 REQUIRE(Common::CountTrailingZeroes32(9) == 0);
15 REQUIRE(Common::CountTrailingZeroes32(8) == 3);
16 REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12);
17 REQUIRE(Common::CountTrailingZeroes64(9) == 0);
18 REQUIRE(Common::CountTrailingZeroes64(8) == 3);
19 REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12);
20 REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36);
21}
22
23} // namespace Common
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp
new file mode 100644
index 000000000..cca7ec7da
--- /dev/null
+++ b/src/tests/common/multi_level_queue.cpp
@@ -0,0 +1,55 @@
1// Copyright 2019 Yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/common_types.h"
8#include "common/multi_level_queue.h"
9
10namespace Common {
11
12TEST_CASE("MultiLevelQueue", "[common]") {
13 std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
14 Common::MultiLevelQueue<f32, 64> mlq;
15 REQUIRE(mlq.empty());
16 mlq.add(values[2], 2);
17 mlq.add(values[7], 7);
18 mlq.add(values[3], 3);
19 mlq.add(values[4], 4);
20 mlq.add(values[0], 0);
21 mlq.add(values[5], 5);
22 mlq.add(values[6], 6);
23 mlq.add(values[1], 1);
24 u32 index = 0;
25 bool all_set = true;
26 for (auto& f : mlq) {
27 all_set &= (f == values[index]);
28 index++;
29 }
30 REQUIRE(all_set);
31 REQUIRE(!mlq.empty());
32 f32 v = 8.0;
33 mlq.add(v, 2);
34 v = -7.0;
35 mlq.add(v, 2, false);
36 REQUIRE(mlq.front(2) == -7.0);
37 mlq.yield(2);
38 REQUIRE(mlq.front(2) == values[2]);
39 REQUIRE(mlq.back(2) == -7.0);
40 REQUIRE(mlq.empty(8));
41 v = 10.0;
42 mlq.add(v, 8);
43 mlq.adjust(v, 8, 9);
44 REQUIRE(mlq.front(9) == v);
45 REQUIRE(mlq.empty(8));
46 REQUIRE(!mlq.empty(9));
47 mlq.adjust(values[0], 0, 9);
48 REQUIRE(mlq.highest_priority_set() == 1);
49 REQUIRE(mlq.lowest_priority_set() == 9);
50 mlq.remove(values[1], 1);
51 REQUIRE(mlq.highest_priority_set() == 2);
52 REQUIRE(mlq.empty(1));
53}
54
55} // namespace Common
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 267a03f2d..30b29e14d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -286,9 +286,10 @@ void GPU::ProcessSemaphoreTriggerMethod() {
286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of 286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
287 // CoreTiming 287 // CoreTiming
288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); 288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
289 memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block)); 289 memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
290 sizeof(block));
290 } else { 291 } else {
291 const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())}; 292 const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
292 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || 293 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
293 (op == GpuSemaphoreOperation::AcquireGequal && 294 (op == GpuSemaphoreOperation::AcquireGequal &&
294 static_cast<s32>(word - regs.semaphore_sequence) > 0) || 295 static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
@@ -315,11 +316,11 @@ void GPU::ProcessSemaphoreTriggerMethod() {
315} 316}
316 317
317void GPU::ProcessSemaphoreRelease() { 318void GPU::ProcessSemaphoreRelease() {
318 memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release); 319 memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release);
319} 320}
320 321
321void GPU::ProcessSemaphoreAcquire() { 322void GPU::ProcessSemaphoreAcquire() {
322 const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress()); 323 const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
323 const auto value = regs.semaphore_acquire; 324 const auto value = regs.semaphore_acquire;
324 if (word != value) { 325 if (word != value) {
325 regs.acquire_active = true; 326 regs.acquire_active = true;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index c1830ac8d..de30ea354 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -177,11 +177,11 @@ public:
177 u32 address_high; 177 u32 address_high;
178 u32 address_low; 178 u32 address_low;
179 179
180 GPUVAddr SmaphoreAddress() const { 180 GPUVAddr SemaphoreAddress() const {
181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | 181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
182 address_low); 182 address_low);
183 } 183 }
184 } smaphore_address; 184 } semaphore_address;
185 185
186 u32 semaphore_sequence; 186 u32 semaphore_sequence;
187 u32 semaphore_trigger; 187 u32 semaphore_trigger;
@@ -263,7 +263,7 @@ private:
263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \ 263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \
264 "Field " #field_name " has invalid position") 264 "Field " #field_name " has invalid position")
265 265
266ASSERT_REG_POSITION(smaphore_address, 0x4); 266ASSERT_REG_POSITION(semaphore_address, 0x4);
267ASSERT_REG_POSITION(semaphore_sequence, 0x6); 267ASSERT_REG_POSITION(semaphore_sequence, 0x6);
268ASSERT_REG_POSITION(semaphore_trigger, 0x7); 268ASSERT_REG_POSITION(semaphore_trigger, 0x7);
269ASSERT_REG_POSITION(reference_count, 0x14); 269ASSERT_REG_POSITION(reference_count, 0x14);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index c644271d0..e8073579f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -538,12 +538,12 @@ private:
538 return nullptr; 538 return nullptr;
539 } 539 }
540 540
541 void Register(const Surface& object) { 541 void Register(const Surface& object) override {
542 RasterizerCache<Surface>::Register(object); 542 RasterizerCache<Surface>::Register(object);
543 } 543 }
544 544
545 /// Unregisters an object from the cache 545 /// Unregisters an object from the cache
546 void Unregister(const Surface& object) { 546 void Unregister(const Surface& object) override {
547 if (object->IsReinterpreted()) { 547 if (object->IsReinterpreted()) {
548 auto interval = GetReinterpretInterval(object); 548 auto interval = GetReinterpretInterval(object);
549 reinterpreted_surfaces.erase(interval); 549 reinterpreted_surfaces.erase(interval);
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index a1e117443..13c46e5b8 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -21,7 +21,7 @@ public:
21 CommandBufferPool(const VKDevice& device) 21 CommandBufferPool(const VKDevice& device)
22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} 22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
23 23
24 void Allocate(std::size_t begin, std::size_t end) { 24 void Allocate(std::size_t begin, std::size_t end) override {
25 const auto dev = device.GetLogical(); 25 const auto dev = device.GetLogical();
26 const auto& dld = device.GetDispatchLoader(); 26 const auto& dld = device.GetDispatchLoader();
27 const u32 graphics_family = device.GetGraphicsFamily(); 27 const u32 graphics_family = device.GetGraphicsFamily();
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index 5bfe4cead..08ee86fa6 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -97,7 +97,7 @@ private:
97class VKFenceWatch final : public VKResource { 97class VKFenceWatch final : public VKResource {
98public: 98public:
99 explicit VKFenceWatch(); 99 explicit VKFenceWatch();
100 ~VKFenceWatch(); 100 ~VKFenceWatch() override;
101 101
102 /// Waits for the fence to be released. 102 /// Waits for the fence to be released.
103 void Wait(); 103 void Wait();
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 06ad74ffe..593bb681f 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -234,6 +234,9 @@ QString WaitTreeThread::GetText() const {
234 case Kernel::ThreadStatus::WaitMutex: 234 case Kernel::ThreadStatus::WaitMutex:
235 status = tr("waiting for mutex"); 235 status = tr("waiting for mutex");
236 break; 236 break;
237 case Kernel::ThreadStatus::WaitCondVar:
238 status = tr("waiting for condition variable");
239 break;
237 case Kernel::ThreadStatus::WaitArb: 240 case Kernel::ThreadStatus::WaitArb:
238 status = tr("waiting for address arbiter"); 241 status = tr("waiting for address arbiter");
239 break; 242 break;
@@ -269,6 +272,7 @@ QColor WaitTreeThread::GetColor() const {
269 case Kernel::ThreadStatus::WaitSynchAll: 272 case Kernel::ThreadStatus::WaitSynchAll:
270 case Kernel::ThreadStatus::WaitSynchAny: 273 case Kernel::ThreadStatus::WaitSynchAny:
271 case Kernel::ThreadStatus::WaitMutex: 274 case Kernel::ThreadStatus::WaitMutex:
275 case Kernel::ThreadStatus::WaitCondVar:
272 case Kernel::ThreadStatus::WaitArb: 276 case Kernel::ThreadStatus::WaitArb:
273 return QColor(Qt::GlobalColor::red); 277 return QColor(Qt::GlobalColor::red);
274 case Kernel::ThreadStatus::Dormant: 278 case Kernel::ThreadStatus::Dormant: