Diffstat (limited to 'src')
28 files changed, 590 insertions, 152 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt index 43ae8a9e7..850ce8006 100644 --- a/src/common/CMakeLists.txt +++ b/src/common/CMakeLists.txt | |||
| @@ -98,6 +98,7 @@ add_library(common STATIC | |||
| 98 | microprofile.h | 98 | microprofile.h |
| 99 | microprofileui.h | 99 | microprofileui.h |
| 100 | misc.cpp | 100 | misc.cpp |
| 101 | multi_level_queue.h | ||
| 101 | page_table.cpp | 102 | page_table.cpp |
| 102 | page_table.h | 103 | page_table.h |
| 103 | param_package.cpp | 104 | param_package.cpp |
diff --git a/src/common/bit_util.h b/src/common/bit_util.h index 1eea17ba1..a4f9ed4aa 100644 --- a/src/common/bit_util.h +++ b/src/common/bit_util.h | |||
| @@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) { | |||
| 58 | return __builtin_clzll(value); | 58 | return __builtin_clzll(value); |
| 59 | } | 59 | } |
| 60 | #endif | 60 | #endif |
| 61 | |||
| 62 | #ifdef _MSC_VER | ||
| 63 | inline u32 CountTrailingZeroes32(u32 value) { | ||
| 64 | unsigned long trailing_zero = 0; | ||
| 65 | |||
| 66 | if (_BitScanForward(&trailing_zero, value) != 0) { | ||
| 67 | return trailing_zero; | ||
| 68 | } | ||
| 69 | |||
| 70 | return 32; | ||
| 71 | } | ||
| 72 | |||
| 73 | inline u64 CountTrailingZeroes64(u64 value) { | ||
| 74 | unsigned long trailing_zero = 0; | ||
| 75 | |||
| 76 | if (_BitScanForward64(&trailing_zero, value) != 0) { | ||
| 77 | return trailing_zero; | ||
| 78 | } | ||
| 79 | |||
| 80 | return 64; | ||
| 81 | } | ||
| 82 | #else | ||
| 83 | inline u32 CountTrailingZeroes32(u32 value) { | ||
| 84 | if (value == 0) { | ||
| 85 | return 32; | ||
| 86 | } | ||
| 87 | |||
| 88 | return __builtin_ctz(value); | ||
| 89 | } | ||
| 90 | |||
| 91 | inline u64 CountTrailingZeroes64(u64 value) { | ||
| 92 | if (value == 0) { | ||
| 93 | return 64; | ||
| 94 | } | ||
| 95 | |||
| 96 | return __builtin_ctzll(value); | ||
| 97 | } | ||
| 98 | #endif | ||
| 99 | |||
| 61 | } // namespace Common | 100 | } // namespace Common |
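For reference, the helpers added above return the full bit width (32 or 64) when the input is zero, which lets callers treat "no bit set" as an out-of-range index without a separate check. Below is a standalone sketch of that pattern (not part of the diff); `LowestSetIndex` is a hypothetical stand-in that mirrors the semantics of `CountTrailingZeroes64` shown above.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in mirroring Common::CountTrailingZeroes64 semantics:
// returns the index of the lowest set bit, or 64 when no bit is set.
inline uint64_t LowestSetIndex(uint64_t bits) {
    for (uint64_t i = 0; i < 64; ++i) {
        if ((bits >> i) & 1) {
            return i;
        }
    }
    return 64;
}

int main() {
    const uint64_t used_priorities = (1ULL << 3) | (1ULL << 10);
    // Find the next used priority strictly above priority 3.
    const uint64_t masked = used_priorities & ~((1ULL << (3 + 1)) - 1);
    assert(LowestSetIndex(masked) == 10);
    // When nothing remains, the result equals the bit width and acts as "end".
    assert(LowestSetIndex(0) == 64);
    return 0;
}
```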
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h new file mode 100644 index 000000000..2b61b91e0 --- /dev/null +++ b/src/common/multi_level_queue.h | |||
| @@ -0,0 +1,337 @@ | |||
| 1 | // Copyright 2019 TuxSH | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <array> | ||
| 8 | #include <iterator> | ||
| 9 | #include <list> | ||
| 10 | #include <utility> | ||
| 11 | |||
| 12 | #include "common/bit_util.h" | ||
| 13 | #include "common/common_types.h" | ||
| 14 | |||
| 15 | namespace Common { | ||
| 16 | |||
| 17 | /** | ||
| 18 | * A MultiLevelQueue is a priority queue with the following characteristics: | ||
| 19 | * - iterable through each of its elements. | ||
| 20 | * - the back element can be obtained. | ||
| 21 | * - O(1) add, lookup (both front and back) | ||
| 22 | * - discrete priorities and a max of 64 priorities (limited domain) | ||
| 23 | * This type of priority queue is normally used for managing threads within a scheduler. | ||
| 24 | */ | ||
| 25 | template <typename T, std::size_t Depth> | ||
| 26 | class MultiLevelQueue { | ||
| 27 | public: | ||
| 28 | using value_type = T; | ||
| 29 | using reference = value_type&; | ||
| 30 | using const_reference = const value_type&; | ||
| 31 | using pointer = value_type*; | ||
| 32 | using const_pointer = const value_type*; | ||
| 33 | |||
| 34 | using difference_type = typename std::pointer_traits<pointer>::difference_type; | ||
| 35 | using size_type = std::size_t; | ||
| 36 | |||
| 37 | template <bool is_constant> | ||
| 38 | class iterator_impl { | ||
| 39 | public: | ||
| 40 | using iterator_category = std::bidirectional_iterator_tag; | ||
| 41 | using value_type = T; | ||
| 42 | using pointer = std::conditional_t<is_constant, const T*, T*>; | ||
| 43 | using reference = std::conditional_t<is_constant, const T&, T&>; | ||
| 44 | using difference_type = typename std::pointer_traits<pointer>::difference_type; | ||
| 45 | |||
| 46 | friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) { | ||
| 47 | if (lhs.IsEnd() && rhs.IsEnd()) | ||
| 48 | return true; | ||
| 49 | return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it); | ||
| 50 | } | ||
| 51 | |||
| 52 | friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) { | ||
| 53 | return !operator==(lhs, rhs); | ||
| 54 | } | ||
| 55 | |||
| 56 | reference operator*() const { | ||
| 57 | return *it; | ||
| 58 | } | ||
| 59 | |||
| 60 | pointer operator->() const { | ||
| 61 | return it.operator->(); | ||
| 62 | } | ||
| 63 | |||
| 64 | iterator_impl& operator++() { | ||
| 65 | if (IsEnd()) { | ||
| 66 | return *this; | ||
| 67 | } | ||
| 68 | |||
| 69 | ++it; | ||
| 70 | |||
| 71 | if (it == GetEndItForPrio()) { | ||
| 72 | u64 prios = mlq.used_priorities; | ||
| 73 | prios &= ~((1ULL << (current_priority + 1)) - 1); | ||
| 74 | if (prios == 0) { | ||
| 75 | current_priority = mlq.depth(); | ||
| 76 | } else { | ||
| 77 | current_priority = CountTrailingZeroes64(prios); | ||
| 78 | it = GetBeginItForPrio(); | ||
| 79 | } | ||
| 80 | } | ||
| 81 | return *this; | ||
| 82 | } | ||
| 83 | |||
| 84 | iterator_impl& operator--() { | ||
| 85 | if (IsEnd()) { | ||
| 86 | if (mlq.used_priorities != 0) { | ||
| 87 | current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities); | ||
| 88 | it = GetEndItForPrio(); | ||
| 89 | --it; | ||
| 90 | } | ||
| 91 | } else if (it == GetBeginItForPrio()) { | ||
| 92 | u64 prios = mlq.used_priorities; | ||
| 93 | prios &= (1ULL << current_priority) - 1; | ||
| 94 | if (prios != 0) { | ||
| 95 | current_priority = CountTrailingZeroes64(prios); | ||
| 96 | it = GetEndItForPrio(); | ||
| 97 | --it; | ||
| 98 | } | ||
| 99 | } else { | ||
| 100 | --it; | ||
| 101 | } | ||
| 102 | return *this; | ||
| 103 | } | ||
| 104 | |||
| 105 | iterator_impl operator++(int) { | ||
| 106 | const iterator_impl v{*this}; | ||
| 107 | ++(*this); | ||
| 108 | return v; | ||
| 109 | } | ||
| 110 | |||
| 111 | iterator_impl operator--(int) { | ||
| 112 | const iterator_impl v{*this}; | ||
| 113 | --(*this); | ||
| 114 | return v; | ||
| 115 | } | ||
| 116 | |||
| 117 | // allow implicit conversion between const and non-const iterators | ||
| 118 | iterator_impl(const iterator_impl<false>& other) | ||
| 119 | : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} | ||
| 120 | |||
| 121 | iterator_impl(const iterator_impl<true>& other) | ||
| 122 | : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {} | ||
| 123 | |||
| 124 | iterator_impl& operator=(const iterator_impl<false>& other) { | ||
| 125 | mlq = other.mlq; | ||
| 126 | it = other.it; | ||
| 127 | current_priority = other.current_priority; | ||
| 128 | return *this; | ||
| 129 | } | ||
| 130 | |||
| 131 | friend class iterator_impl<true>; | ||
| 132 | iterator_impl() = default; | ||
| 133 | |||
| 134 | private: | ||
| 135 | friend class MultiLevelQueue; | ||
| 136 | using container_ref = | ||
| 137 | std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>; | ||
| 138 | using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator, | ||
| 139 | typename std::list<T>::iterator>; | ||
| 140 | |||
| 141 | explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority) | ||
| 142 | : mlq(mlq), it(it), current_priority(current_priority) {} | ||
| 143 | explicit iterator_impl(container_ref mlq, u32 current_priority) | ||
| 144 | : mlq(mlq), it(), current_priority(current_priority) {} | ||
| 145 | |||
| 146 | bool IsEnd() const { | ||
| 147 | return current_priority == mlq.depth(); | ||
| 148 | } | ||
| 149 | |||
| 150 | list_iterator GetBeginItForPrio() const { | ||
| 151 | return mlq.levels[current_priority].begin(); | ||
| 152 | } | ||
| 153 | |||
| 154 | list_iterator GetEndItForPrio() const { | ||
| 155 | return mlq.levels[current_priority].end(); | ||
| 156 | } | ||
| 157 | |||
| 158 | container_ref mlq; | ||
| 159 | list_iterator it; | ||
| 160 | u32 current_priority; | ||
| 161 | }; | ||
| 162 | |||
| 163 | using iterator = iterator_impl<false>; | ||
| 164 | using const_iterator = iterator_impl<true>; | ||
| 165 | |||
| 166 | void add(const T& element, u32 priority, bool send_back = true) { | ||
| 167 | if (send_back) | ||
| 168 | levels[priority].push_back(element); | ||
| 169 | else | ||
| 170 | levels[priority].push_front(element); | ||
| 171 | used_priorities |= 1ULL << priority; | ||
| 172 | } | ||
| 173 | |||
| 174 | void remove(const T& element, u32 priority) { | ||
| 175 | auto it = ListIterateTo(levels[priority], element); | ||
| 176 | if (it == levels[priority].end()) | ||
| 177 | return; | ||
| 178 | levels[priority].erase(it); | ||
| 179 | if (levels[priority].empty()) { | ||
| 180 | used_priorities &= ~(1ULL << priority); | ||
| 181 | } | ||
| 182 | } | ||
| 183 | |||
| 184 | void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) { | ||
| 185 | remove(element, old_priority); | ||
| 186 | add(element, new_priority, !adjust_front); | ||
| 187 | } | ||
| 188 | void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) { | ||
| 189 | adjust(*it, old_priority, new_priority, adjust_front); | ||
| 190 | } | ||
| 191 | |||
| 192 | void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) { | ||
| 193 | ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority], | ||
| 194 | ListIterateTo(levels[priority], element)); | ||
| 195 | |||
| 196 | other.used_priorities |= 1ULL << priority; | ||
| 197 | |||
| 198 | if (levels[priority].empty()) { | ||
| 199 | used_priorities &= ~(1ULL << priority); | ||
| 200 | } | ||
| 201 | } | ||
| 202 | |||
| 203 | void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) { | ||
| 204 | transfer_to_front(*it, priority, other); | ||
| 205 | } | ||
| 206 | |||
| 207 | void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) { | ||
| 208 | ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority], | ||
| 209 | ListIterateTo(levels[priority], element)); | ||
| 210 | |||
| 211 | other.used_priorities |= 1ULL << priority; | ||
| 212 | |||
| 213 | if (levels[priority].empty()) { | ||
| 214 | used_priorities &= ~(1ULL << priority); | ||
| 215 | } | ||
| 216 | } | ||
| 217 | |||
| 218 | void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) { | ||
| 219 | transfer_to_back(*it, priority, other); | ||
| 220 | } | ||
| 221 | |||
| 222 | void yield(u32 priority, std::size_t n = 1) { | ||
| 223 | ListShiftForward(levels[priority], n); | ||
| 224 | } | ||
| 225 | |||
| 226 | std::size_t depth() const { | ||
| 227 | return Depth; | ||
| 228 | } | ||
| 229 | |||
| 230 | std::size_t size(u32 priority) const { | ||
| 231 | return levels[priority].size(); | ||
| 232 | } | ||
| 233 | |||
| 234 | std::size_t size() const { | ||
| 235 | u64 priorities = used_priorities; | ||
| 236 | std::size_t size = 0; | ||
| 237 | while (priorities != 0) { | ||
| 238 | const u64 current_priority = CountTrailingZeroes64(priorities); | ||
| 239 | size += levels[current_priority].size(); | ||
| 240 | priorities &= ~(1ULL << current_priority); | ||
| 241 | } | ||
| 242 | return size; | ||
| 243 | } | ||
| 244 | |||
| 245 | bool empty() const { | ||
| 246 | return used_priorities == 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | bool empty(u32 priority) const { | ||
| 250 | return (used_priorities & (1ULL << priority)) == 0; | ||
| 251 | } | ||
| 252 | |||
| 253 | u32 highest_priority_set(u32 max_priority = 0) const { | ||
| 254 | const u64 priorities = | ||
| 255 | max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1)); | ||
| 256 | return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities)); | ||
| 257 | } | ||
| 258 | |||
| 259 | u32 lowest_priority_set(u32 min_priority = Depth - 1) const { | ||
| 260 | const u64 priorities = min_priority >= Depth - 1 | ||
| 261 | ? used_priorities | ||
| 262 | : (used_priorities & ((1ULL << (min_priority + 1)) - 1)); | ||
| 263 | return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities); | ||
| 264 | } | ||
| 265 | |||
| 266 | const_iterator cbegin(u32 max_prio = 0) const { | ||
| 267 | const u32 priority = highest_priority_set(max_prio); | ||
| 268 | return priority == Depth ? cend() | ||
| 269 | : const_iterator{*this, levels[priority].cbegin(), priority}; | ||
| 270 | } | ||
| 271 | const_iterator begin(u32 max_prio = 0) const { | ||
| 272 | return cbegin(max_prio); | ||
| 273 | } | ||
| 274 | iterator begin(u32 max_prio = 0) { | ||
| 275 | const u32 priority = highest_priority_set(max_prio); | ||
| 276 | return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority}; | ||
| 277 | } | ||
| 278 | |||
| 279 | const_iterator cend(u32 min_prio = Depth - 1) const { | ||
| 280 | return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1); | ||
| 281 | } | ||
| 282 | const_iterator end(u32 min_prio = Depth - 1) const { | ||
| 283 | return cend(min_prio); | ||
| 284 | } | ||
| 285 | iterator end(u32 min_prio = Depth - 1) { | ||
| 286 | return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1); | ||
| 287 | } | ||
| 288 | |||
| 289 | T& front(u32 max_priority = 0) { | ||
| 290 | const u32 priority = highest_priority_set(max_priority); | ||
| 291 | return levels[priority == Depth ? 0 : priority].front(); | ||
| 292 | } | ||
| 293 | const T& front(u32 max_priority = 0) const { | ||
| 294 | const u32 priority = highest_priority_set(max_priority); | ||
| 295 | return levels[priority == Depth ? 0 : priority].front(); | ||
| 296 | } | ||
| 297 | |||
| 298 | T back(u32 min_priority = Depth - 1) { | ||
| 299 | const u32 priority = lowest_priority_set(min_priority); // intended | ||
| 300 | return levels[priority == Depth ? 63 : priority].back(); | ||
| 301 | } | ||
| 302 | const T& back(u32 min_priority = Depth - 1) const { | ||
| 303 | const u32 priority = lowest_priority_set(min_priority); // intended | ||
| 304 | return levels[priority == Depth ? 63 : priority].back(); | ||
| 305 | } | ||
| 306 | |||
| 307 | private: | ||
| 308 | using const_list_iterator = typename std::list<T>::const_iterator; | ||
| 309 | |||
| 310 | static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) { | ||
| 311 | if (shift >= list.size()) { | ||
| 312 | return; | ||
| 313 | } | ||
| 314 | |||
| 315 | const auto begin_range = list.begin(); | ||
| 316 | const auto end_range = std::next(begin_range, shift); | ||
| 317 | list.splice(list.end(), list, begin_range, end_range); | ||
| 318 | } | ||
| 319 | |||
| 320 | static void ListSplice(std::list<T>& in_list, const_list_iterator position, | ||
| 321 | std::list<T>& out_list, const_list_iterator element) { | ||
| 322 | in_list.splice(position, out_list, element); | ||
| 323 | } | ||
| 324 | |||
| 325 | static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) { | ||
| 326 | auto it = list.cbegin(); | ||
| 327 | while (it != list.cend() && *it != element) { | ||
| 328 | ++it; | ||
| 329 | } | ||
| 330 | return it; | ||
| 331 | } | ||
| 332 | |||
| 333 | std::array<std::list<T>, Depth> levels; | ||
| 334 | u64 used_priorities = 0; | ||
| 335 | }; | ||
| 336 | |||
| 337 | } // namespace Common | ||
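For context, here is a minimal usage sketch of the container introduced above, assuming the header builds as shown in the diff (the sketch itself is not part of the diff; the priorities and strings are illustrative only). It exercises the same operations the scheduler switches to later in this change: add, adjust, remove, front, and highest_priority_set.

```cpp
#include <cassert>
#include <string>
#include "common/multi_level_queue.h"

int main() {
    // 64 discrete priority levels; a lower value means a higher priority.
    Common::MultiLevelQueue<std::string, 64> queue;
    queue.add("idle", 63);
    queue.add("worker", 32);
    queue.add("interrupt", 0);

    assert(queue.highest_priority_set() == 0);
    assert(queue.front() == "interrupt");

    // Move an element to a new priority, as Scheduler::SetThreadPriority now does.
    queue.adjust("worker", 32, 1);
    assert(queue.front(1) == "worker");

    queue.remove("interrupt", 0);
    assert(queue.highest_priority_set() == 1);
    assert(!queue.empty());
    return 0;
}
```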
diff --git a/src/common/thread.cpp b/src/common/thread.cpp index 5144c0d9f..fe7a420cc 100644 --- a/src/common/thread.cpp +++ b/src/common/thread.cpp | |||
| @@ -27,18 +27,6 @@ namespace Common { | |||
| 27 | 27 | ||
| 28 | #ifdef _MSC_VER | 28 | #ifdef _MSC_VER |
| 29 | 29 | ||
| 30 | void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) { | ||
| 31 | SetThreadAffinityMask(thread, mask); | ||
| 32 | } | ||
| 33 | |||
| 34 | void SetCurrentThreadAffinity(u32 mask) { | ||
| 35 | SetThreadAffinityMask(GetCurrentThread(), mask); | ||
| 36 | } | ||
| 37 | |||
| 38 | void SwitchCurrentThread() { | ||
| 39 | SwitchToThread(); | ||
| 40 | } | ||
| 41 | |||
| 42 | // Sets the debugger-visible name of the current thread. | 30 | // Sets the debugger-visible name of the current thread. |
| 43 | // Uses undocumented (actually, it is now documented) trick. | 31 | // Uses undocumented (actually, it is now documented) trick. |
| 44 | // http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp | 32 | // http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp |
| @@ -70,31 +58,6 @@ void SetCurrentThreadName(const char* name) { | |||
| 70 | 58 | ||
| 71 | #else // !MSVC_VER, so must be POSIX threads | 59 | #else // !MSVC_VER, so must be POSIX threads |
| 72 | 60 | ||
| 73 | void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) { | ||
| 74 | #ifdef __APPLE__ | ||
| 75 | thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (integer_t*)&mask, 1); | ||
| 76 | #elif (defined __linux__ || defined __FreeBSD__) && !(defined ANDROID) | ||
| 77 | cpu_set_t cpu_set; | ||
| 78 | CPU_ZERO(&cpu_set); | ||
| 79 | |||
| 80 | for (int i = 0; i != sizeof(mask) * 8; ++i) | ||
| 81 | if ((mask >> i) & 1) | ||
| 82 | CPU_SET(i, &cpu_set); | ||
| 83 | |||
| 84 | pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set); | ||
| 85 | #endif | ||
| 86 | } | ||
| 87 | |||
| 88 | void SetCurrentThreadAffinity(u32 mask) { | ||
| 89 | SetThreadAffinity(pthread_self(), mask); | ||
| 90 | } | ||
| 91 | |||
| 92 | #ifndef _WIN32 | ||
| 93 | void SwitchCurrentThread() { | ||
| 94 | usleep(1000 * 1); | ||
| 95 | } | ||
| 96 | #endif | ||
| 97 | |||
| 98 | // MinGW with the POSIX threading model does not support pthread_setname_np | 61 | // MinGW with the POSIX threading model does not support pthread_setname_np |
| 99 | #if !defined(_WIN32) || defined(_MSC_VER) | 62 | #if !defined(_WIN32) || defined(_MSC_VER) |
| 100 | void SetCurrentThreadName(const char* name) { | 63 | void SetCurrentThreadName(const char* name) { |
diff --git a/src/common/thread.h b/src/common/thread.h index 2cf74452d..c5fc3533d 100644 --- a/src/common/thread.h +++ b/src/common/thread.h | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <cstddef> | 9 | #include <cstddef> |
| 10 | #include <mutex> | 10 | #include <mutex> |
| 11 | #include <thread> | 11 | #include <thread> |
| 12 | #include "common/common_types.h" | ||
| 13 | 12 | ||
| 14 | namespace Common { | 13 | namespace Common { |
| 15 | 14 | ||
| @@ -78,9 +77,6 @@ private: | |||
| 78 | std::size_t generation = 0; // Incremented once each time the barrier is used | 77 | std::size_t generation = 0; // Incremented once each time the barrier is used |
| 79 | }; | 78 | }; |
| 80 | 79 | ||
| 81 | void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask); | ||
| 82 | void SetCurrentThreadAffinity(u32 mask); | ||
| 83 | void SwitchCurrentThread(); // On Linux, this is equal to sleep 1ms | ||
| 84 | void SetCurrentThreadName(const char* name); | 80 | void SetCurrentThreadName(const char* name); |
| 85 | 81 | ||
| 86 | } // namespace Common | 82 | } // namespace Common |
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp index 352190da8..c8842410b 100644 --- a/src/core/hle/kernel/address_arbiter.cpp +++ b/src/core/hle/kernel/address_arbiter.cpp | |||
| @@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_ | |||
| 26 | // them all. | 26 | // them all. |
| 27 | std::size_t last = waiting_threads.size(); | 27 | std::size_t last = waiting_threads.size(); |
| 28 | if (num_to_wake > 0) { | 28 | if (num_to_wake > 0) { |
| 29 | last = num_to_wake; | 29 | last = std::min(last, static_cast<std::size_t>(num_to_wake)); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | // Signal the waiting threads. | 32 | // Signal the waiting threads. |
| @@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a | |||
| 90 | // Determine the modified value depending on the waiting count. | 90 | // Determine the modified value depending on the waiting count. |
| 91 | s32 updated_value; | 91 | s32 updated_value; |
| 92 | if (waiting_threads.empty()) { | 92 | if (waiting_threads.empty()) { |
| 93 | updated_value = value - 1; | ||
| 94 | } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { | ||
| 95 | updated_value = value + 1; | 93 | updated_value = value + 1; |
| 94 | } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) { | ||
| 95 | updated_value = value - 1; | ||
| 96 | } else { | 96 | } else { |
| 97 | updated_value = value; | 97 | updated_value = value; |
| 98 | } | 98 | } |
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp index a7e4ddc05..3b73be67b 100644 --- a/src/core/hle/kernel/kernel.cpp +++ b/src/core/hle/kernel/kernel.cpp | |||
| @@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_ | |||
| 62 | 62 | ||
| 63 | if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || | 63 | if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || |
| 64 | thread->GetWaitHandle() != 0) { | 64 | thread->GetWaitHandle() != 0) { |
| 65 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); | 65 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex || |
| 66 | thread->GetStatus() == ThreadStatus::WaitCondVar); | ||
| 66 | thread->SetMutexWaitAddress(0); | 67 | thread->SetMutexWaitAddress(0); |
| 67 | thread->SetCondVarWaitAddress(0); | 68 | thread->SetCondVarWaitAddress(0); |
| 68 | thread->SetWaitHandle(0); | 69 | thread->SetWaitHandle(0); |
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp index cc189cc64..ac501bf7f 100644 --- a/src/core/hle/kernel/scheduler.cpp +++ b/src/core/hle/kernel/scheduler.cpp | |||
| @@ -29,8 +29,8 @@ Scheduler::~Scheduler() { | |||
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | bool Scheduler::HaveReadyThreads() const { | 31 | bool Scheduler::HaveReadyThreads() const { |
| 32 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 32 | std::lock_guard lock{scheduler_mutex}; |
| 33 | return ready_queue.get_first() != nullptr; | 33 | return !ready_queue.empty(); |
| 34 | } | 34 | } |
| 35 | 35 | ||
| 36 | Thread* Scheduler::GetCurrentThread() const { | 36 | Thread* Scheduler::GetCurrentThread() const { |
| @@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() { | |||
| 46 | Thread* thread = GetCurrentThread(); | 46 | Thread* thread = GetCurrentThread(); |
| 47 | 47 | ||
| 48 | if (thread && thread->GetStatus() == ThreadStatus::Running) { | 48 | if (thread && thread->GetStatus() == ThreadStatus::Running) { |
| 49 | if (ready_queue.empty()) { | ||
| 50 | return thread; | ||
| 51 | } | ||
| 49 | // We have to do better than the current thread. | 52 | // We have to do better than the current thread. |
| 50 | // This call returns null when that's not possible. | 53 | // This call returns null when that's not possible. |
| 51 | next = ready_queue.pop_first_better(thread->GetPriority()); | 54 | next = ready_queue.front(); |
| 52 | if (!next) { | 55 | if (next == nullptr || next->GetPriority() >= thread->GetPriority()) { |
| 53 | // Otherwise just keep going with the current thread | ||
| 54 | next = thread; | 56 | next = thread; |
| 55 | } | 57 | } |
| 56 | } else { | 58 | } else { |
| 57 | next = ready_queue.pop_first(); | 59 | if (ready_queue.empty()) { |
| 60 | return nullptr; | ||
| 61 | } | ||
| 62 | next = ready_queue.front(); | ||
| 58 | } | 63 | } |
| 59 | 64 | ||
| 60 | return next; | 65 | return next; |
| 61 | } | 66 | } |
| 62 | 67 | ||
| 63 | void Scheduler::SwitchContext(Thread* new_thread) { | 68 | void Scheduler::SwitchContext(Thread* new_thread) { |
| 64 | Thread* const previous_thread = GetCurrentThread(); | 69 | Thread* previous_thread = GetCurrentThread(); |
| 65 | Process* const previous_process = system.Kernel().CurrentProcess(); | 70 | Process* const previous_process = system.Kernel().CurrentProcess(); |
| 66 | 71 | ||
| 67 | UpdateLastContextSwitchTime(previous_thread, previous_process); | 72 | UpdateLastContextSwitchTime(previous_thread, previous_process); |
| @@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 75 | if (previous_thread->GetStatus() == ThreadStatus::Running) { | 80 | if (previous_thread->GetStatus() == ThreadStatus::Running) { |
| 76 | // This is only the case when a reschedule is triggered without the current thread | 81 | // This is only the case when a reschedule is triggered without the current thread |
| 77 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) | 82 | // yielding execution (i.e. an event triggered, system core time-sliced, etc) |
| 78 | ready_queue.push_front(previous_thread->GetPriority(), previous_thread); | 83 | ready_queue.add(previous_thread, previous_thread->GetPriority(), false); |
| 79 | previous_thread->SetStatus(ThreadStatus::Ready); | 84 | previous_thread->SetStatus(ThreadStatus::Ready); |
| 80 | } | 85 | } |
| 81 | } | 86 | } |
| @@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) { | |||
| 90 | 95 | ||
| 91 | current_thread = new_thread; | 96 | current_thread = new_thread; |
| 92 | 97 | ||
| 93 | ready_queue.remove(new_thread->GetPriority(), new_thread); | 98 | ready_queue.remove(new_thread, new_thread->GetPriority()); |
| 94 | new_thread->SetStatus(ThreadStatus::Running); | 99 | new_thread->SetStatus(ThreadStatus::Running); |
| 95 | 100 | ||
| 96 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); | 101 | auto* const thread_owner_process = current_thread->GetOwnerProcess(); |
| @@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) { | |||
| 127 | } | 132 | } |
| 128 | 133 | ||
| 129 | void Scheduler::Reschedule() { | 134 | void Scheduler::Reschedule() { |
| 130 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 135 | std::lock_guard lock{scheduler_mutex}; |
| 131 | 136 | ||
| 132 | Thread* cur = GetCurrentThread(); | 137 | Thread* cur = GetCurrentThread(); |
| 133 | Thread* next = PopNextReadyThread(); | 138 | Thread* next = PopNextReadyThread(); |
| @@ -143,51 +148,54 @@ void Scheduler::Reschedule() { | |||
| 143 | SwitchContext(next); | 148 | SwitchContext(next); |
| 144 | } | 149 | } |
| 145 | 150 | ||
| 146 | void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { | 151 | void Scheduler::AddThread(SharedPtr<Thread> thread) { |
| 147 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 152 | std::lock_guard lock{scheduler_mutex}; |
| 148 | 153 | ||
| 149 | thread_list.push_back(std::move(thread)); | 154 | thread_list.push_back(std::move(thread)); |
| 150 | ready_queue.prepare(priority); | ||
| 151 | } | 155 | } |
| 152 | 156 | ||
| 153 | void Scheduler::RemoveThread(Thread* thread) { | 157 | void Scheduler::RemoveThread(Thread* thread) { |
| 154 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 158 | std::lock_guard lock{scheduler_mutex}; |
| 155 | 159 | ||
| 156 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), | 160 | thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), |
| 157 | thread_list.end()); | 161 | thread_list.end()); |
| 158 | } | 162 | } |
| 159 | 163 | ||
| 160 | void Scheduler::ScheduleThread(Thread* thread, u32 priority) { | 164 | void Scheduler::ScheduleThread(Thread* thread, u32 priority) { |
| 161 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 165 | std::lock_guard lock{scheduler_mutex}; |
| 162 | 166 | ||
| 163 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); | 167 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); |
| 164 | ready_queue.push_back(priority, thread); | 168 | ready_queue.add(thread, priority); |
| 165 | } | 169 | } |
| 166 | 170 | ||
| 167 | void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { | 171 | void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { |
| 168 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 172 | std::lock_guard lock{scheduler_mutex}; |
| 169 | 173 | ||
| 170 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); | 174 | ASSERT(thread->GetStatus() == ThreadStatus::Ready); |
| 171 | ready_queue.remove(priority, thread); | 175 | ready_queue.remove(thread, priority); |
| 172 | } | 176 | } |
| 173 | 177 | ||
| 174 | void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { | 178 | void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { |
| 175 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 179 | std::lock_guard lock{scheduler_mutex}; |
| 180 | if (thread->GetPriority() == priority) { | ||
| 181 | return; | ||
| 182 | } | ||
| 176 | 183 | ||
| 177 | // If thread was ready, adjust queues | 184 | // If thread was ready, adjust queues |
| 178 | if (thread->GetStatus() == ThreadStatus::Ready) | 185 | if (thread->GetStatus() == ThreadStatus::Ready) |
| 179 | ready_queue.move(thread, thread->GetPriority(), priority); | 186 | ready_queue.adjust(thread, thread->GetPriority(), priority); |
| 180 | else | ||
| 181 | ready_queue.prepare(priority); | ||
| 182 | } | 187 | } |
| 183 | 188 | ||
| 184 | Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { | 189 | Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { |
| 185 | std::lock_guard<std::mutex> lock(scheduler_mutex); | 190 | std::lock_guard lock{scheduler_mutex}; |
| 186 | 191 | ||
| 187 | const u32 mask = 1U << core; | 192 | const u32 mask = 1U << core; |
| 188 | return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { | 193 | for (auto* thread : ready_queue) { |
| 189 | return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; | 194 | if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) { |
| 190 | }); | 195 | return thread; |
| 196 | } | ||
| 197 | } | ||
| 198 | return nullptr; | ||
| 191 | } | 199 | } |
| 192 | 200 | ||
| 193 | void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { | 201 | void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { |
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h index 1c5bf57d9..b29bf7be8 100644 --- a/src/core/hle/kernel/scheduler.h +++ b/src/core/hle/kernel/scheduler.h | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | #include <mutex> | 7 | #include <mutex> |
| 8 | #include <vector> | 8 | #include <vector> |
| 9 | #include "common/common_types.h" | 9 | #include "common/common_types.h" |
| 10 | #include "common/thread_queue_list.h" | 10 | #include "common/multi_level_queue.h" |
| 11 | #include "core/hle/kernel/object.h" | 11 | #include "core/hle/kernel/object.h" |
| 12 | #include "core/hle/kernel/thread.h" | 12 | #include "core/hle/kernel/thread.h" |
| 13 | 13 | ||
| @@ -38,7 +38,7 @@ public: | |||
| 38 | u64 GetLastContextSwitchTicks() const; | 38 | u64 GetLastContextSwitchTicks() const; |
| 39 | 39 | ||
| 40 | /// Adds a new thread to the scheduler | 40 | /// Adds a new thread to the scheduler |
| 41 | void AddThread(SharedPtr<Thread> thread, u32 priority); | 41 | void AddThread(SharedPtr<Thread> thread); |
| 42 | 42 | ||
| 43 | /// Removes a thread from the scheduler | 43 | /// Removes a thread from the scheduler |
| 44 | void RemoveThread(Thread* thread); | 44 | void RemoveThread(Thread* thread); |
| @@ -156,7 +156,7 @@ private: | |||
| 156 | std::vector<SharedPtr<Thread>> thread_list; | 156 | std::vector<SharedPtr<Thread>> thread_list; |
| 157 | 157 | ||
| 158 | /// Lists only ready thread ids. | 158 | /// Lists only ready thread ids. |
| 159 | Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; | 159 | Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue; |
| 160 | 160 | ||
| 161 | SharedPtr<Thread> current_thread = nullptr; | 161 | SharedPtr<Thread> current_thread = nullptr; |
| 162 | 162 | ||
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp index 17bfe10ff..c408d4e22 100644 --- a/src/core/hle/kernel/svc.cpp +++ b/src/core/hle/kernel/svc.cpp | |||
| @@ -1353,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var | |||
| 1353 | current_thread->SetCondVarWaitAddress(condition_variable_addr); | 1353 | current_thread->SetCondVarWaitAddress(condition_variable_addr); |
| 1354 | current_thread->SetMutexWaitAddress(mutex_addr); | 1354 | current_thread->SetMutexWaitAddress(mutex_addr); |
| 1355 | current_thread->SetWaitHandle(thread_handle); | 1355 | current_thread->SetWaitHandle(thread_handle); |
| 1356 | current_thread->SetStatus(ThreadStatus::WaitMutex); | 1356 | current_thread->SetStatus(ThreadStatus::WaitCondVar); |
| 1357 | current_thread->InvalidateWakeupCallback(); | 1357 | current_thread->InvalidateWakeupCallback(); |
| 1358 | 1358 | ||
| 1359 | current_thread->WakeAfterDelay(nano_seconds); | 1359 | current_thread->WakeAfterDelay(nano_seconds); |
| @@ -1397,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | |||
| 1397 | // them all. | 1397 | // them all. |
| 1398 | std::size_t last = waiting_threads.size(); | 1398 | std::size_t last = waiting_threads.size(); |
| 1399 | if (target != -1) | 1399 | if (target != -1) |
| 1400 | last = target; | 1400 | last = std::min(waiting_threads.size(), static_cast<std::size_t>(target)); |
| 1401 | 1401 | ||
| 1402 | // If there are no threads waiting on this condition variable, just exit | 1402 | // If there are no threads waiting on this condition variable, just exit |
| 1403 | if (last > waiting_threads.size()) | 1403 | if (last == 0) |
| 1404 | return RESULT_SUCCESS; | 1404 | return RESULT_SUCCESS; |
| 1405 | 1405 | ||
| 1406 | for (std::size_t index = 0; index < last; ++index) { | 1406 | for (std::size_t index = 0; index < last; ++index) { |
| @@ -1408,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | |||
| 1408 | 1408 | ||
| 1409 | ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); | 1409 | ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); |
| 1410 | 1410 | ||
| 1411 | // Release the thread from the condition variable. | ||
| 1412 | thread->SetCondVarWaitAddress(0); | ||
| 1413 | |||
| 1411 | std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); | 1414 | std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); |
| 1412 | 1415 | ||
| 1413 | auto& monitor = Core::System::GetInstance().Monitor(); | 1416 | auto& monitor = Core::System::GetInstance().Monitor(); |
| @@ -1426,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | |||
| 1426 | } | 1429 | } |
| 1427 | } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), | 1430 | } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), |
| 1428 | thread->GetWaitHandle())); | 1431 | thread->GetWaitHandle())); |
| 1429 | |||
| 1430 | if (mutex_val == 0) { | 1432 | if (mutex_val == 0) { |
| 1431 | // We were able to acquire the mutex, resume this thread. | 1433 | // We were able to acquire the mutex, resume this thread. |
| 1432 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); | 1434 | ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); |
| 1433 | thread->ResumeFromWait(); | 1435 | thread->ResumeFromWait(); |
| 1434 | 1436 | ||
| 1435 | auto* const lock_owner = thread->GetLockOwner(); | 1437 | auto* const lock_owner = thread->GetLockOwner(); |
| @@ -1439,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | |||
| 1439 | 1441 | ||
| 1440 | thread->SetLockOwner(nullptr); | 1442 | thread->SetLockOwner(nullptr); |
| 1441 | thread->SetMutexWaitAddress(0); | 1443 | thread->SetMutexWaitAddress(0); |
| 1442 | thread->SetCondVarWaitAddress(0); | ||
| 1443 | thread->SetWaitHandle(0); | 1444 | thread->SetWaitHandle(0); |
| 1445 | Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); | ||
| 1444 | } else { | 1446 | } else { |
| 1445 | // Atomically signal that the mutex now has a waiting thread. | 1447 | // Atomically signal that the mutex now has a waiting thread. |
| 1446 | do { | 1448 | do { |
| @@ -1459,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target | |||
| 1459 | const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); | 1461 | const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); |
| 1460 | auto owner = handle_table.Get<Thread>(owner_handle); | 1462 | auto owner = handle_table.Get<Thread>(owner_handle); |
| 1461 | ASSERT(owner); | 1463 | ASSERT(owner); |
| 1462 | ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); | 1464 | ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar); |
| 1463 | thread->InvalidateWakeupCallback(); | 1465 | thread->InvalidateWakeupCallback(); |
| 1466 | thread->SetStatus(ThreadStatus::WaitMutex); | ||
| 1464 | 1467 | ||
| 1465 | owner->AddMutexWaiter(thread); | 1468 | owner->AddMutexWaiter(thread); |
| 1466 | |||
| 1467 | Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule(); | ||
| 1468 | } | 1469 | } |
| 1469 | } | 1470 | } |
| 1470 | 1471 | ||
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp index 3b22e8e0d..fa3ac3abc 100644 --- a/src/core/hle/kernel/thread.cpp +++ b/src/core/hle/kernel/thread.cpp | |||
| @@ -105,6 +105,7 @@ void Thread::ResumeFromWait() { | |||
| 105 | case ThreadStatus::WaitSleep: | 105 | case ThreadStatus::WaitSleep: |
| 106 | case ThreadStatus::WaitIPC: | 106 | case ThreadStatus::WaitIPC: |
| 107 | case ThreadStatus::WaitMutex: | 107 | case ThreadStatus::WaitMutex: |
| 108 | case ThreadStatus::WaitCondVar: | ||
| 108 | case ThreadStatus::WaitArb: | 109 | case ThreadStatus::WaitArb: |
| 109 | break; | 110 | break; |
| 110 | 111 | ||
| @@ -198,7 +199,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name | |||
| 198 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); | 199 | thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); |
| 199 | thread->owner_process = &owner_process; | 200 | thread->owner_process = &owner_process; |
| 200 | thread->scheduler = &system.Scheduler(processor_id); | 201 | thread->scheduler = &system.Scheduler(processor_id); |
| 201 | thread->scheduler->AddThread(thread, priority); | 202 | thread->scheduler->AddThread(thread); |
| 202 | thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); | 203 | thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); |
| 203 | 204 | ||
| 204 | // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used | 205 | // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used |
| @@ -351,7 +352,7 @@ void Thread::ChangeScheduler() { | |||
| 351 | if (*new_processor_id != processor_id) { | 352 | if (*new_processor_id != processor_id) { |
| 352 | // Remove thread from previous core's scheduler | 353 | // Remove thread from previous core's scheduler |
| 353 | scheduler->RemoveThread(this); | 354 | scheduler->RemoveThread(this); |
| 354 | next_scheduler.AddThread(this, current_priority); | 355 | next_scheduler.AddThread(this); |
| 355 | } | 356 | } |
| 356 | 357 | ||
| 357 | processor_id = *new_processor_id; | 358 | processor_id = *new_processor_id; |
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h index faad5f391..9c684758c 100644 --- a/src/core/hle/kernel/thread.h +++ b/src/core/hle/kernel/thread.h | |||
| @@ -51,7 +51,8 @@ enum class ThreadStatus { | |||
| 51 | WaitIPC, ///< Waiting for the reply from an IPC request | 51 | WaitIPC, ///< Waiting for the reply from an IPC request |
| 52 | WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false | 52 | WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false |
| 53 | WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true | 53 | WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true |
| 54 | WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc | 54 | WaitMutex, ///< Waiting due to an ArbitrateLock svc |
| 55 | WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc | ||
| 55 | WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc | 56 | WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc |
| 56 | Dormant, ///< Created but not yet made ready | 57 | Dormant, ///< Created but not yet made ready |
| 57 | Dead ///< Run to completion, or forcefully terminated | 58 | Dead ///< Run to completion, or forcefully terminated |
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp index 770590d0b..2c229bcad 100644 --- a/src/core/hle/service/fatal/fatal.cpp +++ b/src/core/hle/service/fatal/fatal.cpp | |||
| @@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name) | |||
| 25 | Module::Interface::~Interface() = default; | 25 | Module::Interface::~Interface() = default; |
| 26 | 26 | ||
| 27 | struct FatalInfo { | 27 | struct FatalInfo { |
| 28 | std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or | 28 | enum class Architecture : s32 { |
| 29 | // not(find a game which has non zero valeus) | 29 | AArch64, |
| 30 | u64_le unk0{}; | 30 | AArch32, |
| 31 | u64_le unk1{}; | 31 | }; |
| 32 | u64_le unk2{}; | 32 | |
| 33 | u64_le unk3{}; | 33 | const char* ArchAsString() const { |
| 34 | u64_le unk4{}; | 34 | return arch == Architecture::AArch64 ? "AArch64" : "AArch32"; |
| 35 | u64_le unk5{}; | 35 | } |
| 36 | u64_le unk6{}; | 36 | |
| 37 | std::array<u64_le, 31> registers{}; | ||
| 38 | u64_le sp{}; | ||
| 39 | u64_le pc{}; | ||
| 40 | u64_le pstate{}; | ||
| 41 | u64_le afsr0{}; | ||
| 42 | u64_le afsr1{}; | ||
| 43 | u64_le esr{}; | ||
| 44 | u64_le far{}; | ||
| 37 | 45 | ||
| 38 | std::array<u64_le, 32> backtrace{}; | 46 | std::array<u64_le, 32> backtrace{}; |
| 39 | u64_le unk7{}; | 47 | u64_le program_entry_point{}; |
| 40 | u64_le unk8{}; | 48 | |
| 49 | // Bit flags that indicate which registers have been set with values | ||
| 50 | // for this context. The service itself uses these to determine which | ||
| 51 | // registers to specifically print out. | ||
| 52 | u64_le set_flags{}; | ||
| 53 | |||
| 41 | u32_le backtrace_size{}; | 54 | u32_le backtrace_size{}; |
| 42 | u32_le unk9{}; | 55 | Architecture arch{}; |
| 43 | u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? | 56 | u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? |
| 44 | }; | 57 | }; |
| 45 | static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); | 58 | static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); |
| @@ -52,36 +65,36 @@ enum class FatalType : u32 { | |||
| 52 | 65 | ||
| 53 | static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { | 66 | static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { |
| 54 | const auto title_id = Core::CurrentProcess()->GetTitleID(); | 67 | const auto title_id = Core::CurrentProcess()->GetTitleID(); |
| 55 | std::string crash_report = | 68 | std::string crash_report = fmt::format( |
| 56 | fmt::format("Yuzu {}-{} crash report\n" | 69 | "Yuzu {}-{} crash report\n" |
| 57 | "Title ID: {:016x}\n" | 70 | "Title ID: {:016x}\n" |
| 58 | "Result: 0x{:X} ({:04}-{:04d})\n" | 71 | "Result: 0x{:X} ({:04}-{:04d})\n" |
| 59 | "\n", | 72 | "Set flags: 0x{:16X}\n" |
| 60 | Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, | 73 | "Program entry point: 0x{:16X}\n" |
| 61 | 2000 + static_cast<u32>(error_code.module.Value()), | 74 | "\n", |
| 62 | static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); | 75 | Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, |
| 76 | 2000 + static_cast<u32>(error_code.module.Value()), | ||
| 77 | static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point); | ||
| 63 | if (info.backtrace_size != 0x0) { | 78 | if (info.backtrace_size != 0x0) { |
| 64 | crash_report += "Registers:\n"; | 79 | crash_report += "Registers:\n"; |
| 65 | // TODO(ogniK): This is just a guess, find a game which actually has non zero values | ||
| 66 | for (size_t i = 0; i < info.registers.size(); i++) { | 80 | for (size_t i = 0; i < info.registers.size(); i++) { |
| 67 | crash_report += | 81 | crash_report += |
| 68 | fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); | 82 | fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); |
| 69 | } | 83 | } |
| 70 | crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); | 84 | crash_report += fmt::format(" SP: {:016x}\n", info.sp); |
| 71 | crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); | 85 | crash_report += fmt::format(" PC: {:016x}\n", info.pc); |
| 72 | crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); | 86 | crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate); |
| 73 | crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); | 87 | crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0); |
| 74 | crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); | 88 | crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1); |
| 75 | crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); | 89 | crash_report += fmt::format(" ESR: {:016x}\n", info.esr); |
| 76 | crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); | 90 | crash_report += fmt::format(" FAR: {:016x}\n", info.far); |
| 77 | crash_report += "\nBacktrace:\n"; | 91 | crash_report += "\nBacktrace:\n"; |
| 78 | for (size_t i = 0; i < info.backtrace_size; i++) { | 92 | for (size_t i = 0; i < info.backtrace_size; i++) { |
| 79 | crash_report += | 93 | crash_report += |
| 80 | fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); | 94 | fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); |
| 81 | } | 95 | } |
| 82 | crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); | 96 | |
| 83 | crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); | 97 | crash_report += fmt::format("Architecture: {}\n", info.ArchAsString()); |
| 84 | crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9); | ||
| 85 | crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); | 98 | crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); |
| 86 | } | 99 | } |
| 87 | 100 | ||
| @@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F | |||
| 125 | case FatalType::ErrorReport: | 138 | case FatalType::ErrorReport: |
| 126 | GenerateErrorReport(error_code, info); | 139 | GenerateErrorReport(error_code, info); |
| 127 | break; | 140 | break; |
| 128 | }; | 141 | } |
| 129 | } | 142 | } |
| 130 | 143 | ||
| 131 | void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { | 144 | void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { |
| 132 | LOG_ERROR(Service_Fatal, "called"); | 145 | LOG_ERROR(Service_Fatal, "called"); |
| 133 | IPC::RequestParser rp{ctx}; | 146 | IPC::RequestParser rp{ctx}; |
| 134 | auto error_code = rp.Pop<ResultCode>(); | 147 | const auto error_code = rp.Pop<ResultCode>(); |
| 135 | 148 | ||
| 136 | ThrowFatalError(error_code, FatalType::ErrorScreen, {}); | 149 | ThrowFatalError(error_code, FatalType::ErrorScreen, {}); |
| 137 | IPC::ResponseBuilder rb{ctx, 2}; | 150 | IPC::ResponseBuilder rb{ctx, 2}; |
| @@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { | |||
| 141 | void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { | 154 | void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { |
| 142 | LOG_ERROR(Service_Fatal, "called"); | 155 | LOG_ERROR(Service_Fatal, "called"); |
| 143 | IPC::RequestParser rp(ctx); | 156 | IPC::RequestParser rp(ctx); |
| 144 | auto error_code = rp.Pop<ResultCode>(); | 157 | const auto error_code = rp.Pop<ResultCode>(); |
| 145 | auto fatal_type = rp.PopEnum<FatalType>(); | 158 | const auto fatal_type = rp.PopEnum<FatalType>(); |
| 146 | 159 | ||
| 147 | ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy | 160 | ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy |
| 148 | IPC::ResponseBuilder rb{ctx, 2}; | 161 | IPC::ResponseBuilder rb{ctx, 2}; |
| @@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { | |||
| 152 | void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { | 165 | void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { |
| 153 | LOG_ERROR(Service_Fatal, "called"); | 166 | LOG_ERROR(Service_Fatal, "called"); |
| 154 | IPC::RequestParser rp(ctx); | 167 | IPC::RequestParser rp(ctx); |
| 155 | auto error_code = rp.Pop<ResultCode>(); | 168 | const auto error_code = rp.Pop<ResultCode>(); |
| 156 | auto fatal_type = rp.PopEnum<FatalType>(); | 169 | const auto fatal_type = rp.PopEnum<FatalType>(); |
| 157 | auto fatal_info = ctx.ReadBuffer(); | 170 | const auto fatal_info = ctx.ReadBuffer(); |
| 158 | FatalInfo info{}; | 171 | FatalInfo info{}; |
| 159 | 172 | ||
| 160 | ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); | 173 | ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); |
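Note that GenerateErrorReport above still prints every register; the new set_flags comment describes per-register filtering that the real service performs, but the diff does not implement it. A hypothetical sketch of that filtering, with assumed names and purely for illustration, might look like this:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical illustration of the filtering described by the set_flags
// comment: only registers whose bit is set in set_flags are printed.
// The function name and layout are assumptions, not the service's code.
void PrintSetRegisters(const std::array<uint64_t, 31>& registers, uint64_t set_flags) {
    for (std::size_t i = 0; i < registers.size(); ++i) {
        if ((set_flags >> i) & 1) {
            std::printf("    X[%02zu]: %016llx\n", i,
                        static_cast<unsigned long long>(registers[i]));
        }
    }
}

int main() {
    std::array<uint64_t, 31> regs{};
    regs[0] = 0xdeadbeef;
    PrintSetRegisters(regs, 0x1); // prints only X[00]
    return 0;
}
```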
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt index d0284bdf4..c7038b217 100644 --- a/src/tests/CMakeLists.txt +++ b/src/tests/CMakeLists.txt | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | add_executable(tests | 1 | add_executable(tests |
| 2 | common/bit_field.cpp | 2 | common/bit_field.cpp |
| 3 | common/bit_utils.cpp | ||
| 4 | common/multi_level_queue.cpp | ||
| 3 | common/param_package.cpp | 5 | common/param_package.cpp |
| 4 | common/ring_buffer.cpp | 6 | common/ring_buffer.cpp |
| 5 | core/arm/arm_test_common.cpp | 7 | core/arm/arm_test_common.cpp |
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp new file mode 100644 index 000000000..479b5995a --- /dev/null +++ b/src/tests/common/bit_utils.cpp | |||
| @@ -0,0 +1,23 @@ | |||
| 1 | // Copyright 2017 Citra Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <catch2/catch.hpp> | ||
| 6 | #include <math.h> | ||
| 7 | #include "common/bit_util.h" | ||
| 8 | |||
| 9 | namespace Common { | ||
| 10 | |||
| 11 | TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") { | ||
| 12 | REQUIRE(Common::CountTrailingZeroes32(0) == 32); | ||
| 13 | REQUIRE(Common::CountTrailingZeroes64(0) == 64); | ||
| 14 | REQUIRE(Common::CountTrailingZeroes32(9) == 0); | ||
| 15 | REQUIRE(Common::CountTrailingZeroes32(8) == 3); | ||
| 16 | REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12); | ||
| 17 | REQUIRE(Common::CountTrailingZeroes64(9) == 0); | ||
| 18 | REQUIRE(Common::CountTrailingZeroes64(8) == 3); | ||
| 19 | REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12); | ||
| 20 | REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36); | ||
| 21 | } | ||
| 22 | |||
| 23 | } // namespace Common | ||
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp new file mode 100644 index 000000000..cca7ec7da --- /dev/null +++ b/src/tests/common/multi_level_queue.cpp | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | // Copyright 2019 Yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <catch2/catch.hpp> | ||
| 6 | #include <math.h> | ||
| 7 | #include "common/common_types.h" | ||
| 8 | #include "common/multi_level_queue.h" | ||
| 9 | |||
| 10 | namespace Common { | ||
| 11 | |||
| 12 | TEST_CASE("MultiLevelQueue", "[common]") { | ||
| 13 | std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0}; | ||
| 14 | Common::MultiLevelQueue<f32, 64> mlq; | ||
| 15 | REQUIRE(mlq.empty()); | ||
| 16 | mlq.add(values[2], 2); | ||
| 17 | mlq.add(values[7], 7); | ||
| 18 | mlq.add(values[3], 3); | ||
| 19 | mlq.add(values[4], 4); | ||
| 20 | mlq.add(values[0], 0); | ||
| 21 | mlq.add(values[5], 5); | ||
| 22 | mlq.add(values[6], 6); | ||
| 23 | mlq.add(values[1], 1); | ||
| 24 | u32 index = 0; | ||
| 25 | bool all_set = true; | ||
| 26 | for (auto& f : mlq) { | ||
| 27 | all_set &= (f == values[index]); | ||
| 28 | index++; | ||
| 29 | } | ||
| 30 | REQUIRE(all_set); | ||
| 31 | REQUIRE(!mlq.empty()); | ||
| 32 | f32 v = 8.0; | ||
| 33 | mlq.add(v, 2); | ||
| 34 | v = -7.0; | ||
| 35 | mlq.add(v, 2, false); | ||
| 36 | REQUIRE(mlq.front(2) == -7.0); | ||
| 37 | mlq.yield(2); | ||
| 38 | REQUIRE(mlq.front(2) == values[2]); | ||
| 39 | REQUIRE(mlq.back(2) == -7.0); | ||
| 40 | REQUIRE(mlq.empty(8)); | ||
| 41 | v = 10.0; | ||
| 42 | mlq.add(v, 8); | ||
| 43 | mlq.adjust(v, 8, 9); | ||
| 44 | REQUIRE(mlq.front(9) == v); | ||
| 45 | REQUIRE(mlq.empty(8)); | ||
| 46 | REQUIRE(!mlq.empty(9)); | ||
| 47 | mlq.adjust(values[0], 0, 9); | ||
| 48 | REQUIRE(mlq.highest_priority_set() == 1); | ||
| 49 | REQUIRE(mlq.lowest_priority_set() == 9); | ||
| 50 | mlq.remove(values[1], 1); | ||
| 51 | REQUIRE(mlq.highest_priority_set() == 2); | ||
| 52 | REQUIRE(mlq.empty(1)); | ||
| 53 | } | ||
| 54 | |||
| 55 | } // namespace Common | ||
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp index 086b2f625..c5dc199c5 100644 --- a/src/video_core/gpu_thread.cpp +++ b/src/video_core/gpu_thread.cpp | |||
| @@ -52,8 +52,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p | |||
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) | 54 | ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) |
| 55 | : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), | 55 | : renderer{renderer}, thread{RunThread, std::ref(renderer), std::ref(dma_pusher), |
| 56 | std::ref(dma_pusher), std::ref(state)} {} | 56 | std::ref(state)} {} |
| 57 | 57 | ||
| 58 | ThreadManager::~ThreadManager() { | 58 | ThreadManager::~ThreadManager() { |
| 59 | // Notify GPU thread that a shutdown is pending | 59 | // Notify GPU thread that a shutdown is pending |
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h index 8cd7db1c6..6ab7142f8 100644 --- a/src/video_core/gpu_thread.h +++ b/src/video_core/gpu_thread.h | |||
| @@ -4,10 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <array> | ||
| 8 | #include <atomic> | 7 | #include <atomic> |
| 9 | #include <condition_variable> | 8 | #include <condition_variable> |
| 10 | #include <memory> | ||
| 11 | #include <mutex> | 9 | #include <mutex> |
| 12 | #include <optional> | 10 | #include <optional> |
| 13 | #include <thread> | 11 | #include <thread> |
| @@ -177,7 +175,6 @@ private: | |||
| 177 | private: | 175 | private: |
| 178 | SynchState state; | 176 | SynchState state; |
| 179 | VideoCore::RendererBase& renderer; | 177 | VideoCore::RendererBase& renderer; |
| 180 | Tegra::DmaPusher& dma_pusher; | ||
| 181 | std::thread thread; | 178 | std::thread thread; |
| 182 | std::thread::id thread_id; | 179 | std::thread::id thread_id; |
| 183 | }; | 180 | }; |
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h index 9fc9f3056..110ad7d26 100644 --- a/src/video_core/rasterizer_cache.h +++ b/src/video_core/rasterizer_cache.h | |||
| @@ -71,8 +71,8 @@ private: | |||
| 71 | bool is_registered{}; ///< Whether the object is currently registered with the cache | 71 | bool is_registered{}; ///< Whether the object is currently registered with the cache |
| 72 | bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) | 72 | bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) |
| 73 | u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing | 73 | u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing |
| 74 | CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space | ||
| 75 | const u8* host_ptr{}; ///< Pointer to the memory backing this cached region | 74 | const u8* host_ptr{}; ///< Pointer to the memory backing this cached region |
| 75 | CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space | ||
| 76 | }; | 76 | }; |
| 77 | 77 | ||
| 78 | template <class T> | 78 | template <class T> |
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp index f75c65825..fd091c84c 100644 --- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp | |||
| @@ -15,8 +15,8 @@ namespace OpenGL { | |||
| 15 | 15 | ||
| 16 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, | 16 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, |
| 17 | std::size_t alignment, u8* host_ptr) | 17 | std::size_t alignment, u8* host_ptr) |
| 18 | : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ | 18 | : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset}, |
| 19 | host_ptr} {} | 19 | alignment{alignment} {} |
| 20 | 20 | ||
| 21 | OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) | 21 | OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) |
| 22 | : RasterizerCache{rasterizer}, stream_buffer(size, true) {} | 22 | : RasterizerCache{rasterizer}, stream_buffer(size, true) {} |
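The CachedBufferEntry constructor change above is the first of several identical fixes in this set (gl_global_cache, gl_rasterizer_cache, gl_shader_cache and vk_buffer_cache below repeat it): the RasterizerCacheObject{host_ptr} base initializer moves to the front of the member initializer list. Base subobjects are always constructed before non-static data members, so listing the base last never changed behaviour, but it misstates the real order and compilers typically warn about it (GCC/Clang -Wreorder); the member swap in rasterizer_cache.h presumably serves the same goal of keeping declaration order and initialization order in sync. A reduced sketch with stand-in types:

#include <cstddef>
#include <cstdint>

struct CacheObjectBase {
    explicit CacheObjectBase(const std::uint8_t* host_ptr) : host_ptr{host_ptr} {}
    const std::uint8_t* host_ptr{};
};

struct CachedEntry : CacheObjectBase {
    // The base initializer is written first because that is when it actually
    // runs: bases are constructed before any data member of the derived class.
    CachedEntry(std::uint64_t cpu_addr, std::size_t size, const std::uint8_t* host_ptr)
        : CacheObjectBase{host_ptr}, cpu_addr{cpu_addr}, size{size} {}

    std::uint64_t cpu_addr{};
    std::size_t size{};
};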
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp index 0fbfbad55..da9326253 100644 --- a/src/video_core/renderer_opengl/gl_global_cache.cpp +++ b/src/video_core/renderer_opengl/gl_global_cache.cpp | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | namespace OpenGL { | 15 | namespace OpenGL { |
| 16 | 16 | ||
| 17 | CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) | 17 | CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) |
| 18 | : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { | 18 | : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size} { |
| 19 | buffer.Create(); | 19 | buffer.Create(); |
| 20 | // Bind and unbind the buffer so it gets allocated by the driver | 20 | // Bind and unbind the buffer so it gets allocated by the driver |
| 21 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); | 21 | glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp index e06dfe43f..046fc935b 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp | |||
| @@ -100,11 +100,9 @@ struct FramebufferCacheKey { | |||
| 100 | } | 100 | } |
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, | 103 | RasterizerOpenGL::RasterizerOpenGL(Core::System& system, ScreenInfo& info) |
| 104 | ScreenInfo& info) | 104 | : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, system{system}, |
| 105 | : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, | 105 | screen_info{info}, buffer_cache(*this, STREAM_BUFFER_SIZE) { |
| 106 | emu_window{window}, system{system}, screen_info{info}, | ||
| 107 | buffer_cache(*this, STREAM_BUFFER_SIZE) { | ||
| 108 | // Create sampler objects | 106 | // Create sampler objects |
| 109 | for (std::size_t i = 0; i < texture_samplers.size(); ++i) { | 107 | for (std::size_t i = 0; i < texture_samplers.size(); ++i) { |
| 110 | texture_samplers[i].Create(); | 108 | texture_samplers[i].Create(); |
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h index 30f3e8acb..4de565321 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer.h +++ b/src/video_core/renderer_opengl/gl_rasterizer.h | |||
| @@ -50,8 +50,7 @@ struct FramebufferCacheKey; | |||
| 50 | 50 | ||
| 51 | class RasterizerOpenGL : public VideoCore::RasterizerInterface { | 51 | class RasterizerOpenGL : public VideoCore::RasterizerInterface { |
| 52 | public: | 52 | public: |
| 53 | explicit RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, | 53 | explicit RasterizerOpenGL(Core::System& system, ScreenInfo& info); |
| 54 | ScreenInfo& info); | ||
| 55 | ~RasterizerOpenGL() override; | 54 | ~RasterizerOpenGL() override; |
| 56 | 55 | ||
| 57 | void DrawArrays() override; | 56 | void DrawArrays() override; |
| @@ -214,7 +213,6 @@ private: | |||
| 214 | ShaderCacheOpenGL shader_cache; | 213 | ShaderCacheOpenGL shader_cache; |
| 215 | GlobalRegionCacheOpenGL global_cache; | 214 | GlobalRegionCacheOpenGL global_cache; |
| 216 | 215 | ||
| 217 | Core::Frontend::EmuWindow& emu_window; | ||
| 218 | Core::System& system; | 216 | Core::System& system; |
| 219 | 217 | ||
| 220 | ScreenInfo& screen_info; | 218 | ScreenInfo& screen_info; |
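gl_rasterizer also drops its Core::Frontend::EmuWindow dependency entirely: the reference was apparently never read, so both the constructor parameter and the emu_window member disappear, and the single construction site is updated in the renderer_opengl.cpp hunk further below. A reduced sketch of the slimmed-down constructor, with stand-in types:

// Stand-ins for Core::System and the OpenGL ScreenInfo.
struct System {};
struct ScreenInfo {};

class RasterizerOpenGL {
public:
    // Only the dependencies the rasterizer actually uses remain in the signature.
    explicit RasterizerOpenGL(System& system, ScreenInfo& info)
        : system{system}, screen_info{info} {}

private:
    System& system;
    ScreenInfo& screen_info;
};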
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp index 0235317c0..aba6ce731 100644 --- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp +++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | |||
| @@ -562,8 +562,8 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac | |||
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | CachedSurface::CachedSurface(const SurfaceParams& params) | 564 | CachedSurface::CachedSurface(const SurfaceParams& params) |
| 565 | : params{params}, gl_target{SurfaceTargetToGL(params.target)}, | 565 | : RasterizerCacheObject{params.host_ptr}, params{params}, |
| 566 | cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { | 566 | gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} { |
| 567 | 567 | ||
| 568 | const auto optional_cpu_addr{ | 568 | const auto optional_cpu_addr{ |
| 569 | Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; | 569 | Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; |
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp index 1f8eca6f0..290e654bc 100644 --- a/src/video_core/renderer_opengl/gl_shader_cache.cpp +++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp | |||
| @@ -215,9 +215,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, | |||
| 215 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, | 215 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 216 | const PrecompiledPrograms& precompiled_programs, | 216 | const PrecompiledPrograms& precompiled_programs, |
| 217 | ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) | 217 | ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) |
| 218 | : host_ptr{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, | 218 | : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr}, |
| 219 | program_type{program_type}, disk_cache{disk_cache}, | 219 | unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache}, |
| 220 | precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { | 220 | precompiled_programs{precompiled_programs} { |
| 221 | 221 | ||
| 222 | const std::size_t code_size = CalculateProgramSize(program_code); | 222 | const std::size_t code_size = CalculateProgramSize(program_code); |
| 223 | const std::size_t code_size_b = | 223 | const std::size_t code_size_b = |
| @@ -245,9 +245,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier, | |||
| 245 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, | 245 | Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, |
| 246 | const PrecompiledPrograms& precompiled_programs, | 246 | const PrecompiledPrograms& precompiled_programs, |
| 247 | GLShader::ProgramResult result, u8* host_ptr) | 247 | GLShader::ProgramResult result, u8* host_ptr) |
| 248 | : cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, program_type{program_type}, | 248 | : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, |
| 249 | disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ | 249 | program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{ |
| 250 | host_ptr} { | 250 | precompiled_programs} { |
| 251 | 251 | ||
| 252 | code = std::move(result.first); | 252 | code = std::move(result.first); |
| 253 | entries = result.second; | 253 | entries = result.second; |
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp index 5e3d862c6..a01efeb05 100644 --- a/src/video_core/renderer_opengl/renderer_opengl.cpp +++ b/src/video_core/renderer_opengl/renderer_opengl.cpp | |||
| @@ -266,7 +266,7 @@ void RendererOpenGL::CreateRasterizer() { | |||
| 266 | } | 266 | } |
| 267 | // Initialize sRGB Usage | 267 | // Initialize sRGB Usage |
| 268 | OpenGLState::ClearsRGBUsed(); | 268 | OpenGLState::ClearsRGBUsed(); |
| 269 | rasterizer = std::make_unique<RasterizerOpenGL>(render_window, system, screen_info); | 269 | rasterizer = std::make_unique<RasterizerOpenGL>(system, screen_info); |
| 270 | } | 270 | } |
| 271 | 271 | ||
| 272 | void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, | 272 | void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, |
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp index eac51ecb3..388b5ffd5 100644 --- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp +++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp | |||
| @@ -19,8 +19,8 @@ namespace Vulkan { | |||
| 19 | 19 | ||
| 20 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, | 20 | CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, |
| 21 | std::size_t alignment, u8* host_ptr) | 21 | std::size_t alignment, u8* host_ptr) |
| 22 | : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ | 22 | : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset}, |
| 23 | host_ptr} {} | 23 | alignment{alignment} {} |
| 24 | 24 | ||
| 25 | VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, | 25 | VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, |
| 26 | VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, | 26 | VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, |
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp index 06ad74ffe..593bb681f 100644 --- a/src/yuzu/debugger/wait_tree.cpp +++ b/src/yuzu/debugger/wait_tree.cpp | |||
| @@ -234,6 +234,9 @@ QString WaitTreeThread::GetText() const { | |||
| 234 | case Kernel::ThreadStatus::WaitMutex: | 234 | case Kernel::ThreadStatus::WaitMutex: |
| 235 | status = tr("waiting for mutex"); | 235 | status = tr("waiting for mutex"); |
| 236 | break; | 236 | break; |
| 237 | case Kernel::ThreadStatus::WaitCondVar: | ||
| 238 | status = tr("waiting for condition variable"); | ||
| 239 | break; | ||
| 237 | case Kernel::ThreadStatus::WaitArb: | 240 | case Kernel::ThreadStatus::WaitArb: |
| 238 | status = tr("waiting for address arbiter"); | 241 | status = tr("waiting for address arbiter"); |
| 239 | break; | 242 | break; |
| @@ -269,6 +272,7 @@ QColor WaitTreeThread::GetColor() const { | |||
| 269 | case Kernel::ThreadStatus::WaitSynchAll: | 272 | case Kernel::ThreadStatus::WaitSynchAll: |
| 270 | case Kernel::ThreadStatus::WaitSynchAny: | 273 | case Kernel::ThreadStatus::WaitSynchAny: |
| 271 | case Kernel::ThreadStatus::WaitMutex: | 274 | case Kernel::ThreadStatus::WaitMutex: |
| 275 | case Kernel::ThreadStatus::WaitCondVar: | ||
| 272 | case Kernel::ThreadStatus::WaitArb: | 276 | case Kernel::ThreadStatus::WaitArb: |
| 273 | return QColor(Qt::GlobalColor::red); | 277 | return QColor(Qt::GlobalColor::red); |
| 274 | case Kernel::ThreadStatus::Dormant: | 278 | case Kernel::ThreadStatus::Dormant: |
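Finally, the debugger's wait tree learns about the new Kernel::ThreadStatus::WaitCondVar state: GetText() gains a dedicated description and GetColor() groups it with the other waiting states. A reduced, non-Qt sketch of the idea (hypothetical cut-down enum and plain strings; the real code uses tr() and QColor):

enum class ThreadStatus { WaitMutex, WaitCondVar, WaitArb, Dormant };

const char* StatusText(ThreadStatus status) {
    switch (status) {
    case ThreadStatus::WaitMutex:
        return "waiting for mutex";
    case ThreadStatus::WaitCondVar:
        // Newly handled state: without this case the status would fall back
        // to whatever the default/unknown branch reports.
        return "waiting for condition variable";
    case ThreadStatus::WaitArb:
        return "waiting for address arbiter";
    default:
        return "unknown";
    }
}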