path: root/src/common
Diffstat (limited to 'src/common')
-rw-r--r--  src/common/CMakeLists.txt        |   6
-rw-r--r--  src/common/bit_util.h            |  47
-rw-r--r--  src/common/detached_tasks.cpp    |   8
-rw-r--r--  src/common/logging/backend.cpp   |   6
-rw-r--r--  src/common/lz4_compression.cpp   |  76
-rw-r--r--  src/common/lz4_compression.h     |  55
-rw-r--r--  src/common/multi_level_queue.h   | 337
-rw-r--r--  src/common/thread.cpp            |  37
-rw-r--r--  src/common/thread.h              |  14
-rw-r--r--  src/common/threadsafe_queue.h    |   4
-rw-r--r--  src/common/zstd_compression.cpp  |  53
-rw-r--r--  src/common/zstd_compression.h    |  42
12 files changed, 626 insertions(+), 59 deletions(-)
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 43ae8a9e7..1e8e1b215 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -91,6 +91,8 @@ add_library(common STATIC
     logging/log.h
     logging/text_formatter.cpp
     logging/text_formatter.h
+    lz4_compression.cpp
+    lz4_compression.h
     math_util.h
     memory_hook.cpp
     memory_hook.h
@@ -98,6 +100,7 @@ add_library(common STATIC
     microprofile.h
     microprofileui.h
     misc.cpp
+    multi_level_queue.h
     page_table.cpp
     page_table.h
     param_package.cpp
@@ -122,6 +125,8 @@ add_library(common STATIC
     uint128.h
     vector_math.h
     web_result.h
+    zstd_compression.cpp
+    zstd_compression.h
 )
 
 if(ARCHITECTURE_x86_64)
@@ -135,3 +140,4 @@ endif()
 create_target_directory_groups(common)
 
 target_link_libraries(common PUBLIC Boost::boost fmt microprofile)
+target_link_libraries(common PRIVATE lz4_static libzstd_static)
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 1eea17ba1..d032df413 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -32,7 +32,7 @@ inline u32 CountLeadingZeroes32(u32 value) {
     return 32;
 }
 
-inline u64 CountLeadingZeroes64(u64 value) {
+inline u32 CountLeadingZeroes64(u64 value) {
     unsigned long leading_zero = 0;
 
     if (_BitScanReverse64(&leading_zero, value) != 0) {
@@ -47,15 +47,54 @@ inline u32 CountLeadingZeroes32(u32 value) {
         return 32;
     }
 
-    return __builtin_clz(value);
+    return static_cast<u32>(__builtin_clz(value));
 }
 
-inline u64 CountLeadingZeroes64(u64 value) {
+inline u32 CountLeadingZeroes64(u64 value) {
     if (value == 0) {
         return 64;
     }
 
-    return __builtin_clzll(value);
+    return static_cast<u32>(__builtin_clzll(value));
 }
 #endif
+
+#ifdef _MSC_VER
+inline u32 CountTrailingZeroes32(u32 value) {
+    unsigned long trailing_zero = 0;
+
+    if (_BitScanForward(&trailing_zero, value) != 0) {
+        return trailing_zero;
+    }
+
+    return 32;
+}
+
+inline u32 CountTrailingZeroes64(u64 value) {
+    unsigned long trailing_zero = 0;
+
+    if (_BitScanForward64(&trailing_zero, value) != 0) {
+        return trailing_zero;
+    }
+
+    return 64;
+}
+#else
+inline u32 CountTrailingZeroes32(u32 value) {
+    if (value == 0) {
+        return 32;
+    }
+
+    return static_cast<u32>(__builtin_ctz(value));
+}
+
+inline u32 CountTrailingZeroes64(u64 value) {
+    if (value == 0) {
+        return 64;
+    }
+
+    return static_cast<u32>(__builtin_ctzll(value));
+}
+#endif
+
 } // namespace Common
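The new CountTrailingZeroes32/CountTrailingZeroes64 helpers mirror the existing leading-zero counters and are what multi_level_queue.h below uses to locate set bits in a priority bitmask. A minimal usage sketch, illustrative only (LowestSetBit and HighestSetBit are hypothetical helpers, not part of this diff):

    #include "common/bit_util.h"
    #include "common/common_types.h"

    // Index of the lowest set bit, or 64 when the mask is empty
    // (CountTrailingZeroes64 already returns 64 for a zero input).
    u32 LowestSetBit(u64 mask) {
        return Common::CountTrailingZeroes64(mask);
    }

    // Index of the highest set bit, or 64 when the mask is empty.
    u32 HighestSetBit(u64 mask) {
        return mask == 0 ? 64 : 63 - Common::CountLeadingZeroes64(mask);
    }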
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp
index a347d9e02..f268d6021 100644
--- a/src/common/detached_tasks.cpp
+++ b/src/common/detached_tasks.cpp
@@ -16,22 +16,22 @@ DetachedTasks::DetachedTasks() {
 }
 
 void DetachedTasks::WaitForAllTasks() {
-    std::unique_lock<std::mutex> lock(mutex);
+    std::unique_lock lock{mutex};
     cv.wait(lock, [this]() { return count == 0; });
 }
 
 DetachedTasks::~DetachedTasks() {
-    std::unique_lock<std::mutex> lock(mutex);
+    std::unique_lock lock{mutex};
     ASSERT(count == 0);
     instance = nullptr;
 }
 
 void DetachedTasks::AddTask(std::function<void()> task) {
-    std::unique_lock<std::mutex> lock(instance->mutex);
+    std::unique_lock lock{instance->mutex};
     ++instance->count;
     std::thread([task{std::move(task)}]() {
         task();
-        std::unique_lock<std::mutex> lock(instance->mutex);
+        std::unique_lock lock{instance->mutex};
         --instance->count;
         std::notify_all_at_thread_exit(instance->cv, std::move(lock));
     })
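The lock changes in this file (and in backend.cpp, thread.h, and threadsafe_queue.h below) rely on C++17 class template argument deduction: the mutex type no longer needs to be spelled out, so std::unique_lock lock{mutex}; declares the same type as std::unique_lock<std::mutex> lock(mutex);. A minimal sketch, illustrative only:

    #include <mutex>

    std::mutex m;

    void Example() {
        std::lock_guard lock{m}; // deduced as std::lock_guard<std::mutex>
        // ... critical section ...
    }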
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 4462ff3fb..a03179520 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -46,12 +46,12 @@ public:
     }
 
     void AddBackend(std::unique_ptr<Backend> backend) {
-        std::lock_guard<std::mutex> lock(writing_mutex);
+        std::lock_guard lock{writing_mutex};
         backends.push_back(std::move(backend));
     }
 
     void RemoveBackend(std::string_view backend_name) {
-        std::lock_guard<std::mutex> lock(writing_mutex);
+        std::lock_guard lock{writing_mutex};
         const auto it =
             std::remove_if(backends.begin(), backends.end(),
                            [&backend_name](const auto& i) { return backend_name == i->GetName(); });
@@ -80,7 +80,7 @@ private:
         backend_thread = std::thread([&] {
             Entry entry;
             auto write_logs = [&](Entry& e) {
-                std::lock_guard<std::mutex> lock(writing_mutex);
+                std::lock_guard lock{writing_mutex};
                 for (const auto& backend : backends) {
                     backend->Write(e);
                 }
diff --git a/src/common/lz4_compression.cpp b/src/common/lz4_compression.cpp
new file mode 100644
index 000000000..ade6759bb
--- /dev/null
+++ b/src/common/lz4_compression.cpp
@@ -0,0 +1,76 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <lz4hc.h>
+
+#include "common/assert.h"
+#include "common/lz4_compression.h"
+
+namespace Common::Compression {
+
+std::vector<u8> CompressDataLZ4(const u8* source, std::size_t source_size) {
+    ASSERT_MSG(source_size <= LZ4_MAX_INPUT_SIZE, "Source size exceeds LZ4 maximum input size");
+
+    const auto source_size_int = static_cast<int>(source_size);
+    const int max_compressed_size = LZ4_compressBound(source_size_int);
+    std::vector<u8> compressed(max_compressed_size);
+
+    const int compressed_size = LZ4_compress_default(reinterpret_cast<const char*>(source),
+                                                      reinterpret_cast<char*>(compressed.data()),
+                                                      source_size_int, max_compressed_size);
+
+    if (compressed_size <= 0) {
+        // Compression failed
+        return {};
+    }
+
+    compressed.resize(compressed_size);
+
+    return compressed;
+}
+
+std::vector<u8> CompressDataLZ4HC(const u8* source, std::size_t source_size,
+                                  s32 compression_level) {
+    ASSERT_MSG(source_size <= LZ4_MAX_INPUT_SIZE, "Source size exceeds LZ4 maximum input size");
+
+    compression_level = std::clamp(compression_level, LZ4HC_CLEVEL_MIN, LZ4HC_CLEVEL_MAX);
+
+    const auto source_size_int = static_cast<int>(source_size);
+    const int max_compressed_size = LZ4_compressBound(source_size_int);
+    std::vector<u8> compressed(max_compressed_size);
+
+    const int compressed_size = LZ4_compress_HC(
+        reinterpret_cast<const char*>(source), reinterpret_cast<char*>(compressed.data()),
+        source_size_int, max_compressed_size, compression_level);
+
+    if (compressed_size <= 0) {
+        // Compression failed
+        return {};
+    }
+
+    compressed.resize(compressed_size);
+
+    return compressed;
+}
+
+std::vector<u8> CompressDataLZ4HCMax(const u8* source, std::size_t source_size) {
+    return CompressDataLZ4HC(source, source_size, LZ4HC_CLEVEL_MAX);
+}
+
+std::vector<u8> DecompressDataLZ4(const std::vector<u8>& compressed,
+                                  std::size_t uncompressed_size) {
+    std::vector<u8> uncompressed(uncompressed_size);
+    const int size_check = LZ4_decompress_safe(reinterpret_cast<const char*>(compressed.data()),
+                                               reinterpret_cast<char*>(uncompressed.data()),
+                                               static_cast<int>(compressed.size()),
+                                               static_cast<int>(uncompressed.size()));
+    if (static_cast<int>(uncompressed_size) != size_check) {
+        // Decompression failed
+        return {};
+    }
+    return uncompressed;
+}
+
+} // namespace Common::Compression
diff --git a/src/common/lz4_compression.h b/src/common/lz4_compression.h
new file mode 100644
index 000000000..fe2231a6c
--- /dev/null
+++ b/src/common/lz4_compression.h
@@ -0,0 +1,55 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common::Compression {
+
+/**
+ * Compresses a source memory region with LZ4 and returns the compressed data in a vector.
+ *
+ * @param source the uncompressed source memory region.
+ * @param source_size the size in bytes of the uncompressed source memory region.
+ *
+ * @return the compressed data.
+ */
+std::vector<u8> CompressDataLZ4(const u8* source, std::size_t source_size);
+
+/**
+ * Utilizes the LZ4 subalgorithm LZ4HC with the specified compression level. Higher compression
+ * levels result in a smaller compressed size, but require more CPU time for compression. The
+ * compression level has almost no impact on decompression speed. Data compressed with LZ4HC can
+ * also be decompressed with the default LZ4 decompression.
+ *
+ * @param source the uncompressed source memory region.
+ * @param source_size the size in bytes of the uncompressed source memory region.
+ * @param compression_level the used compression level. Should be between 3 and 12.
+ *
+ * @return the compressed data.
+ */
+std::vector<u8> CompressDataLZ4HC(const u8* source, std::size_t source_size, s32 compression_level);
+
+/**
+ * Utilizes the LZ4 subalgorithm LZ4HC with the highest possible compression level.
+ *
+ * @param source the uncompressed source memory region.
+ * @param source_size the size in bytes of the uncompressed source memory region.
+ *
+ * @return the compressed data.
+ */
+std::vector<u8> CompressDataLZ4HCMax(const u8* source, std::size_t source_size);
+
+/**
+ * Decompresses a source memory region with LZ4 and returns the uncompressed data in a vector.
+ *
+ * @param compressed the compressed source memory region.
+ * @param uncompressed_size the size in bytes of the uncompressed data.
+ *
+ * @return the decompressed data.
+ */
+std::vector<u8> DecompressDataLZ4(const std::vector<u8>& compressed, std::size_t uncompressed_size);
+
+} // namespace Common::Compression
\ No newline at end of file
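Note that the DecompressDataLZ4 signature requires the caller to supply the uncompressed size, since raw LZ4 blocks do not store it. A minimal round-trip sketch, illustrative only (Roundtrip is a hypothetical helper; both calls return an empty vector on failure):

    #include <vector>

    #include "common/common_types.h"
    #include "common/lz4_compression.h"

    std::vector<u8> Roundtrip(const std::vector<u8>& input) {
        // The caller is responsible for remembering input.size() for decompression.
        const std::vector<u8> compressed =
            Common::Compression::CompressDataLZ4(input.data(), input.size());
        return Common::Compression::DecompressDataLZ4(compressed, input.size());
    }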
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
new file mode 100644
index 000000000..9cb448f56
--- /dev/null
+++ b/src/common/multi_level_queue.h
@@ -0,0 +1,337 @@
+// Copyright 2019 TuxSH
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <iterator>
+#include <list>
+#include <utility>
+
+#include "common/bit_util.h"
+#include "common/common_types.h"
+
+namespace Common {
+
+/**
+ * A MultiLevelQueue is a type of priority queue which has the following characteristics:
+ * - iterable through each of its elements.
+ * - back can be obtained.
+ * - O(1) add, lookup (both front and back)
+ * - discrete priorities and a max of 64 priorities (limited domain)
+ * This type of priority queue is normally used for managing threads within a scheduler.
+ */
+template <typename T, std::size_t Depth>
+class MultiLevelQueue {
+public:
+    using value_type = T;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using pointer = value_type*;
+    using const_pointer = const value_type*;
+
+    using difference_type = typename std::pointer_traits<pointer>::difference_type;
+    using size_type = std::size_t;
+
+    template <bool is_constant>
+    class iterator_impl {
+    public:
+        using iterator_category = std::bidirectional_iterator_tag;
+        using value_type = T;
+        using pointer = std::conditional_t<is_constant, const T*, T*>;
+        using reference = std::conditional_t<is_constant, const T&, T&>;
+        using difference_type = typename std::pointer_traits<pointer>::difference_type;
+
+        friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
+            if (lhs.IsEnd() && rhs.IsEnd())
+                return true;
+            return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
+        }
+
+        friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
+            return !operator==(lhs, rhs);
+        }
+
+        reference operator*() const {
+            return *it;
+        }
+
+        pointer operator->() const {
+            return it.operator->();
+        }
+
+        iterator_impl& operator++() {
+            if (IsEnd()) {
+                return *this;
+            }
+
+            ++it;
+
+            if (it == GetEndItForPrio()) {
+                u64 prios = mlq.used_priorities;
+                prios &= ~((1ULL << (current_priority + 1)) - 1);
+                if (prios == 0) {
+                    current_priority = static_cast<u32>(mlq.depth());
+                } else {
+                    current_priority = CountTrailingZeroes64(prios);
+                    it = GetBeginItForPrio();
+                }
+            }
+            return *this;
+        }
+
+        iterator_impl& operator--() {
+            if (IsEnd()) {
+                if (mlq.used_priorities != 0) {
+                    current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
+                    it = GetEndItForPrio();
+                    --it;
+                }
+            } else if (it == GetBeginItForPrio()) {
+                u64 prios = mlq.used_priorities;
+                prios &= (1ULL << current_priority) - 1;
+                if (prios != 0) {
+                    current_priority = CountTrailingZeroes64(prios);
+                    it = GetEndItForPrio();
+                    --it;
+                }
+            } else {
+                --it;
+            }
+            return *this;
+        }
+
+        iterator_impl operator++(int) {
+            const iterator_impl v{*this};
+            ++(*this);
+            return v;
+        }
+
+        iterator_impl operator--(int) {
+            const iterator_impl v{*this};
+            --(*this);
+            return v;
+        }
+
+        // allow implicit conversion from a non-const iterator to a const iterator
+        iterator_impl(const iterator_impl<false>& other)
+            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
+
+        iterator_impl(const iterator_impl<true>& other)
+            : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
+
+        iterator_impl& operator=(const iterator_impl<false>& other) {
+            mlq = other.mlq;
+            it = other.it;
+            current_priority = other.current_priority;
+            return *this;
+        }
+
+        friend class iterator_impl<true>;
+        iterator_impl() = default;
+
+    private:
+        friend class MultiLevelQueue;
+        using container_ref =
+            std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
+        using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
+                                                 typename std::list<T>::iterator>;
+
+        explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
+            : mlq(mlq), it(it), current_priority(current_priority) {}
+        explicit iterator_impl(container_ref mlq, u32 current_priority)
+            : mlq(mlq), it(), current_priority(current_priority) {}
+
+        bool IsEnd() const {
+            return current_priority == mlq.depth();
+        }
+
+        list_iterator GetBeginItForPrio() const {
+            return mlq.levels[current_priority].begin();
+        }
+
+        list_iterator GetEndItForPrio() const {
+            return mlq.levels[current_priority].end();
+        }
+
+        container_ref mlq;
+        list_iterator it;
+        u32 current_priority;
+    };
+
+    using iterator = iterator_impl<false>;
+    using const_iterator = iterator_impl<true>;
+
+    void add(const T& element, u32 priority, bool send_back = true) {
+        if (send_back)
+            levels[priority].push_back(element);
+        else
+            levels[priority].push_front(element);
+        used_priorities |= 1ULL << priority;
+    }
+
+    void remove(const T& element, u32 priority) {
+        auto it = ListIterateTo(levels[priority], element);
+        if (it == levels[priority].end())
+            return;
+        levels[priority].erase(it);
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
+        remove(element, old_priority);
+        add(element, new_priority, !adjust_front);
+    }
+    void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
+        adjust(*it, old_priority, new_priority, adjust_front);
+    }
+
+    void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
+        ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
+                   ListIterateTo(levels[priority], element));
+
+        other.used_priorities |= 1ULL << priority;
+
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
+        transfer_to_front(*it, priority, other);
+    }
+
+    void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
+        ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
+                   ListIterateTo(levels[priority], element));
+
+        other.used_priorities |= 1ULL << priority;
+
+        if (levels[priority].empty()) {
+            used_priorities &= ~(1ULL << priority);
+        }
+    }
+
+    void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
+        transfer_to_back(*it, priority, other);
+    }
+
+    void yield(u32 priority, std::size_t n = 1) {
+        ListShiftForward(levels[priority], n);
+    }
+
+    std::size_t depth() const {
+        return Depth;
+    }
+
+    std::size_t size(u32 priority) const {
+        return levels[priority].size();
+    }
+
+    std::size_t size() const {
+        u64 priorities = used_priorities;
+        std::size_t size = 0;
+        while (priorities != 0) {
+            const u64 current_priority = CountTrailingZeroes64(priorities);
+            size += levels[current_priority].size();
+            priorities &= ~(1ULL << current_priority);
+        }
+        return size;
+    }
+
+    bool empty() const {
+        return used_priorities == 0;
+    }
+
+    bool empty(u32 priority) const {
+        return (used_priorities & (1ULL << priority)) == 0;
+    }
+
+    u32 highest_priority_set(u32 max_priority = 0) const {
+        const u64 priorities =
+            max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
+        return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
+    }
+
+    u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
+        const u64 priorities = min_priority >= Depth - 1
+                                   ? used_priorities
+                                   : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
+        return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
+    }
+
+    const_iterator cbegin(u32 max_prio = 0) const {
+        const u32 priority = highest_priority_set(max_prio);
+        return priority == Depth ? cend()
+                                 : const_iterator{*this, levels[priority].cbegin(), priority};
+    }
+    const_iterator begin(u32 max_prio = 0) const {
+        return cbegin(max_prio);
+    }
+    iterator begin(u32 max_prio = 0) {
+        const u32 priority = highest_priority_set(max_prio);
+        return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
+    }
+
+    const_iterator cend(u32 min_prio = Depth - 1) const {
+        return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
+    }
+    const_iterator end(u32 min_prio = Depth - 1) const {
+        return cend(min_prio);
+    }
+    iterator end(u32 min_prio = Depth - 1) {
+        return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
+    }
+
+    T& front(u32 max_priority = 0) {
+        const u32 priority = highest_priority_set(max_priority);
+        return levels[priority == Depth ? 0 : priority].front();
+    }
+    const T& front(u32 max_priority = 0) const {
+        const u32 priority = highest_priority_set(max_priority);
+        return levels[priority == Depth ? 0 : priority].front();
+    }
+
+    T back(u32 min_priority = Depth - 1) {
+        const u32 priority = lowest_priority_set(min_priority); // intended
+        return levels[priority == Depth ? 63 : priority].back();
+    }
+    const T& back(u32 min_priority = Depth - 1) const {
+        const u32 priority = lowest_priority_set(min_priority); // intended
+        return levels[priority == Depth ? 63 : priority].back();
+    }
+
+private:
+    using const_list_iterator = typename std::list<T>::const_iterator;
+
+    static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
+        if (shift >= list.size()) {
+            return;
+        }
+
+        const auto begin_range = list.begin();
+        const auto end_range = std::next(begin_range, shift);
+        list.splice(list.end(), list, begin_range, end_range);
+    }
+
+    static void ListSplice(std::list<T>& in_list, const_list_iterator position,
+                           std::list<T>& out_list, const_list_iterator element) {
+        in_list.splice(position, out_list, element);
+    }
+
+    static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
+        auto it = list.cbegin();
+        while (it != list.cend() && *it != element) {
+            ++it;
+        }
+        return it;
+    }
+
+    std::array<std::list<T>, Depth> levels;
+    u64 used_priorities = 0;
+};
+
+} // namespace Common
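A minimal usage sketch of the container, illustrative only (the Depth of 64 and the values are arbitrary; lower priority indices are scanned first by front()/begin()):

    #include "common/multi_level_queue.h"

    int Example() {
        Common::MultiLevelQueue<int, 64> queue;
        queue.add(10, 3); // value 10 at priority 3
        queue.add(20, 0); // priority 0 is scanned before priority 3
        queue.add(30, 0);
        const int first = queue.front(); // 20
        queue.yield(0);                  // round-robin within priority 0: 20 moves behind 30
        queue.remove(10, 3);
        return first + queue.front();    // 20 + 30
    }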
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 5144c0d9f..fe7a420cc 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -27,18 +27,6 @@ namespace Common {
 
 #ifdef _MSC_VER
 
-void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
-    SetThreadAffinityMask(thread, mask);
-}
-
-void SetCurrentThreadAffinity(u32 mask) {
-    SetThreadAffinityMask(GetCurrentThread(), mask);
-}
-
-void SwitchCurrentThread() {
-    SwitchToThread();
-}
-
 // Sets the debugger-visible name of the current thread.
 // Uses undocumented (actually, it is now documented) trick.
 // http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp
@@ -70,31 +58,6 @@ void SetCurrentThreadName(const char* name) {
 
 #else // !MSVC_VER, so must be POSIX threads
 
-void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
-#ifdef __APPLE__
-    thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (integer_t*)&mask, 1);
-#elif (defined __linux__ || defined __FreeBSD__) && !(defined ANDROID)
-    cpu_set_t cpu_set;
-    CPU_ZERO(&cpu_set);
-
-    for (int i = 0; i != sizeof(mask) * 8; ++i)
-        if ((mask >> i) & 1)
-            CPU_SET(i, &cpu_set);
-
-    pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set);
-#endif
-}
-
-void SetCurrentThreadAffinity(u32 mask) {
-    SetThreadAffinity(pthread_self(), mask);
-}
-
-#ifndef _WIN32
-void SwitchCurrentThread() {
-    usleep(1000 * 1);
-}
-#endif
-
 // MinGW with the POSIX threading model does not support pthread_setname_np
 #if !defined(_WIN32) || defined(_MSC_VER)
 void SetCurrentThreadName(const char* name) {
diff --git a/src/common/thread.h b/src/common/thread.h
index 2cf74452d..0cfd98be6 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -9,14 +9,13 @@
 #include <cstddef>
 #include <mutex>
 #include <thread>
-#include "common/common_types.h"
 
 namespace Common {
 
 class Event {
 public:
     void Set() {
-        std::lock_guard<std::mutex> lk(mutex);
+        std::lock_guard lk{mutex};
         if (!is_set) {
             is_set = true;
             condvar.notify_one();
@@ -24,14 +23,14 @@ public:
     }
 
     void Wait() {
-        std::unique_lock<std::mutex> lk(mutex);
+        std::unique_lock lk{mutex};
         condvar.wait(lk, [&] { return is_set; });
         is_set = false;
     }
 
     template <class Clock, class Duration>
     bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) {
-        std::unique_lock<std::mutex> lk(mutex);
+        std::unique_lock lk{mutex};
         if (!condvar.wait_until(lk, time, [this] { return is_set; }))
             return false;
         is_set = false;
@@ -39,7 +38,7 @@ public:
     }
 
     void Reset() {
-        std::unique_lock<std::mutex> lk(mutex);
+        std::unique_lock lk{mutex};
         // no other action required, since wait loops on the predicate and any lingering signal will
         // get cleared on the first iteration
         is_set = false;
@@ -57,7 +56,7 @@ public:
 
     /// Blocks until all "count" threads have called Sync()
     void Sync() {
-        std::unique_lock<std::mutex> lk(mutex);
+        std::unique_lock lk{mutex};
         const std::size_t current_generation = generation;
 
         if (++waiting == count) {
@@ -78,9 +77,6 @@ private:
     std::size_t generation = 0; // Incremented once each time the barrier is used
 };
 
-void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask);
-void SetCurrentThreadAffinity(u32 mask);
-void SwitchCurrentThread(); // On Linux, this is equal to sleep 1ms
 void SetCurrentThreadName(const char* name);
 
 } // namespace Common
diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h
index 821e8536a..e714ba5b3 100644
--- a/src/common/threadsafe_queue.h
+++ b/src/common/threadsafe_queue.h
@@ -78,7 +78,7 @@ public:
 
     T PopWait() {
         if (Empty()) {
-            std::unique_lock<std::mutex> lock(cv_mutex);
+            std::unique_lock lock{cv_mutex};
            cv.wait(lock, [this]() { return !Empty(); });
         }
         T t;
@@ -137,7 +137,7 @@ public:
 
     template <typename Arg>
     void Push(Arg&& t) {
-        std::lock_guard<std::mutex> lock(write_lock);
+        std::lock_guard lock{write_lock};
         spsc_queue.Push(t);
     }
 
diff --git a/src/common/zstd_compression.cpp b/src/common/zstd_compression.cpp
new file mode 100644
index 000000000..60a35c67c
--- /dev/null
+++ b/src/common/zstd_compression.cpp
@@ -0,0 +1,53 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+#include <zstd.h>
+
+#include "common/assert.h"
+#include "common/zstd_compression.h"
+
+namespace Common::Compression {
+
+std::vector<u8> CompressDataZSTD(const u8* source, std::size_t source_size, s32 compression_level) {
+    compression_level = std::clamp(compression_level, 1, ZSTD_maxCLevel());
+
+    const std::size_t max_compressed_size = ZSTD_compressBound(source_size);
+    std::vector<u8> compressed(max_compressed_size);
+
+    const std::size_t compressed_size =
+        ZSTD_compress(compressed.data(), compressed.size(), source, source_size, compression_level);
+
+    if (ZSTD_isError(compressed_size)) {
+        // Compression failed
+        return {};
+    }
+
+    compressed.resize(compressed_size);
+
+    return compressed;
+}
+
+std::vector<u8> CompressDataZSTDDefault(const u8* source, std::size_t source_size) {
+    return CompressDataZSTD(source, source_size, ZSTD_CLEVEL_DEFAULT);
+}
+
+std::vector<u8> DecompressDataZSTD(const std::vector<u8>& compressed) {
+    const std::size_t decompressed_size =
+        ZSTD_getDecompressedSize(compressed.data(), compressed.size());
+    std::vector<u8> decompressed(decompressed_size);
+
+    const std::size_t uncompressed_result_size = ZSTD_decompress(
+        decompressed.data(), decompressed.size(), compressed.data(), compressed.size());
+
+    if (decompressed_size != uncompressed_result_size || ZSTD_isError(uncompressed_result_size)) {
+        // Decompression failed
+        return {};
+    }
+    return decompressed;
+}
+
+} // namespace Common::Compression
diff --git a/src/common/zstd_compression.h b/src/common/zstd_compression.h
new file mode 100644
index 000000000..e0a64b035
--- /dev/null
+++ b/src/common/zstd_compression.h
@@ -0,0 +1,42 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common::Compression {
+
+/**
+ * Compresses a source memory region with Zstandard and returns the compressed data in a vector.
+ *
+ * @param source the uncompressed source memory region.
+ * @param source_size the size in bytes of the uncompressed source memory region.
+ * @param compression_level the used compression level. Should be between 1 and 22.
+ *
+ * @return the compressed data.
+ */
+std::vector<u8> CompressDataZSTD(const u8* source, std::size_t source_size, s32 compression_level);
+
+/**
+ * Compresses a source memory region with Zstandard with the default compression level and returns
+ * the compressed data in a vector.
+ *
+ * @param source the uncompressed source memory region.
+ * @param source_size the size in bytes of the uncompressed source memory region.
+ *
+ * @return the compressed data.
+ */
+std::vector<u8> CompressDataZSTDDefault(const u8* source, std::size_t source_size);
+
+/**
+ * Decompresses a source memory region with Zstandard and returns the uncompressed data in a vector.
+ *
+ * @param compressed the compressed source memory region.
+ *
+ * @return the decompressed data.
+ */
+std::vector<u8> DecompressDataZSTD(const std::vector<u8>& compressed);
+
+} // namespace Common::Compression
\ No newline at end of file
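Unlike the LZ4 helpers above, a Zstandard frame produced by single-shot ZSTD_compress records its decompressed size, so DecompressDataZSTD only needs the compressed buffer. A minimal round-trip sketch, illustrative only (RoundtripZSTD is a hypothetical helper; an empty vector signals failure):

    #include <vector>

    #include "common/common_types.h"
    #include "common/zstd_compression.h"

    std::vector<u8> RoundtripZSTD(const std::vector<u8>& input) {
        const std::vector<u8> compressed =
            Common::Compression::CompressDataZSTDDefault(input.data(), input.size());
        return Common::Compression::DecompressDataZSTD(compressed);
    }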