Diffstat (limited to 'src')
-rw-r--r--  src/common/CMakeLists.txt | 4
-rw-r--r--  src/common/bit_util.h | 39
-rw-r--r--  src/common/detached_tasks.cpp | 8
-rw-r--r--  src/common/logging/backend.cpp | 6
-rw-r--r--  src/common/lz4_compression.cpp | 76
-rw-r--r--  src/common/lz4_compression.h | 55
-rw-r--r--  src/common/multi_level_queue.h | 337
-rw-r--r--  src/common/thread.cpp | 37
-rw-r--r--  src/common/thread.h | 14
-rw-r--r--  src/common/threadsafe_queue.h | 4
-rw-r--r--  src/core/CMakeLists.txt | 2
-rw-r--r--  src/core/core_cpu.cpp | 6
-rw-r--r--  src/core/frontend/emu_window.cpp | 6
-rw-r--r--  src/core/hle/kernel/address_arbiter.cpp | 6
-rw-r--r--  src/core/hle/kernel/code_set.h | 3
-rw-r--r--  src/core/hle/kernel/kernel.cpp | 11
-rw-r--r--  src/core/hle/kernel/kernel.h | 3
-rw-r--r--  src/core/hle/kernel/object.cpp | 1
-rw-r--r--  src/core/hle/kernel/object.h | 1
-rw-r--r--  src/core/hle/kernel/process.cpp | 34
-rw-r--r--  src/core/hle/kernel/process.h | 38
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 2
-rw-r--r--  src/core/hle/kernel/readable_event.h | 2
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 7
-rw-r--r--  src/core/hle/kernel/resource_limit.h | 11
-rw-r--r--  src/core/hle/kernel/scheduler.cpp | 60
-rw-r--r--  src/core/hle/kernel/scheduler.h | 6
-rw-r--r--  src/core/hle/kernel/server_port.cpp | 2
-rw-r--r--  src/core/hle/kernel/server_port.h | 2
-rw-r--r--  src/core/hle/kernel/server_session.cpp | 2
-rw-r--r--  src/core/hle/kernel/server_session.h | 2
-rw-r--r--  src/core/hle/kernel/shared_memory.cpp | 11
-rw-r--r--  src/core/hle/kernel/shared_memory.h | 10
-rw-r--r--  src/core/hle/kernel/svc.cpp | 119
-rw-r--r--  src/core/hle/kernel/svc_wrap.h | 8
-rw-r--r--  src/core/hle/kernel/thread.cpp | 21
-rw-r--r--  src/core/hle/kernel/thread.h | 9
-rw-r--r--  src/core/hle/kernel/transfer_memory.cpp | 22
-rw-r--r--  src/core/hle/kernel/transfer_memory.h | 20
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp | 76
-rw-r--r--  src/core/hle/kernel/vm_manager.h | 55
-rw-r--r--  src/core/hle/kernel/wait_object.h | 2
-rw-r--r--  src/core/hle/service/am/am.cpp | 99
-rw-r--r--  src/core/hle/service/am/am.h | 15
-rw-r--r--  src/core/hle/service/audio/audout_u.cpp | 4
-rw-r--r--  src/core/hle/service/audio/audren_u.cpp | 13
-rw-r--r--  src/core/hle/service/fatal/fatal.cpp | 89
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 70
-rw-r--r--  src/core/hle/service/nfc/nfc.cpp | 2
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp | 2
-rw-r--r--  src/core/hle/service/sockets/sfdnsres.cpp | 12
-rw-r--r--  src/core/hle/service/spl/module.cpp | 4
-rw-r--r--  src/core/hle/service/ssl/ssl.cpp | 9
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 1
-rw-r--r--  src/core/loader/elf.cpp | 2
-rw-r--r--  src/core/loader/nro.cpp | 2
-rw-r--r--  src/core/loader/nso.cpp | 17
-rw-r--r--  src/core/perf_stats.cpp | 10
-rw-r--r--  src/core/settings.cpp | 1
-rw-r--r--  src/core/settings.h | 1
-rw-r--r--  src/input_common/keyboard.cpp | 8
-rw-r--r--  src/input_common/motion_emu.cpp | 10
-rw-r--r--  src/input_common/sdl/sdl_impl.cpp | 26
-rw-r--r--  src/tests/CMakeLists.txt | 2
-rw-r--r--  src/tests/common/bit_utils.cpp | 23
-rw-r--r--  src/tests/common/multi_level_queue.cpp | 55
-rw-r--r--  src/video_core/CMakeLists.txt | 6
-rw-r--r--  src/video_core/debug_utils/debug_utils.cpp | 4
-rw-r--r--  src/video_core/debug_utils/debug_utils.h | 4
-rw-r--r--  src/video_core/gpu.cpp | 9
-rw-r--r--  src/video_core/gpu.h | 6
-rw-r--r--  src/video_core/gpu_thread.cpp | 4
-rw-r--r--  src/video_core/gpu_thread.h | 15
-rw-r--r--  src/video_core/rasterizer_cache.h | 16
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_global_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_primitive_assembler.cpp | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_primitive_assembler.h | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 12
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 9
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer_cache.h | 7
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 14
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 5
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.h | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_disk_cache.cpp | 46
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.cpp | 1
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_gen.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.cpp | 17
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_manager.h | 11
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp | 210
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.h | 92
-rw-r--r--  src/web_service/web_backend.cpp | 4
-rw-r--r--  src/yuzu/applets/profile_select.cpp | 7
-rw-r--r--  src/yuzu/applets/software_keyboard.cpp | 18
-rw-r--r--  src/yuzu/applets/web_browser.cpp | 4
-rw-r--r--  src/yuzu/bootmanager.cpp | 2
-rw-r--r--  src/yuzu/bootmanager.h | 2
-rw-r--r--  src/yuzu/configuration/config.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_general.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_general.ui | 20
-rw-r--r--  src/yuzu/debugger/profiler.cpp | 1
-rw-r--r--  src/yuzu/debugger/profiler.h | 9
-rw-r--r--  src/yuzu/debugger/wait_tree.cpp | 4
-rw-r--r--  src/yuzu/game_list.cpp | 4
-rw-r--r--  src/yuzu/game_list.h | 1
-rw-r--r--  src/yuzu/main.cpp | 48
-rw-r--r--  src/yuzu/main.h | 1
-rw-r--r--  src/yuzu_cmd/config.cpp | 1
114 files changed, 1657 insertions, 615 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 43ae8a9e7..5639021d3 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -91,6 +91,8 @@ add_library(common STATIC
91 logging/log.h 91 logging/log.h
92 logging/text_formatter.cpp 92 logging/text_formatter.cpp
93 logging/text_formatter.h 93 logging/text_formatter.h
94 lz4_compression.cpp
95 lz4_compression.h
94 math_util.h 96 math_util.h
95 memory_hook.cpp 97 memory_hook.cpp
96 memory_hook.h 98 memory_hook.h
@@ -98,6 +100,7 @@ add_library(common STATIC
98 microprofile.h 100 microprofile.h
99 microprofileui.h 101 microprofileui.h
100 misc.cpp 102 misc.cpp
103 multi_level_queue.h
101 page_table.cpp 104 page_table.cpp
102 page_table.h 105 page_table.h
103 param_package.cpp 106 param_package.cpp
@@ -135,3 +138,4 @@ endif()
135create_target_directory_groups(common) 138create_target_directory_groups(common)
136 139
137target_link_libraries(common PUBLIC Boost::boost fmt microprofile) 140target_link_libraries(common PUBLIC Boost::boost fmt microprofile)
141target_link_libraries(common PRIVATE lz4_static)
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 1eea17ba1..a4f9ed4aa 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -58,4 +58,43 @@ inline u64 CountLeadingZeroes64(u64 value) {
58 return __builtin_clzll(value); 58 return __builtin_clzll(value);
59} 59}
60#endif 60#endif
61
62#ifdef _MSC_VER
63inline u32 CountTrailingZeroes32(u32 value) {
64 unsigned long trailing_zero = 0;
65
66 if (_BitScanForward(&trailing_zero, value) != 0) {
67 return trailing_zero;
68 }
69
70 return 32;
71}
72
73inline u64 CountTrailingZeroes64(u64 value) {
74 unsigned long trailing_zero = 0;
75
76 if (_BitScanForward64(&trailing_zero, value) != 0) {
77 return trailing_zero;
78 }
79
80 return 64;
81}
82#else
83inline u32 CountTrailingZeroes32(u32 value) {
84 if (value == 0) {
85 return 32;
86 }
87
88 return __builtin_ctz(value);
89}
90
91inline u64 CountTrailingZeroes64(u64 value) {
92 if (value == 0) {
93 return 64;
94 }
95
96 return __builtin_ctzll(value);
97}
98#endif
99
61} // namespace Common 100} // namespace Common
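A minimal usage sketch of the new trailing-zero helpers (illustrative only, not part of the change; expected results noted in the comments):

#include <cstdio>

#include "common/bit_util.h"

int main() {
    // 0b1000 has three trailing zero bits.
    std::printf("%u\n", Common::CountTrailingZeroes32(0b1000u));                        // 3
    // A zero input has no set bit, so the full bit width is returned.
    std::printf("%u\n", Common::CountTrailingZeroes32(0u));                             // 32
    std::printf("%u\n", static_cast<unsigned>(Common::CountTrailingZeroes64(1ULL << 40))); // 40
    return 0;
}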
diff --git a/src/common/detached_tasks.cpp b/src/common/detached_tasks.cpp
index a347d9e02..f268d6021 100644
--- a/src/common/detached_tasks.cpp
+++ b/src/common/detached_tasks.cpp
@@ -16,22 +16,22 @@ DetachedTasks::DetachedTasks() {
16} 16}
17 17
18void DetachedTasks::WaitForAllTasks() { 18void DetachedTasks::WaitForAllTasks() {
19 std::unique_lock<std::mutex> lock(mutex); 19 std::unique_lock lock{mutex};
20 cv.wait(lock, [this]() { return count == 0; }); 20 cv.wait(lock, [this]() { return count == 0; });
21} 21}
22 22
23DetachedTasks::~DetachedTasks() { 23DetachedTasks::~DetachedTasks() {
24 std::unique_lock<std::mutex> lock(mutex); 24 std::unique_lock lock{mutex};
25 ASSERT(count == 0); 25 ASSERT(count == 0);
26 instance = nullptr; 26 instance = nullptr;
27} 27}
28 28
29void DetachedTasks::AddTask(std::function<void()> task) { 29void DetachedTasks::AddTask(std::function<void()> task) {
30 std::unique_lock<std::mutex> lock(instance->mutex); 30 std::unique_lock lock{instance->mutex};
31 ++instance->count; 31 ++instance->count;
32 std::thread([task{std::move(task)}]() { 32 std::thread([task{std::move(task)}]() {
33 task(); 33 task();
34 std::unique_lock<std::mutex> lock(instance->mutex); 34 std::unique_lock lock{instance->mutex};
35 --instance->count; 35 --instance->count;
36 std::notify_all_at_thread_exit(instance->cv, std::move(lock)); 36 std::notify_all_at_thread_exit(instance->cv, std::move(lock));
37 }) 37 })
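The lock changes throughout this commit drop the explicit template argument and rely on C++17 class template argument deduction; both spellings construct the same lock type, as this sketch shows (illustrative only):

#include <mutex>

std::mutex mutex;

void OldStyle() {
    std::unique_lock<std::mutex> lock(mutex); // template argument spelled out explicitly
}

void NewStyle() {
    std::unique_lock lock{mutex}; // C++17 CTAD deduces std::unique_lock<std::mutex>
}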
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 4462ff3fb..a03179520 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -46,12 +46,12 @@ public:
46 } 46 }
47 47
48 void AddBackend(std::unique_ptr<Backend> backend) { 48 void AddBackend(std::unique_ptr<Backend> backend) {
49 std::lock_guard<std::mutex> lock(writing_mutex); 49 std::lock_guard lock{writing_mutex};
50 backends.push_back(std::move(backend)); 50 backends.push_back(std::move(backend));
51 } 51 }
52 52
53 void RemoveBackend(std::string_view backend_name) { 53 void RemoveBackend(std::string_view backend_name) {
54 std::lock_guard<std::mutex> lock(writing_mutex); 54 std::lock_guard lock{writing_mutex};
55 const auto it = 55 const auto it =
56 std::remove_if(backends.begin(), backends.end(), 56 std::remove_if(backends.begin(), backends.end(),
57 [&backend_name](const auto& i) { return backend_name == i->GetName(); }); 57 [&backend_name](const auto& i) { return backend_name == i->GetName(); });
@@ -80,7 +80,7 @@ private:
80 backend_thread = std::thread([&] { 80 backend_thread = std::thread([&] {
81 Entry entry; 81 Entry entry;
82 auto write_logs = [&](Entry& e) { 82 auto write_logs = [&](Entry& e) {
83 std::lock_guard<std::mutex> lock(writing_mutex); 83 std::lock_guard lock{writing_mutex};
84 for (const auto& backend : backends) { 84 for (const auto& backend : backends) {
85 backend->Write(e); 85 backend->Write(e);
86 } 86 }
diff --git a/src/common/lz4_compression.cpp b/src/common/lz4_compression.cpp
new file mode 100644
index 000000000..ade6759bb
--- /dev/null
+++ b/src/common/lz4_compression.cpp
@@ -0,0 +1,76 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <lz4hc.h>
7
8#include "common/assert.h"
9#include "common/lz4_compression.h"
10
11namespace Common::Compression {
12
13std::vector<u8> CompressDataLZ4(const u8* source, std::size_t source_size) {
14 ASSERT_MSG(source_size <= LZ4_MAX_INPUT_SIZE, "Source size exceeds LZ4 maximum input size");
15
16 const auto source_size_int = static_cast<int>(source_size);
17 const int max_compressed_size = LZ4_compressBound(source_size_int);
18 std::vector<u8> compressed(max_compressed_size);
19
20 const int compressed_size = LZ4_compress_default(reinterpret_cast<const char*>(source),
21 reinterpret_cast<char*>(compressed.data()),
22 source_size_int, max_compressed_size);
23
24 if (compressed_size <= 0) {
25 // Compression failed
26 return {};
27 }
28
29 compressed.resize(compressed_size);
30
31 return compressed;
32}
33
34std::vector<u8> CompressDataLZ4HC(const u8* source, std::size_t source_size,
35 s32 compression_level) {
36 ASSERT_MSG(source_size <= LZ4_MAX_INPUT_SIZE, "Source size exceeds LZ4 maximum input size");
37
38 compression_level = std::clamp(compression_level, LZ4HC_CLEVEL_MIN, LZ4HC_CLEVEL_MAX);
39
40 const auto source_size_int = static_cast<int>(source_size);
41 const int max_compressed_size = LZ4_compressBound(source_size_int);
42 std::vector<u8> compressed(max_compressed_size);
43
44 const int compressed_size = LZ4_compress_HC(
45 reinterpret_cast<const char*>(source), reinterpret_cast<char*>(compressed.data()),
46 source_size_int, max_compressed_size, compression_level);
47
48 if (compressed_size <= 0) {
49 // Compression failed
50 return {};
51 }
52
53 compressed.resize(compressed_size);
54
55 return compressed;
56}
57
58std::vector<u8> CompressDataLZ4HCMax(const u8* source, std::size_t source_size) {
59 return CompressDataLZ4HC(source, source_size, LZ4HC_CLEVEL_MAX);
60}
61
62std::vector<u8> DecompressDataLZ4(const std::vector<u8>& compressed,
63 std::size_t uncompressed_size) {
64 std::vector<u8> uncompressed(uncompressed_size);
65 const int size_check = LZ4_decompress_safe(reinterpret_cast<const char*>(compressed.data()),
66 reinterpret_cast<char*>(uncompressed.data()),
67 static_cast<int>(compressed.size()),
68 static_cast<int>(uncompressed.size()));
69 if (static_cast<int>(uncompressed_size) != size_check) {
70 // Decompression failed
71 return {};
72 }
73 return uncompressed;
74}
75
76} // namespace Common::Compression
diff --git a/src/common/lz4_compression.h b/src/common/lz4_compression.h
new file mode 100644
index 000000000..fe2231a6c
--- /dev/null
+++ b/src/common/lz4_compression.h
@@ -0,0 +1,55 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <vector>
6
7#include "common/common_types.h"
8
9namespace Common::Compression {
10
11/**
12 * Compresses a source memory region with LZ4 and returns the compressed data in a vector.
13 *
14 * @param source the uncompressed source memory region.
15 * @param source_size the size in bytes of the uncompressed source memory region.
16 *
17 * @return the compressed data.
18 */
19std::vector<u8> CompressDataLZ4(const u8* source, std::size_t source_size);
20
21/**
22 * Utilizes the LZ4 subalgorithm LZ4HC with the specified compression level. Higher compression
23 * levels result in a smaller compressed size, but require more CPU time for compression. The
24 * compression level has almost no impact on decompression speed. Data compressed with LZ4HC can
25 * also be decompressed with the default LZ4 decompression.
26 *
27 * @param source the uncompressed source memory region.
28 * @param source_size the size in bytes of the uncompressed source memory region.
29 * @param compression_level the used compression level. Should be between 3 and 12.
30 *
31 * @return the compressed data.
32 */
33std::vector<u8> CompressDataLZ4HC(const u8* source, std::size_t source_size, s32 compression_level);
34
35/**
36 * Utilizes the LZ4 subalgorithm LZ4HC with the highest possible compression level.
37 *
38 * @param source the uncompressed source memory region.
39 * @param source_size the size in bytes of the uncompressed source memory region.
40 *
41 * @return the compressed data.
42 */
43std::vector<u8> CompressDataLZ4HCMax(const u8* source, std::size_t source_size);
44
45/**
46 * Decompresses a source memory region with LZ4 and returns the uncompressed data in a vector.
47 *
48 * @param compressed the compressed source memory region.
49 * @param uncompressed_size the size in bytes of the uncompressed data.
50 *
51 * @return the decompressed data.
52 */
53std::vector<u8> DecompressDataLZ4(const std::vector<u8>& compressed, std::size_t uncompressed_size);
54
55} // namespace Common::Compression
\ No newline at end of file
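A hedged sketch of how the new Common::Compression helpers fit together. LZ4 does not embed the uncompressed size in its stream, so the caller has to carry it alongside the compressed buffer; the RoundTrip helper below is illustrative and not part of the commit:

#include <vector>

#include "common/common_types.h"
#include "common/lz4_compression.h"

std::vector<u8> RoundTrip(const std::vector<u8>& input) {
    // Fast path; CompressDataLZ4HC/CompressDataLZ4HCMax trade CPU time for a smaller result.
    const std::vector<u8> compressed =
        Common::Compression::CompressDataLZ4(input.data(), input.size());
    if (compressed.empty()) {
        return {}; // compression failed
    }

    // Decompression needs the original size, which the caller must remember.
    return Common::Compression::DecompressDataLZ4(compressed, input.size());
}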
diff --git a/src/common/multi_level_queue.h b/src/common/multi_level_queue.h
new file mode 100644
index 000000000..2b61b91e0
--- /dev/null
+++ b/src/common/multi_level_queue.h
@@ -0,0 +1,337 @@
1// Copyright 2019 TuxSH
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <iterator>
9#include <list>
10#include <utility>
11
12#include "common/bit_util.h"
13#include "common/common_types.h"
14
15namespace Common {
16
17/**
18 * A MultiLevelQueue is a type of priority queue which has the following characteristics:
19 * - iterable through each of its elements.
20 * - back can be obtained.
21 * - O(1) add, lookup (both front and back)
22 * - discrete priorities and a max of 64 priorities (limited domain)
23 * This type of priority queue is normally used for managing threads within a scheduler
24 */
25template <typename T, std::size_t Depth>
26class MultiLevelQueue {
27public:
28 using value_type = T;
29 using reference = value_type&;
30 using const_reference = const value_type&;
31 using pointer = value_type*;
32 using const_pointer = const value_type*;
33
34 using difference_type = typename std::pointer_traits<pointer>::difference_type;
35 using size_type = std::size_t;
36
37 template <bool is_constant>
38 class iterator_impl {
39 public:
40 using iterator_category = std::bidirectional_iterator_tag;
41 using value_type = T;
42 using pointer = std::conditional_t<is_constant, T*, const T*>;
43 using reference = std::conditional_t<is_constant, const T&, T&>;
44 using difference_type = typename std::pointer_traits<pointer>::difference_type;
45
46 friend bool operator==(const iterator_impl& lhs, const iterator_impl& rhs) {
47 if (lhs.IsEnd() && rhs.IsEnd())
48 return true;
49 return std::tie(lhs.current_priority, lhs.it) == std::tie(rhs.current_priority, rhs.it);
50 }
51
52 friend bool operator!=(const iterator_impl& lhs, const iterator_impl& rhs) {
53 return !operator==(lhs, rhs);
54 }
55
56 reference operator*() const {
57 return *it;
58 }
59
60 pointer operator->() const {
61 return it.operator->();
62 }
63
64 iterator_impl& operator++() {
65 if (IsEnd()) {
66 return *this;
67 }
68
69 ++it;
70
71 if (it == GetEndItForPrio()) {
72 u64 prios = mlq.used_priorities;
73 prios &= ~((1ULL << (current_priority + 1)) - 1);
74 if (prios == 0) {
75 current_priority = mlq.depth();
76 } else {
77 current_priority = CountTrailingZeroes64(prios);
78 it = GetBeginItForPrio();
79 }
80 }
81 return *this;
82 }
83
84 iterator_impl& operator--() {
85 if (IsEnd()) {
86 if (mlq.used_priorities != 0) {
87 current_priority = 63 - CountLeadingZeroes64(mlq.used_priorities);
88 it = GetEndItForPrio();
89 --it;
90 }
91 } else if (it == GetBeginItForPrio()) {
92 u64 prios = mlq.used_priorities;
93 prios &= (1ULL << current_priority) - 1;
94 if (prios != 0) {
95 current_priority = CountTrailingZeroes64(prios);
96 it = GetEndItForPrio();
97 --it;
98 }
99 } else {
100 --it;
101 }
102 return *this;
103 }
104
105 iterator_impl operator++(int) {
106 const iterator_impl v{*this};
107 ++(*this);
108 return v;
109 }
110
111 iterator_impl operator--(int) {
112 const iterator_impl v{*this};
113 --(*this);
114 return v;
115 }
116
117 // allow implicit const->non-const
118 iterator_impl(const iterator_impl<false>& other)
119 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
120
121 iterator_impl(const iterator_impl<true>& other)
122 : mlq(other.mlq), it(other.it), current_priority(other.current_priority) {}
123
124 iterator_impl& operator=(const iterator_impl<false>& other) {
125 mlq = other.mlq;
126 it = other.it;
127 current_priority = other.current_priority;
128 return *this;
129 }
130
131 friend class iterator_impl<true>;
132 iterator_impl() = default;
133
134 private:
135 friend class MultiLevelQueue;
136 using container_ref =
137 std::conditional_t<is_constant, const MultiLevelQueue&, MultiLevelQueue&>;
138 using list_iterator = std::conditional_t<is_constant, typename std::list<T>::const_iterator,
139 typename std::list<T>::iterator>;
140
141 explicit iterator_impl(container_ref mlq, list_iterator it, u32 current_priority)
142 : mlq(mlq), it(it), current_priority(current_priority) {}
143 explicit iterator_impl(container_ref mlq, u32 current_priority)
144 : mlq(mlq), it(), current_priority(current_priority) {}
145
146 bool IsEnd() const {
147 return current_priority == mlq.depth();
148 }
149
150 list_iterator GetBeginItForPrio() const {
151 return mlq.levels[current_priority].begin();
152 }
153
154 list_iterator GetEndItForPrio() const {
155 return mlq.levels[current_priority].end();
156 }
157
158 container_ref mlq;
159 list_iterator it;
160 u32 current_priority;
161 };
162
163 using iterator = iterator_impl<false>;
164 using const_iterator = iterator_impl<true>;
165
166 void add(const T& element, u32 priority, bool send_back = true) {
167 if (send_back)
168 levels[priority].push_back(element);
169 else
170 levels[priority].push_front(element);
171 used_priorities |= 1ULL << priority;
172 }
173
174 void remove(const T& element, u32 priority) {
175 auto it = ListIterateTo(levels[priority], element);
176 if (it == levels[priority].end())
177 return;
178 levels[priority].erase(it);
179 if (levels[priority].empty()) {
180 used_priorities &= ~(1ULL << priority);
181 }
182 }
183
184 void adjust(const T& element, u32 old_priority, u32 new_priority, bool adjust_front = false) {
185 remove(element, old_priority);
186 add(element, new_priority, !adjust_front);
187 }
188 void adjust(const_iterator it, u32 old_priority, u32 new_priority, bool adjust_front = false) {
189 adjust(*it, old_priority, new_priority, adjust_front);
190 }
191
192 void transfer_to_front(const T& element, u32 priority, MultiLevelQueue& other) {
193 ListSplice(other.levels[priority], other.levels[priority].begin(), levels[priority],
194 ListIterateTo(levels[priority], element));
195
196 other.used_priorities |= 1ULL << priority;
197
198 if (levels[priority].empty()) {
199 used_priorities &= ~(1ULL << priority);
200 }
201 }
202
203 void transfer_to_front(const_iterator it, u32 priority, MultiLevelQueue& other) {
204 transfer_to_front(*it, priority, other);
205 }
206
207 void transfer_to_back(const T& element, u32 priority, MultiLevelQueue& other) {
208 ListSplice(other.levels[priority], other.levels[priority].end(), levels[priority],
209 ListIterateTo(levels[priority], element));
210
211 other.used_priorities |= 1ULL << priority;
212
213 if (levels[priority].empty()) {
214 used_priorities &= ~(1ULL << priority);
215 }
216 }
217
218 void transfer_to_back(const_iterator it, u32 priority, MultiLevelQueue& other) {
219 transfer_to_back(*it, priority, other);
220 }
221
222 void yield(u32 priority, std::size_t n = 1) {
223 ListShiftForward(levels[priority], n);
224 }
225
226 std::size_t depth() const {
227 return Depth;
228 }
229
230 std::size_t size(u32 priority) const {
231 return levels[priority].size();
232 }
233
234 std::size_t size() const {
235 u64 priorities = used_priorities;
236 std::size_t size = 0;
237 while (priorities != 0) {
238 const u64 current_priority = CountTrailingZeroes64(priorities);
239 size += levels[current_priority].size();
240 priorities &= ~(1ULL << current_priority);
241 }
242 return size;
243 }
244
245 bool empty() const {
246 return used_priorities == 0;
247 }
248
249 bool empty(u32 priority) const {
250 return (used_priorities & (1ULL << priority)) == 0;
251 }
252
253 u32 highest_priority_set(u32 max_priority = 0) const {
254 const u64 priorities =
255 max_priority == 0 ? used_priorities : (used_priorities & ~((1ULL << max_priority) - 1));
256 return priorities == 0 ? Depth : static_cast<u32>(CountTrailingZeroes64(priorities));
257 }
258
259 u32 lowest_priority_set(u32 min_priority = Depth - 1) const {
260 const u64 priorities = min_priority >= Depth - 1
261 ? used_priorities
262 : (used_priorities & ((1ULL << (min_priority + 1)) - 1));
263 return priorities == 0 ? Depth : 63 - CountLeadingZeroes64(priorities);
264 }
265
266 const_iterator cbegin(u32 max_prio = 0) const {
267 const u32 priority = highest_priority_set(max_prio);
268 return priority == Depth ? cend()
269 : const_iterator{*this, levels[priority].cbegin(), priority};
270 }
271 const_iterator begin(u32 max_prio = 0) const {
272 return cbegin(max_prio);
273 }
274 iterator begin(u32 max_prio = 0) {
275 const u32 priority = highest_priority_set(max_prio);
276 return priority == Depth ? end() : iterator{*this, levels[priority].begin(), priority};
277 }
278
279 const_iterator cend(u32 min_prio = Depth - 1) const {
280 return min_prio == Depth - 1 ? const_iterator{*this, Depth} : cbegin(min_prio + 1);
281 }
282 const_iterator end(u32 min_prio = Depth - 1) const {
283 return cend(min_prio);
284 }
285 iterator end(u32 min_prio = Depth - 1) {
286 return min_prio == Depth - 1 ? iterator{*this, Depth} : begin(min_prio + 1);
287 }
288
289 T& front(u32 max_priority = 0) {
290 const u32 priority = highest_priority_set(max_priority);
291 return levels[priority == Depth ? 0 : priority].front();
292 }
293 const T& front(u32 max_priority = 0) const {
294 const u32 priority = highest_priority_set(max_priority);
295 return levels[priority == Depth ? 0 : priority].front();
296 }
297
298 T back(u32 min_priority = Depth - 1) {
299 const u32 priority = lowest_priority_set(min_priority); // intended
300 return levels[priority == Depth ? 63 : priority].back();
301 }
302 const T& back(u32 min_priority = Depth - 1) const {
303 const u32 priority = lowest_priority_set(min_priority); // intended
304 return levels[priority == Depth ? 63 : priority].back();
305 }
306
307private:
308 using const_list_iterator = typename std::list<T>::const_iterator;
309
310 static void ListShiftForward(std::list<T>& list, const std::size_t shift = 1) {
311 if (shift >= list.size()) {
312 return;
313 }
314
315 const auto begin_range = list.begin();
316 const auto end_range = std::next(begin_range, shift);
317 list.splice(list.end(), list, begin_range, end_range);
318 }
319
320 static void ListSplice(std::list<T>& in_list, const_list_iterator position,
321 std::list<T>& out_list, const_list_iterator element) {
322 in_list.splice(position, out_list, element);
323 }
324
325 static const_list_iterator ListIterateTo(const std::list<T>& list, const T& element) {
326 auto it = list.cbegin();
327 while (it != list.cend() && *it != element) {
328 ++it;
329 }
330 return it;
331 }
332
333 std::array<std::list<T>, Depth> levels;
334 u64 used_priorities = 0;
335};
336
337} // namespace Common
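A small usage sketch of the new container with a toy element type (illustrative only; expected output noted in comments). A lower priority index means a higher priority, matching the scheduler's convention:

#include <cstdio>

#include "common/multi_level_queue.h"

int main() {
    Common::MultiLevelQueue<int, 64> queue;

    queue.add(10, 3);      // appended at priority level 3
    queue.add(20, 3);
    queue.add(30, 1);      // lower index = higher priority

    std::printf("%d\n", queue.front());                 // 30
    std::printf("%zu\n", queue.size());                 // 3
    std::printf("%u\n", queue.highest_priority_set());  // 1

    queue.yield(3);        // rotates level 3: 10 now sits behind 20

    for (const int value : queue) {                     // iterates levels in priority order
        std::printf("%d ", value);                      // 30 20 10
    }
    std::printf("\n");

    queue.remove(30, 1);   // level 1 becomes empty and its bit is cleared
    std::printf("%d\n", queue.front());                 // 20
    return 0;
}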
diff --git a/src/common/thread.cpp b/src/common/thread.cpp
index 5144c0d9f..fe7a420cc 100644
--- a/src/common/thread.cpp
+++ b/src/common/thread.cpp
@@ -27,18 +27,6 @@ namespace Common {
27 27
28#ifdef _MSC_VER 28#ifdef _MSC_VER
29 29
30void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
31 SetThreadAffinityMask(thread, mask);
32}
33
34void SetCurrentThreadAffinity(u32 mask) {
35 SetThreadAffinityMask(GetCurrentThread(), mask);
36}
37
38void SwitchCurrentThread() {
39 SwitchToThread();
40}
41
42// Sets the debugger-visible name of the current thread. 30// Sets the debugger-visible name of the current thread.
43// Uses undocumented (actually, it is now documented) trick. 31// Uses undocumented (actually, it is now documented) trick.
44// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp 32// http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vsdebug/html/vxtsksettingthreadname.asp
@@ -70,31 +58,6 @@ void SetCurrentThreadName(const char* name) {
70 58
71#else // !MSVC_VER, so must be POSIX threads 59#else // !MSVC_VER, so must be POSIX threads
72 60
73void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask) {
74#ifdef __APPLE__
75 thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (integer_t*)&mask, 1);
76#elif (defined __linux__ || defined __FreeBSD__) && !(defined ANDROID)
77 cpu_set_t cpu_set;
78 CPU_ZERO(&cpu_set);
79
80 for (int i = 0; i != sizeof(mask) * 8; ++i)
81 if ((mask >> i) & 1)
82 CPU_SET(i, &cpu_set);
83
84 pthread_setaffinity_np(thread, sizeof(cpu_set), &cpu_set);
85#endif
86}
87
88void SetCurrentThreadAffinity(u32 mask) {
89 SetThreadAffinity(pthread_self(), mask);
90}
91
92#ifndef _WIN32
93void SwitchCurrentThread() {
94 usleep(1000 * 1);
95}
96#endif
97
98// MinGW with the POSIX threading model does not support pthread_setname_np 61// MinGW with the POSIX threading model does not support pthread_setname_np
99#if !defined(_WIN32) || defined(_MSC_VER) 62#if !defined(_WIN32) || defined(_MSC_VER)
100void SetCurrentThreadName(const char* name) { 63void SetCurrentThreadName(const char* name) {
diff --git a/src/common/thread.h b/src/common/thread.h
index 2cf74452d..0cfd98be6 100644
--- a/src/common/thread.h
+++ b/src/common/thread.h
@@ -9,14 +9,13 @@
9#include <cstddef> 9#include <cstddef>
10#include <mutex> 10#include <mutex>
11#include <thread> 11#include <thread>
12#include "common/common_types.h"
13 12
14namespace Common { 13namespace Common {
15 14
16class Event { 15class Event {
17public: 16public:
18 void Set() { 17 void Set() {
19 std::lock_guard<std::mutex> lk(mutex); 18 std::lock_guard lk{mutex};
20 if (!is_set) { 19 if (!is_set) {
21 is_set = true; 20 is_set = true;
22 condvar.notify_one(); 21 condvar.notify_one();
@@ -24,14 +23,14 @@ public:
24 } 23 }
25 24
26 void Wait() { 25 void Wait() {
27 std::unique_lock<std::mutex> lk(mutex); 26 std::unique_lock lk{mutex};
28 condvar.wait(lk, [&] { return is_set; }); 27 condvar.wait(lk, [&] { return is_set; });
29 is_set = false; 28 is_set = false;
30 } 29 }
31 30
32 template <class Clock, class Duration> 31 template <class Clock, class Duration>
33 bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) { 32 bool WaitUntil(const std::chrono::time_point<Clock, Duration>& time) {
34 std::unique_lock<std::mutex> lk(mutex); 33 std::unique_lock lk{mutex};
35 if (!condvar.wait_until(lk, time, [this] { return is_set; })) 34 if (!condvar.wait_until(lk, time, [this] { return is_set; }))
36 return false; 35 return false;
37 is_set = false; 36 is_set = false;
@@ -39,7 +38,7 @@ public:
39 } 38 }
40 39
41 void Reset() { 40 void Reset() {
42 std::unique_lock<std::mutex> lk(mutex); 41 std::unique_lock lk{mutex};
43 // no other action required, since wait loops on the predicate and any lingering signal will 42 // no other action required, since wait loops on the predicate and any lingering signal will
44 // get cleared on the first iteration 43 // get cleared on the first iteration
45 is_set = false; 44 is_set = false;
@@ -57,7 +56,7 @@ public:
57 56
58 /// Blocks until all "count" threads have called Sync() 57 /// Blocks until all "count" threads have called Sync()
59 void Sync() { 58 void Sync() {
60 std::unique_lock<std::mutex> lk(mutex); 59 std::unique_lock lk{mutex};
61 const std::size_t current_generation = generation; 60 const std::size_t current_generation = generation;
62 61
63 if (++waiting == count) { 62 if (++waiting == count) {
@@ -78,9 +77,6 @@ private:
78 std::size_t generation = 0; // Incremented once each time the barrier is used 77 std::size_t generation = 0; // Incremented once each time the barrier is used
79}; 78};
80 79
81void SetThreadAffinity(std::thread::native_handle_type thread, u32 mask);
82void SetCurrentThreadAffinity(u32 mask);
83void SwitchCurrentThread(); // On Linux, this is equal to sleep 1ms
84void SetCurrentThreadName(const char* name); 80void SetCurrentThreadName(const char* name);
85 81
86} // namespace Common 82} // namespace Common
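The Common::Event changes above only modernize the locking; its auto-reset semantics are unchanged. A minimal two-thread sketch (illustrative only):

#include <thread>

#include "common/thread.h"

int main() {
    Common::Event event;

    std::thread worker([&event] {
        // ... produce something ...
        event.Set();   // wakes the waiter; Wait() clears the flag afterwards
    });

    event.Wait();      // blocks until the worker calls Set()
    worker.join();
    return 0;
}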
diff --git a/src/common/threadsafe_queue.h b/src/common/threadsafe_queue.h
index 821e8536a..e714ba5b3 100644
--- a/src/common/threadsafe_queue.h
+++ b/src/common/threadsafe_queue.h
@@ -78,7 +78,7 @@ public:
78 78
79 T PopWait() { 79 T PopWait() {
80 if (Empty()) { 80 if (Empty()) {
81 std::unique_lock<std::mutex> lock(cv_mutex); 81 std::unique_lock lock{cv_mutex};
82 cv.wait(lock, [this]() { return !Empty(); }); 82 cv.wait(lock, [this]() { return !Empty(); });
83 } 83 }
84 T t; 84 T t;
@@ -137,7 +137,7 @@ public:
137 137
138 template <typename Arg> 138 template <typename Arg>
139 void Push(Arg&& t) { 139 void Push(Arg&& t) {
140 std::lock_guard<std::mutex> lock(write_lock); 140 std::lock_guard lock{write_lock};
141 spsc_queue.Push(t); 141 spsc_queue.Push(t);
142 } 142 }
143 143
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 9e23afe85..c59107102 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -458,7 +458,7 @@ add_library(core STATIC
458create_target_directory_groups(core) 458create_target_directory_groups(core)
459 459
460target_link_libraries(core PUBLIC common PRIVATE audio_core video_core) 460target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
461target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt lz4_static mbedtls opus unicorn open_source_archives) 461target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt mbedtls opus unicorn open_source_archives)
462if (ENABLE_WEB_SERVICE) 462if (ENABLE_WEB_SERVICE)
463 target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE) 463 target_compile_definitions(core PRIVATE -DENABLE_WEB_SERVICE)
464 target_link_libraries(core PRIVATE web_service) 464 target_link_libraries(core PRIVATE web_service)
diff --git a/src/core/core_cpu.cpp b/src/core/core_cpu.cpp
index 1eefed6d0..e75741db0 100644
--- a/src/core/core_cpu.cpp
+++ b/src/core/core_cpu.cpp
@@ -22,7 +22,7 @@
22namespace Core { 22namespace Core {
23 23
24void CpuBarrier::NotifyEnd() { 24void CpuBarrier::NotifyEnd() {
25 std::unique_lock<std::mutex> lock(mutex); 25 std::unique_lock lock{mutex};
26 end = true; 26 end = true;
27 condition.notify_all(); 27 condition.notify_all();
28} 28}
@@ -34,7 +34,7 @@ bool CpuBarrier::Rendezvous() {
34 } 34 }
35 35
36 if (!end) { 36 if (!end) {
37 std::unique_lock<std::mutex> lock(mutex); 37 std::unique_lock lock{mutex};
38 38
39 --cores_waiting; 39 --cores_waiting;
40 if (!cores_waiting) { 40 if (!cores_waiting) {
@@ -131,7 +131,7 @@ void Cpu::Reschedule() {
131 131
132 reschedule_pending = false; 132 reschedule_pending = false;
133 // Lock the global kernel mutex when we manipulate the HLE state 133 // Lock the global kernel mutex when we manipulate the HLE state
134 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 134 std::lock_guard lock{HLE::g_hle_lock};
135 scheduler->Reschedule(); 135 scheduler->Reschedule();
136} 136}
137 137
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp
index e29afd630..1320bbe77 100644
--- a/src/core/frontend/emu_window.cpp
+++ b/src/core/frontend/emu_window.cpp
@@ -30,7 +30,7 @@ private:
30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} 30 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
31 std::tuple<float, float, bool> GetStatus() const override { 31 std::tuple<float, float, bool> GetStatus() const override {
32 if (auto state = touch_state.lock()) { 32 if (auto state = touch_state.lock()) {
33 std::lock_guard<std::mutex> guard(state->mutex); 33 std::lock_guard guard{state->mutex};
34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); 34 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed);
35 } 35 }
36 return std::make_tuple(0.0f, 0.0f, false); 36 return std::make_tuple(0.0f, 0.0f, false);
@@ -81,7 +81,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) 81 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
82 return; 82 return;
83 83
84 std::lock_guard<std::mutex> guard(touch_state->mutex); 84 std::lock_guard guard{touch_state->mutex};
85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / 85 touch_state->touch_x = static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left); 86 (framebuffer_layout.screen.right - framebuffer_layout.screen.left);
87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / 87 touch_state->touch_y = static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
@@ -91,7 +91,7 @@ void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) {
91} 91}
92 92
93void EmuWindow::TouchReleased() { 93void EmuWindow::TouchReleased() {
94 std::lock_guard<std::mutex> guard(touch_state->mutex); 94 std::lock_guard guard{touch_state->mutex};
95 touch_state->touch_pressed = false; 95 touch_state->touch_pressed = false;
96 touch_state->touch_x = 0; 96 touch_state->touch_x = 0;
97 touch_state->touch_y = 0; 97 touch_state->touch_y = 0;
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
index 352190da8..c8842410b 100644
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ b/src/core/hle/kernel/address_arbiter.cpp
@@ -26,7 +26,7 @@ void WakeThreads(const std::vector<SharedPtr<Thread>>& waiting_threads, s32 num_
26 // them all. 26 // them all.
27 std::size_t last = waiting_threads.size(); 27 std::size_t last = waiting_threads.size();
28 if (num_to_wake > 0) { 28 if (num_to_wake > 0) {
29 last = num_to_wake; 29 last = std::min(last, static_cast<std::size_t>(num_to_wake));
30 } 30 }
31 31
32 // Signal the waiting threads. 32 // Signal the waiting threads.
@@ -90,9 +90,9 @@ ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr a
90 // Determine the modified value depending on the waiting count. 90 // Determine the modified value depending on the waiting count.
91 s32 updated_value; 91 s32 updated_value;
92 if (waiting_threads.empty()) { 92 if (waiting_threads.empty()) {
93 updated_value = value - 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value + 1; 93 updated_value = value + 1;
94 } else if (num_to_wake <= 0 || waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
95 updated_value = value - 1;
96 } else { 96 } else {
97 updated_value = value; 97 updated_value = value;
98 } 98 }
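The WakeThreads fix above clamps the wake count to the number of threads actually waiting. Shown in isolation below (a sketch of the intent, not code from the commit; WakeCount is a hypothetical helper):

#include <algorithm>
#include <cstddef>

// A non-positive num_to_wake means "wake every waiter"; otherwise wake at most
// num_to_wake threads, clamped so the signalling loop cannot run past the list.
std::size_t WakeCount(std::size_t waiting, int num_to_wake) {
    std::size_t last = waiting;
    if (num_to_wake > 0) {
        last = std::min(last, static_cast<std::size_t>(num_to_wake));
    }
    return last;
}

// WakeCount(5, 2)  == 2
// WakeCount(1, 4)  == 1   (the old code would have indexed past the single waiter)
// WakeCount(3, -1) == 3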
diff --git a/src/core/hle/kernel/code_set.h b/src/core/hle/kernel/code_set.h
index 834fd23d2..879957dcb 100644
--- a/src/core/hle/kernel/code_set.h
+++ b/src/core/hle/kernel/code_set.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <cstddef> 7#include <cstddef>
8#include <memory>
9#include <vector> 8#include <vector>
10 9
11#include "common/common_types.h" 10#include "common/common_types.h"
@@ -78,7 +77,7 @@ struct CodeSet final {
78 } 77 }
79 78
80 /// The overall data that backs this code set. 79 /// The overall data that backs this code set.
81 std::shared_ptr<std::vector<u8>> memory; 80 std::vector<u8> memory;
82 81
83 /// The segments that comprise this code set. 82 /// The segments that comprise this code set.
84 std::array<Segment, 3> segments; 83 std::array<Segment, 3> segments;
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index a7e4ddc05..3f14bfa86 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -34,7 +34,7 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
34 const auto& system = Core::System::GetInstance(); 34 const auto& system = Core::System::GetInstance();
35 35
36 // Lock the global kernel mutex when we enter the kernel HLE. 36 // Lock the global kernel mutex when we enter the kernel HLE.
37 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 37 std::lock_guard lock{HLE::g_hle_lock};
38 38
39 SharedPtr<Thread> thread = 39 SharedPtr<Thread> thread =
40 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle); 40 system.Kernel().RetrieveThreadFromWakeupCallbackHandleTable(proper_handle);
@@ -62,7 +62,8 @@ static void ThreadWakeupCallback(u64 thread_handle, [[maybe_unused]] s64 cycles_
62 62
63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 || 63 if (thread->GetMutexWaitAddress() != 0 || thread->GetCondVarWaitAddress() != 0 ||
64 thread->GetWaitHandle() != 0) { 64 thread->GetWaitHandle() != 0) {
65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 65 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex ||
66 thread->GetStatus() == ThreadStatus::WaitCondVar);
66 thread->SetMutexWaitAddress(0); 67 thread->SetMutexWaitAddress(0);
67 thread->SetCondVarWaitAddress(0); 68 thread->SetCondVarWaitAddress(0);
68 thread->SetWaitHandle(0); 69 thread->SetWaitHandle(0);
@@ -114,7 +115,7 @@ struct KernelCore::Impl {
114 115
115 // Creates the default system resource limit 116 // Creates the default system resource limit
116 void InitializeSystemResourceLimit(KernelCore& kernel) { 117 void InitializeSystemResourceLimit(KernelCore& kernel) {
117 system_resource_limit = ResourceLimit::Create(kernel, "System"); 118 system_resource_limit = ResourceLimit::Create(kernel);
118 119
119 // If setting the default system values fails, then something seriously wrong has occurred. 120 // If setting the default system values fails, then something seriously wrong has occurred.
120 ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000) 121 ASSERT(system_resource_limit->SetLimitValue(ResourceType::PhysicalMemory, 0x200000000)
@@ -190,6 +191,10 @@ const Process* KernelCore::CurrentProcess() const {
190 return impl->current_process; 191 return impl->current_process;
191} 192}
192 193
194const std::vector<SharedPtr<Process>>& KernelCore::GetProcessList() const {
195 return impl->process_list;
196}
197
193void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) { 198void KernelCore::AddNamedPort(std::string name, SharedPtr<ClientPort> port) {
194 impl->named_ports.emplace(std::move(name), std::move(port)); 199 impl->named_ports.emplace(std::move(name), std::move(port));
195} 200}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 03ea5b659..6b8738599 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -72,6 +72,9 @@ public:
72 /// Retrieves a const pointer to the current process. 72 /// Retrieves a const pointer to the current process.
73 const Process* CurrentProcess() const; 73 const Process* CurrentProcess() const;
74 74
75 /// Retrieves the list of processes.
76 const std::vector<SharedPtr<Process>>& GetProcessList() const;
77
75 /// Adds a port to the named port table 78 /// Adds a port to the named port table
76 void AddNamedPort(std::string name, SharedPtr<ClientPort> port); 79 void AddNamedPort(std::string name, SharedPtr<ClientPort> port);
77 80
diff --git a/src/core/hle/kernel/object.cpp b/src/core/hle/kernel/object.cpp
index 217144efc..10431e94c 100644
--- a/src/core/hle/kernel/object.cpp
+++ b/src/core/hle/kernel/object.cpp
@@ -24,7 +24,6 @@ bool Object::IsWaitable() const {
24 case HandleType::WritableEvent: 24 case HandleType::WritableEvent:
25 case HandleType::SharedMemory: 25 case HandleType::SharedMemory:
26 case HandleType::TransferMemory: 26 case HandleType::TransferMemory:
27 case HandleType::AddressArbiter:
28 case HandleType::ResourceLimit: 27 case HandleType::ResourceLimit:
29 case HandleType::ClientPort: 28 case HandleType::ClientPort:
30 case HandleType::ClientSession: 29 case HandleType::ClientSession:
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index 3f6baa094..332876c27 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -25,7 +25,6 @@ enum class HandleType : u32 {
25 TransferMemory, 25 TransferMemory,
26 Thread, 26 Thread,
27 Process, 27 Process,
28 AddressArbiter,
29 ResourceLimit, 28 ResourceLimit,
30 ClientPort, 29 ClientPort,
31 ServerPort, 30 ServerPort,
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index 0d782e4ba..041267318 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -5,6 +5,7 @@
5#include <algorithm> 5#include <algorithm>
6#include <memory> 6#include <memory>
7#include <random> 7#include <random>
8#include "common/alignment.h"
8#include "common/assert.h" 9#include "common/assert.h"
9#include "common/logging/log.h" 10#include "common/logging/log.h"
10#include "core/core.h" 11#include "core/core.h"
@@ -75,6 +76,18 @@ SharedPtr<ResourceLimit> Process::GetResourceLimit() const {
75 return resource_limit; 76 return resource_limit;
76} 77}
77 78
79u64 Process::GetTotalPhysicalMemoryUsed() const {
80 return vm_manager.GetCurrentHeapSize() + main_thread_stack_size + code_memory_size;
81}
82
83void Process::RegisterThread(const Thread* thread) {
84 thread_list.push_back(thread);
85}
86
87void Process::UnregisterThread(const Thread* thread) {
88 thread_list.remove(thread);
89}
90
78ResultCode Process::ClearSignalState() { 91ResultCode Process::ClearSignalState() {
79 if (status == ProcessStatus::Exited) { 92 if (status == ProcessStatus::Exited) {
80 LOG_ERROR(Kernel, "called on a terminated process instance."); 93 LOG_ERROR(Kernel, "called on a terminated process instance.");
@@ -107,14 +120,17 @@ ResultCode Process::LoadFromMetadata(const FileSys::ProgramMetadata& metadata) {
107 return handle_table.SetSize(capabilities.GetHandleTableSize()); 120 return handle_table.SetSize(capabilities.GetHandleTableSize());
108} 121}
109 122
110void Process::Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size) { 123void Process::Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size) {
124 // The kernel always ensures that the given stack size is page aligned.
125 main_thread_stack_size = Common::AlignUp(stack_size, Memory::PAGE_SIZE);
126
111 // Allocate and map the main thread stack 127 // Allocate and map the main thread stack
112 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part 128 // TODO(bunnei): This is heap area that should be allocated by the kernel and not mapped as part
113 // of the user address space. 129 // of the user address space.
130 const VAddr mapping_address = vm_manager.GetTLSIORegionEndAddress() - main_thread_stack_size;
114 vm_manager 131 vm_manager
115 .MapMemoryBlock(vm_manager.GetTLSIORegionEndAddress() - stack_size, 132 .MapMemoryBlock(mapping_address, std::make_shared<std::vector<u8>>(main_thread_stack_size),
116 std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, 133 0, main_thread_stack_size, MemoryState::Stack)
117 MemoryState::Stack)
118 .Unwrap(); 134 .Unwrap();
119 135
120 vm_manager.LogLayout(); 136 vm_manager.LogLayout();
@@ -210,11 +226,13 @@ void Process::FreeTLSSlot(VAddr tls_address) {
210} 226}
211 227
212void Process::LoadModule(CodeSet module_, VAddr base_addr) { 228void Process::LoadModule(CodeSet module_, VAddr base_addr) {
229 const auto memory = std::make_shared<std::vector<u8>>(std::move(module_.memory));
230
213 const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions, 231 const auto MapSegment = [&](const CodeSet::Segment& segment, VMAPermission permissions,
214 MemoryState memory_state) { 232 MemoryState memory_state) {
215 const auto vma = vm_manager 233 const auto vma = vm_manager
216 .MapMemoryBlock(segment.addr + base_addr, module_.memory, 234 .MapMemoryBlock(segment.addr + base_addr, memory, segment.offset,
217 segment.offset, segment.size, memory_state) 235 segment.size, memory_state)
218 .Unwrap(); 236 .Unwrap();
219 vm_manager.Reprotect(vma, permissions); 237 vm_manager.Reprotect(vma, permissions);
220 }; 238 };
@@ -224,6 +242,8 @@ void Process::LoadModule(CodeSet module_, VAddr base_addr) {
224 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData); 242 MapSegment(module_.RODataSegment(), VMAPermission::Read, MemoryState::CodeData);
225 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData); 243 MapSegment(module_.DataSegment(), VMAPermission::ReadWrite, MemoryState::CodeData);
226 244
245 code_memory_size += module_.memory.size();
246
227 // Clear instruction cache in CPU JIT 247 // Clear instruction cache in CPU JIT
228 system.InvalidateCpuInstructionCaches(); 248 system.InvalidateCpuInstructionCaches();
229} 249}
@@ -237,7 +257,7 @@ void Process::Acquire(Thread* thread) {
237 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!"); 257 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
238} 258}
239 259
240bool Process::ShouldWait(Thread* thread) const { 260bool Process::ShouldWait(const Thread* thread) const {
241 return !is_signaled; 261 return !is_signaled;
242} 262}
243 263
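Process::Run now rounds the requested stack size up to a page boundary before mapping it. The rounding Common::AlignUp is used for here behaves like the usual power-of-two align-up, sketched below (illustrative; not the project's actual implementation):

#include <cstdint>

constexpr std::uint64_t AlignUpPow2(std::uint64_t size, std::uint64_t alignment) {
    // alignment must be a power of two (Memory::PAGE_SIZE is 0x1000).
    return (size + alignment - 1) & ~(alignment - 1);
}

static_assert(AlignUpPow2(0x1000, 0x1000) == 0x1000); // already page aligned
static_assert(AlignUpPow2(0x1001, 0x1000) == 0x2000); // rounded up to the next page
static_assert(AlignUpPow2(0, 0x1000) == 0);           // zero stays zero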
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index 1bd7bf5c1..f060f2a3b 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -7,6 +7,7 @@
7#include <array> 7#include <array>
8#include <bitset> 8#include <bitset>
9#include <cstddef> 9#include <cstddef>
10#include <list>
10#include <string> 11#include <string>
11#include <vector> 12#include <vector>
12#include <boost/container/static_vector.hpp> 13#include <boost/container/static_vector.hpp>
@@ -35,14 +36,6 @@ class Thread;
35 36
36struct CodeSet; 37struct CodeSet;
37 38
38struct AddressMapping {
39 // Address and size must be page-aligned
40 VAddr address;
41 u64 size;
42 bool read_only;
43 bool unk_flag;
44};
45
46enum class MemoryRegion : u16 { 39enum class MemoryRegion : u16 {
47 APPLICATION = 1, 40 APPLICATION = 1,
48 SYSTEM = 2, 41 SYSTEM = 2,
@@ -194,6 +187,22 @@ public:
194 return random_entropy.at(index); 187 return random_entropy.at(index);
195 } 188 }
196 189
190 /// Retrieves the total physical memory used by this process in bytes.
191 u64 GetTotalPhysicalMemoryUsed() const;
192
193 /// Gets the list of all threads created with this process as their owner.
194 const std::list<const Thread*>& GetThreadList() const {
195 return thread_list;
196 }
197
198 /// Registers a thread as being created under this process,
199 /// adding it to this process' thread list.
200 void RegisterThread(const Thread* thread);
201
202 /// Unregisters a thread from this process, removing it
203 /// from this process' thread list.
204 void UnregisterThread(const Thread* thread);
205
197 /// Clears the signaled state of the process if and only if it's signaled. 206 /// Clears the signaled state of the process if and only if it's signaled.
198 /// 207 ///
199 /// @pre The process must not be already terminated. If this is called on a 208 /// @pre The process must not be already terminated. If this is called on a
@@ -218,7 +227,7 @@ public:
218 /** 227 /**
219 * Applies address space changes and launches the process main thread. 228 * Applies address space changes and launches the process main thread.
220 */ 229 */
221 void Run(VAddr entry_point, s32 main_thread_priority, u32 stack_size); 230 void Run(VAddr entry_point, s32 main_thread_priority, u64 stack_size);
222 231
223 /** 232 /**
224 * Prepares a process for termination by stopping all of its threads 233 * Prepares a process for termination by stopping all of its threads
@@ -242,7 +251,7 @@ private:
242 ~Process() override; 251 ~Process() override;
243 252
244 /// Checks if the specified thread should wait until this process is available. 253 /// Checks if the specified thread should wait until this process is available.
245 bool ShouldWait(Thread* thread) const override; 254 bool ShouldWait(const Thread* thread) const override;
246 255
247 /// Acquires/locks this process for the specified thread if it's available. 256 /// Acquires/locks this process for the specified thread if it's available.
248 void Acquire(Thread* thread) override; 257 void Acquire(Thread* thread) override;
@@ -255,6 +264,12 @@ private:
255 /// Memory manager for this process. 264 /// Memory manager for this process.
256 Kernel::VMManager vm_manager; 265 Kernel::VMManager vm_manager;
257 266
267 /// Size of the main thread's stack in bytes.
268 u64 main_thread_stack_size = 0;
269
270 /// Size of the loaded code memory in bytes.
271 u64 code_memory_size = 0;
272
258 /// Current status of the process 273 /// Current status of the process
259 ProcessStatus status; 274 ProcessStatus status;
260 275
@@ -307,6 +322,9 @@ private:
307 /// Random values for svcGetInfo RandomEntropy 322 /// Random values for svcGetInfo RandomEntropy
308 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy; 323 std::array<u64, RANDOM_ENTROPY_SIZE> random_entropy;
309 324
325 /// List of threads that are running with this process as their owner.
326 std::list<const Thread*> thread_list;
327
310 /// System context 328 /// System context
311 Core::System& system; 329 Core::System& system;
312 330
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 0e5083f70..c2b798a4e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,7 +14,7 @@ namespace Kernel {
14ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {} 14ReadableEvent::ReadableEvent(KernelCore& kernel) : WaitObject{kernel} {}
15ReadableEvent::~ReadableEvent() = default; 15ReadableEvent::~ReadableEvent() = default;
16 16
17bool ReadableEvent::ShouldWait(Thread* thread) const { 17bool ReadableEvent::ShouldWait(const Thread* thread) const {
18 return !signaled; 18 return !signaled;
19} 19}
20 20
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 77a9c362c..2eb9dcbb7 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -36,7 +36,7 @@ public:
36 return HANDLE_TYPE; 36 return HANDLE_TYPE;
37 } 37 }
38 38
39 bool ShouldWait(Thread* thread) const override; 39 bool ShouldWait(const Thread* thread) const override;
40 void Acquire(Thread* thread) override; 40 void Acquire(Thread* thread) override;
41 41
42 /// Unconditionally clears the readable event's state. 42 /// Unconditionally clears the readable event's state.
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 2f9695005..173f69915 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -16,11 +16,8 @@ constexpr std::size_t ResourceTypeToIndex(ResourceType type) {
16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {} 16ResourceLimit::ResourceLimit(KernelCore& kernel) : Object{kernel} {}
17ResourceLimit::~ResourceLimit() = default; 17ResourceLimit::~ResourceLimit() = default;
18 18
19SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel, std::string name) { 19SharedPtr<ResourceLimit> ResourceLimit::Create(KernelCore& kernel) {
20 SharedPtr<ResourceLimit> resource_limit(new ResourceLimit(kernel)); 20 return new ResourceLimit(kernel);
21
22 resource_limit->name = std::move(name);
23 return resource_limit;
24} 21}
25 22
26s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const { 23s64 ResourceLimit::GetCurrentResourceValue(ResourceType resource) const {
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 59dc11c22..70e09858a 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -31,16 +31,14 @@ constexpr bool IsValidResourceType(ResourceType type) {
31 31
32class ResourceLimit final : public Object { 32class ResourceLimit final : public Object {
33public: 33public:
34 /** 34 /// Creates a resource limit object.
35 * Creates a resource limit object. 35 static SharedPtr<ResourceLimit> Create(KernelCore& kernel);
36 */
37 static SharedPtr<ResourceLimit> Create(KernelCore& kernel, std::string name = "Unknown");
38 36
39 std::string GetTypeName() const override { 37 std::string GetTypeName() const override {
40 return "ResourceLimit"; 38 return "ResourceLimit";
41 } 39 }
42 std::string GetName() const override { 40 std::string GetName() const override {
43 return name; 41 return GetTypeName();
44 } 42 }
45 43
46 static const HandleType HANDLE_TYPE = HandleType::ResourceLimit; 44 static const HandleType HANDLE_TYPE = HandleType::ResourceLimit;
@@ -95,9 +93,6 @@ private:
95 ResourceArray limits{}; 93 ResourceArray limits{};
96 /// Current resource limit values. 94 /// Current resource limit values.
97 ResourceArray values{}; 95 ResourceArray values{};
98
99 /// Name of resource limit object.
100 std::string name;
101}; 96};
102 97
103} // namespace Kernel 98} // namespace Kernel
diff --git a/src/core/hle/kernel/scheduler.cpp b/src/core/hle/kernel/scheduler.cpp
index cc189cc64..ac501bf7f 100644
--- a/src/core/hle/kernel/scheduler.cpp
+++ b/src/core/hle/kernel/scheduler.cpp
@@ -29,8 +29,8 @@ Scheduler::~Scheduler() {
29} 29}
30 30
31bool Scheduler::HaveReadyThreads() const { 31bool Scheduler::HaveReadyThreads() const {
32 std::lock_guard<std::mutex> lock(scheduler_mutex); 32 std::lock_guard lock{scheduler_mutex};
33 return ready_queue.get_first() != nullptr; 33 return !ready_queue.empty();
34} 34}
35 35
36Thread* Scheduler::GetCurrentThread() const { 36Thread* Scheduler::GetCurrentThread() const {
@@ -46,22 +46,27 @@ Thread* Scheduler::PopNextReadyThread() {
46 Thread* thread = GetCurrentThread(); 46 Thread* thread = GetCurrentThread();
47 47
48 if (thread && thread->GetStatus() == ThreadStatus::Running) { 48 if (thread && thread->GetStatus() == ThreadStatus::Running) {
49 if (ready_queue.empty()) {
50 return thread;
51 }
49 // We have to do better than the current thread. 52 // We have to do better than the current thread.
50 // This call returns null when that's not possible. 53 // This call returns null when that's not possible.
51 next = ready_queue.pop_first_better(thread->GetPriority()); 54 next = ready_queue.front();
52 if (!next) { 55 if (next == nullptr || next->GetPriority() >= thread->GetPriority()) {
53 // Otherwise just keep going with the current thread
54 next = thread; 56 next = thread;
55 } 57 }
56 } else { 58 } else {
57 next = ready_queue.pop_first(); 59 if (ready_queue.empty()) {
60 return nullptr;
61 }
62 next = ready_queue.front();
58 } 63 }
59 64
60 return next; 65 return next;
61} 66}
62 67
63void Scheduler::SwitchContext(Thread* new_thread) { 68void Scheduler::SwitchContext(Thread* new_thread) {
64 Thread* const previous_thread = GetCurrentThread(); 69 Thread* previous_thread = GetCurrentThread();
65 Process* const previous_process = system.Kernel().CurrentProcess(); 70 Process* const previous_process = system.Kernel().CurrentProcess();
66 71
67 UpdateLastContextSwitchTime(previous_thread, previous_process); 72 UpdateLastContextSwitchTime(previous_thread, previous_process);
@@ -75,7 +80,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
75 if (previous_thread->GetStatus() == ThreadStatus::Running) { 80 if (previous_thread->GetStatus() == ThreadStatus::Running) {
76 // This is only the case when a reschedule is triggered without the current thread 81 // This is only the case when a reschedule is triggered without the current thread
77 // yielding execution (i.e. an event triggered, system core time-sliced, etc) 82 // yielding execution (i.e. an event triggered, system core time-sliced, etc)
78 ready_queue.push_front(previous_thread->GetPriority(), previous_thread); 83 ready_queue.add(previous_thread, previous_thread->GetPriority(), false);
79 previous_thread->SetStatus(ThreadStatus::Ready); 84 previous_thread->SetStatus(ThreadStatus::Ready);
80 } 85 }
81 } 86 }
@@ -90,7 +95,7 @@ void Scheduler::SwitchContext(Thread* new_thread) {
90 95
91 current_thread = new_thread; 96 current_thread = new_thread;
92 97
93 ready_queue.remove(new_thread->GetPriority(), new_thread); 98 ready_queue.remove(new_thread, new_thread->GetPriority());
94 new_thread->SetStatus(ThreadStatus::Running); 99 new_thread->SetStatus(ThreadStatus::Running);
95 100
96 auto* const thread_owner_process = current_thread->GetOwnerProcess(); 101 auto* const thread_owner_process = current_thread->GetOwnerProcess();
@@ -127,7 +132,7 @@ void Scheduler::UpdateLastContextSwitchTime(Thread* thread, Process* process) {
127} 132}
128 133
129void Scheduler::Reschedule() { 134void Scheduler::Reschedule() {
130 std::lock_guard<std::mutex> lock(scheduler_mutex); 135 std::lock_guard lock{scheduler_mutex};
131 136
132 Thread* cur = GetCurrentThread(); 137 Thread* cur = GetCurrentThread();
133 Thread* next = PopNextReadyThread(); 138 Thread* next = PopNextReadyThread();
@@ -143,51 +148,54 @@ void Scheduler::Reschedule() {
143 SwitchContext(next); 148 SwitchContext(next);
144} 149}
145 150
146void Scheduler::AddThread(SharedPtr<Thread> thread, u32 priority) { 151void Scheduler::AddThread(SharedPtr<Thread> thread) {
147 std::lock_guard<std::mutex> lock(scheduler_mutex); 152 std::lock_guard lock{scheduler_mutex};
148 153
149 thread_list.push_back(std::move(thread)); 154 thread_list.push_back(std::move(thread));
150 ready_queue.prepare(priority);
151} 155}
152 156
153void Scheduler::RemoveThread(Thread* thread) { 157void Scheduler::RemoveThread(Thread* thread) {
154 std::lock_guard<std::mutex> lock(scheduler_mutex); 158 std::lock_guard lock{scheduler_mutex};
155 159
156 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread), 160 thread_list.erase(std::remove(thread_list.begin(), thread_list.end(), thread),
157 thread_list.end()); 161 thread_list.end());
158} 162}
159 163
160void Scheduler::ScheduleThread(Thread* thread, u32 priority) { 164void Scheduler::ScheduleThread(Thread* thread, u32 priority) {
161 std::lock_guard<std::mutex> lock(scheduler_mutex); 165 std::lock_guard lock{scheduler_mutex};
162 166
163 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 167 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
164 ready_queue.push_back(priority, thread); 168 ready_queue.add(thread, priority);
165} 169}
166 170
167void Scheduler::UnscheduleThread(Thread* thread, u32 priority) { 171void Scheduler::UnscheduleThread(Thread* thread, u32 priority) {
168 std::lock_guard<std::mutex> lock(scheduler_mutex); 172 std::lock_guard lock{scheduler_mutex};
169 173
170 ASSERT(thread->GetStatus() == ThreadStatus::Ready); 174 ASSERT(thread->GetStatus() == ThreadStatus::Ready);
171 ready_queue.remove(priority, thread); 175 ready_queue.remove(thread, priority);
172} 176}
173 177
174void Scheduler::SetThreadPriority(Thread* thread, u32 priority) { 178void Scheduler::SetThreadPriority(Thread* thread, u32 priority) {
175 std::lock_guard<std::mutex> lock(scheduler_mutex); 179 std::lock_guard lock{scheduler_mutex};
180 if (thread->GetPriority() == priority) {
181 return;
182 }
176 183
177 // If thread was ready, adjust queues 184 // If thread was ready, adjust queues
178 if (thread->GetStatus() == ThreadStatus::Ready) 185 if (thread->GetStatus() == ThreadStatus::Ready)
179 ready_queue.move(thread, thread->GetPriority(), priority); 186 ready_queue.adjust(thread, thread->GetPriority(), priority);
180 else
181 ready_queue.prepare(priority);
182} 187}
183 188
184Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const { 189Thread* Scheduler::GetNextSuggestedThread(u32 core, u32 maximum_priority) const {
185 std::lock_guard<std::mutex> lock(scheduler_mutex); 190 std::lock_guard lock{scheduler_mutex};
186 191
187 const u32 mask = 1U << core; 192 const u32 mask = 1U << core;
188 return ready_queue.get_first_filter([mask, maximum_priority](Thread const* thread) { 193 for (auto* thread : ready_queue) {
189 return (thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority; 194 if ((thread->GetAffinityMask() & mask) != 0 && thread->GetPriority() < maximum_priority) {
190 }); 195 return thread;
196 }
197 }
198 return nullptr;
191} 199}
192 200
193void Scheduler::YieldWithoutLoadBalancing(Thread* thread) { 201void Scheduler::YieldWithoutLoadBalancing(Thread* thread) {
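The scheduler changes above swap the old ThreadQueueList for Common::MultiLevelQueue, so thread selection becomes a peek at the highest-priority bucket (front) instead of a pop-and-reinsert, and priority changes go through adjust. The real container lives in common/multi_level_queue.h and also supports iteration and a bitmask over priority levels; the sketch below only illustrates the subset of the interface the scheduler relies on, built from plain std::list buckets, and is not the actual implementation:

    #include <array>
    #include <cstddef>
    #include <list>

    // Illustrative priority-bucketed queue exposing the operations used by
    // Scheduler: add, remove, adjust, front and empty. Lower index means
    // higher priority, mirroring THREADPRIO ordering.
    template <typename T, std::size_t Depth>
    class MiniLevelQueue {
    public:
        void add(T item, std::size_t priority, bool at_back = true) {
            auto& bucket = buckets[priority];
            if (at_back) {
                bucket.push_back(item);
            } else {
                bucket.push_front(item);
            }
        }

        void remove(T item, std::size_t priority) {
            buckets[priority].remove(item);
        }

        void adjust(T item, std::size_t old_priority, std::size_t new_priority) {
            remove(item, old_priority);
            add(item, new_priority);
        }

        // Returns the first element of the highest-priority non-empty bucket,
        // or a default-constructed T (nullptr for Thread*) when empty.
        T front() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return bucket.front();
                }
            }
            return T{};
        }

        bool empty() const {
            for (const auto& bucket : buckets) {
                if (!bucket.empty()) {
                    return false;
                }
            }
            return true;
        }

    private:
        std::array<std::list<T>, Depth> buckets{};
    };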
diff --git a/src/core/hle/kernel/scheduler.h b/src/core/hle/kernel/scheduler.h
index 1c5bf57d9..b29bf7be8 100644
--- a/src/core/hle/kernel/scheduler.h
+++ b/src/core/hle/kernel/scheduler.h
@@ -7,7 +7,7 @@
7#include <mutex> 7#include <mutex>
8#include <vector> 8#include <vector>
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "common/thread_queue_list.h" 10#include "common/multi_level_queue.h"
11#include "core/hle/kernel/object.h" 11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/thread.h" 12#include "core/hle/kernel/thread.h"
13 13
@@ -38,7 +38,7 @@ public:
38 u64 GetLastContextSwitchTicks() const; 38 u64 GetLastContextSwitchTicks() const;
39 39
40 /// Adds a new thread to the scheduler 40 /// Adds a new thread to the scheduler
41 void AddThread(SharedPtr<Thread> thread, u32 priority); 41 void AddThread(SharedPtr<Thread> thread);
42 42
43 /// Removes a thread from the scheduler 43 /// Removes a thread from the scheduler
44 void RemoveThread(Thread* thread); 44 void RemoveThread(Thread* thread);
@@ -156,7 +156,7 @@ private:
156 std::vector<SharedPtr<Thread>> thread_list; 156 std::vector<SharedPtr<Thread>> thread_list;
157 157
158 /// Lists only ready thread ids. 158 /// Lists only ready thread ids.
159 Common::ThreadQueueList<Thread*, THREADPRIO_LOWEST + 1> ready_queue; 159 Common::MultiLevelQueue<Thread*, THREADPRIO_LOWEST + 1> ready_queue;
160 160
161 SharedPtr<Thread> current_thread = nullptr; 161 SharedPtr<Thread> current_thread = nullptr;
162 162
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index 0e1515c89..708fdf9e1 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -30,7 +30,7 @@ void ServerPort::AppendPendingSession(SharedPtr<ServerSession> pending_session)
30 pending_sessions.push_back(std::move(pending_session)); 30 pending_sessions.push_back(std::move(pending_session));
31} 31}
32 32
33bool ServerPort::ShouldWait(Thread* thread) const { 33bool ServerPort::ShouldWait(const Thread* thread) const {
34 // If there are no pending sessions, we wait until a new one is added. 34 // If there are no pending sessions, we wait until a new one is added.
35 return pending_sessions.empty(); 35 return pending_sessions.empty();
36} 36}
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 9bc667cf2..76293cb8b 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -75,7 +75,7 @@ public:
75 /// waiting to be accepted by this port. 75 /// waiting to be accepted by this port.
76 void AppendPendingSession(SharedPtr<ServerSession> pending_session); 76 void AppendPendingSession(SharedPtr<ServerSession> pending_session);
77 77
78 bool ShouldWait(Thread* thread) const override; 78 bool ShouldWait(const Thread* thread) const override;
79 void Acquire(Thread* thread) override; 79 void Acquire(Thread* thread) override;
80 80
81private: 81private:
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index 4d8a337a7..40cec143e 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -46,7 +46,7 @@ ResultVal<SharedPtr<ServerSession>> ServerSession::Create(KernelCore& kernel, st
46 return MakeResult(std::move(server_session)); 46 return MakeResult(std::move(server_session));
47} 47}
48 48
49bool ServerSession::ShouldWait(Thread* thread) const { 49bool ServerSession::ShouldWait(const Thread* thread) const {
50 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 50 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
51 if (parent->client == nullptr) 51 if (parent->client == nullptr)
52 return false; 52 return false;
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index aea4ccfeb..79b84bade 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -82,7 +82,7 @@ public:
82 */ 82 */
83 ResultCode HandleSyncRequest(SharedPtr<Thread> thread); 83 ResultCode HandleSyncRequest(SharedPtr<Thread> thread);
84 84
85 bool ShouldWait(Thread* thread) const override; 85 bool ShouldWait(const Thread* thread) const override;
86 86
87 void Acquire(Thread* thread) override; 87 void Acquire(Thread* thread) override;
88 88
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index 62861da36..f15c5ee36 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -9,7 +9,6 @@
9#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
10#include "core/hle/kernel/kernel.h" 10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/shared_memory.h" 11#include "core/hle/kernel/shared_memory.h"
12#include "core/memory.h"
13 12
14namespace Kernel { 13namespace Kernel {
15 14
@@ -119,7 +118,15 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi
119 ConvertPermissions(permissions)); 118 ConvertPermissions(permissions));
120} 119}
121 120
122ResultCode SharedMemory::Unmap(Process& target_process, VAddr address) { 121ResultCode SharedMemory::Unmap(Process& target_process, VAddr address, u64 unmap_size) {
122 if (unmap_size != size) {
123 LOG_ERROR(Kernel,
124 "Invalid size passed to Unmap. Size must be equal to the size of the "
125 "memory managed. Shared memory size=0x{:016X}, Unmap size=0x{:016X}",
126 size, unmap_size);
127 return ERR_INVALID_SIZE;
128 }
129
123 // TODO(Subv): Verify what happens if the application tries to unmap an address that is not 130 // TODO(Subv): Verify what happens if the application tries to unmap an address that is not
124 // mapped to a SharedMemory. 131 // mapped to a SharedMemory.
125 return target_process.VMManager().UnmapRange(address, size); 132 return target_process.VMManager().UnmapRange(address, size);
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index dab2a6bea..37e18c443 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -104,11 +104,17 @@ public:
104 104
105 /** 105 /**
106 * Unmaps a shared memory block from the specified address in system memory 106 * Unmaps a shared memory block from the specified address in system memory
107 *
107 * @param target_process Process from which to unmap the memory block. 108 * @param target_process Process from which to unmap the memory block.
108 * @param address Address in system memory where the shared memory block is mapped 109 * @param address Address in system memory where the shared memory block is mapped.
 110 * @param unmap_size The number of bytes to unmap from this shared memory instance.
111 *
109 * @return Result code of the unmap operation 112 * @return Result code of the unmap operation
113 *
114 * @pre The given size to unmap must be the same size as the amount of memory managed by
115 * the SharedMemory instance itself, otherwise ERR_INVALID_SIZE will be returned.
110 */ 116 */
111 ResultCode Unmap(Process& target_process, VAddr address); 117 ResultCode Unmap(Process& target_process, VAddr address, u64 unmap_size);
112 118
113 /** 119 /**
114 * Gets a pointer to the shared memory block 120 * Gets a pointer to the shared memory block
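Unmap() now takes the size to unmap and rejects anything that is not the full size of the block, returning ERR_INVALID_SIZE before the VMManager is touched. A hedged usage sketch; the helper below is illustrative only and simply threads the block's own size through, it is not part of the diff:

    #include "core/hle/kernel/process.h"
    #include "core/hle/kernel/shared_memory.h"

    // Illustrative only: unmapping must pass the same size the block manages,
    // otherwise Unmap() returns ERR_INVALID_SIZE.
    ResultCode UnmapWholeBlock(Kernel::Process& process, Kernel::SharedMemory& block,
                               VAddr address, u64 block_size) {
        return block.Unmap(process, address, block_size);
    }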
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index e5e7f99e1..ab10db3df 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -175,11 +175,8 @@ static ResultCode SetHeapSize(VAddr* heap_addr, u64 heap_size) {
175 return ERR_INVALID_SIZE; 175 return ERR_INVALID_SIZE;
176 } 176 }
177 177
178 auto& vm_manager = Core::CurrentProcess()->VMManager(); 178 auto& vm_manager = Core::System::GetInstance().Kernel().CurrentProcess()->VMManager();
179 const VAddr heap_base = vm_manager.GetHeapRegionBaseAddress(); 179 const auto alloc_result = vm_manager.SetHeapSize(heap_size);
180 const auto alloc_result =
181 vm_manager.HeapAllocate(heap_base, heap_size, VMAPermission::ReadWrite);
182
183 if (alloc_result.Failed()) { 180 if (alloc_result.Failed()) {
184 return alloc_result.Code(); 181 return alloc_result.Code();
185 } 182 }
@@ -712,7 +709,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
712 HeapRegionBaseAddr = 4, 709 HeapRegionBaseAddr = 4,
713 HeapRegionSize = 5, 710 HeapRegionSize = 5,
714 TotalMemoryUsage = 6, 711 TotalMemoryUsage = 6,
715 TotalHeapUsage = 7, 712 TotalPhysicalMemoryUsed = 7,
716 IsCurrentProcessBeingDebugged = 8, 713 IsCurrentProcessBeingDebugged = 8,
717 RegisterResourceLimit = 9, 714 RegisterResourceLimit = 9,
718 IdleTickCount = 10, 715 IdleTickCount = 10,
@@ -748,7 +745,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
748 case GetInfoType::NewMapRegionBaseAddr: 745 case GetInfoType::NewMapRegionBaseAddr:
749 case GetInfoType::NewMapRegionSize: 746 case GetInfoType::NewMapRegionSize:
750 case GetInfoType::TotalMemoryUsage: 747 case GetInfoType::TotalMemoryUsage:
751 case GetInfoType::TotalHeapUsage: 748 case GetInfoType::TotalPhysicalMemoryUsed:
752 case GetInfoType::IsVirtualAddressMemoryEnabled: 749 case GetInfoType::IsVirtualAddressMemoryEnabled:
753 case GetInfoType::PersonalMmHeapUsage: 750 case GetInfoType::PersonalMmHeapUsage:
754 case GetInfoType::TitleId: 751 case GetInfoType::TitleId:
@@ -808,8 +805,8 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
808 *result = process->VMManager().GetTotalMemoryUsage(); 805 *result = process->VMManager().GetTotalMemoryUsage();
809 return RESULT_SUCCESS; 806 return RESULT_SUCCESS;
810 807
811 case GetInfoType::TotalHeapUsage: 808 case GetInfoType::TotalPhysicalMemoryUsed:
812 *result = process->VMManager().GetTotalHeapUsage(); 809 *result = process->GetTotalPhysicalMemoryUsed();
813 return RESULT_SUCCESS; 810 return RESULT_SUCCESS;
814 811
815 case GetInfoType::IsVirtualAddressMemoryEnabled: 812 case GetInfoType::IsVirtualAddressMemoryEnabled:
@@ -1143,7 +1140,7 @@ static ResultCode UnmapSharedMemory(Handle shared_memory_handle, VAddr addr, u64
1143 return ERR_INVALID_MEMORY_RANGE; 1140 return ERR_INVALID_MEMORY_RANGE;
1144 } 1141 }
1145 1142
1146 return shared_memory->Unmap(*current_process, addr); 1143 return shared_memory->Unmap(*current_process, addr, size);
1147} 1144}
1148 1145
1149static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address, 1146static ResultCode QueryProcessMemory(VAddr memory_info_address, VAddr page_info_address,
@@ -1356,7 +1353,7 @@ static ResultCode WaitProcessWideKeyAtomic(VAddr mutex_addr, VAddr condition_var
1356 current_thread->SetCondVarWaitAddress(condition_variable_addr); 1353 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1357 current_thread->SetMutexWaitAddress(mutex_addr); 1354 current_thread->SetMutexWaitAddress(mutex_addr);
1358 current_thread->SetWaitHandle(thread_handle); 1355 current_thread->SetWaitHandle(thread_handle);
1359 current_thread->SetStatus(ThreadStatus::WaitMutex); 1356 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1360 current_thread->InvalidateWakeupCallback(); 1357 current_thread->InvalidateWakeupCallback();
1361 1358
1362 current_thread->WakeAfterDelay(nano_seconds); 1359 current_thread->WakeAfterDelay(nano_seconds);
@@ -1400,10 +1397,10 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1400 // them all. 1397 // them all.
1401 std::size_t last = waiting_threads.size(); 1398 std::size_t last = waiting_threads.size();
1402 if (target != -1) 1399 if (target != -1)
1403 last = target; 1400 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1404 1401
1405 // If there are no threads waiting on this condition variable, just exit 1402 // If there are no threads waiting on this condition variable, just exit
1406 if (last > waiting_threads.size()) 1403 if (last == 0)
1407 return RESULT_SUCCESS; 1404 return RESULT_SUCCESS;
1408 1405
1409 for (std::size_t index = 0; index < last; ++index) { 1406 for (std::size_t index = 0; index < last; ++index) {
@@ -1411,6 +1408,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1411 1408
1412 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr); 1409 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1413 1410
 1411 // Release the thread from the condition variable.
1412 thread->SetCondVarWaitAddress(0);
1413
1414 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex(); 1414 std::size_t current_core = Core::System::GetInstance().CurrentCoreIndex();
1415 1415
1416 auto& monitor = Core::System::GetInstance().Monitor(); 1416 auto& monitor = Core::System::GetInstance().Monitor();
@@ -1429,10 +1429,9 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1429 } 1429 }
1430 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(), 1430 } while (!monitor.ExclusiveWrite32(current_core, thread->GetMutexWaitAddress(),
1431 thread->GetWaitHandle())); 1431 thread->GetWaitHandle()));
1432
1433 if (mutex_val == 0) { 1432 if (mutex_val == 0) {
1434 // We were able to acquire the mutex, resume this thread. 1433 // We were able to acquire the mutex, resume this thread.
1435 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1434 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1436 thread->ResumeFromWait(); 1435 thread->ResumeFromWait();
1437 1436
1438 auto* const lock_owner = thread->GetLockOwner(); 1437 auto* const lock_owner = thread->GetLockOwner();
@@ -1442,8 +1441,8 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1442 1441
1443 thread->SetLockOwner(nullptr); 1442 thread->SetLockOwner(nullptr);
1444 thread->SetMutexWaitAddress(0); 1443 thread->SetMutexWaitAddress(0);
1445 thread->SetCondVarWaitAddress(0);
1446 thread->SetWaitHandle(0); 1444 thread->SetWaitHandle(0);
1445 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1447 } else { 1446 } else {
1448 // Atomically signal that the mutex now has a waiting thread. 1447 // Atomically signal that the mutex now has a waiting thread.
1449 do { 1448 do {
@@ -1462,12 +1461,11 @@ static ResultCode SignalProcessWideKey(VAddr condition_variable_addr, s32 target
1462 const auto& handle_table = Core::CurrentProcess()->GetHandleTable(); 1461 const auto& handle_table = Core::CurrentProcess()->GetHandleTable();
1463 auto owner = handle_table.Get<Thread>(owner_handle); 1462 auto owner = handle_table.Get<Thread>(owner_handle);
1464 ASSERT(owner); 1463 ASSERT(owner);
1465 ASSERT(thread->GetStatus() == ThreadStatus::WaitMutex); 1464 ASSERT(thread->GetStatus() == ThreadStatus::WaitCondVar);
1466 thread->InvalidateWakeupCallback(); 1465 thread->InvalidateWakeupCallback();
1466 thread->SetStatus(ThreadStatus::WaitMutex);
1467 1467
1468 owner->AddMutexWaiter(thread); 1468 owner->AddMutexWaiter(thread);
1469
1470 Core::System::GetInstance().CpuCore(thread->GetProcessorID()).PrepareReschedule();
1471 } 1469 }
1472 } 1470 }
1473 1471
@@ -1985,6 +1983,83 @@ static ResultCode SetResourceLimitLimitValue(Handle resource_limit, u32 resource
1985 return RESULT_SUCCESS; 1983 return RESULT_SUCCESS;
1986} 1984}
1987 1985
1986static ResultCode GetProcessList(u32* out_num_processes, VAddr out_process_ids,
1987 u32 out_process_ids_size) {
1988 LOG_DEBUG(Kernel_SVC, "called. out_process_ids=0x{:016X}, out_process_ids_size={}",
1989 out_process_ids, out_process_ids_size);
1990
1991 // If the supplied size is negative or greater than INT32_MAX / sizeof(u64), bail.
1992 if ((out_process_ids_size & 0xF0000000) != 0) {
1993 LOG_ERROR(Kernel_SVC,
1994 "Supplied size outside [0, 0x0FFFFFFF] range. out_process_ids_size={}",
1995 out_process_ids_size);
1996 return ERR_OUT_OF_RANGE;
1997 }
1998
1999 const auto& kernel = Core::System::GetInstance().Kernel();
2000 const auto& vm_manager = kernel.CurrentProcess()->VMManager();
2001 const auto total_copy_size = out_process_ids_size * sizeof(u64);
2002
2003 if (out_process_ids_size > 0 &&
2004 !vm_manager.IsWithinAddressSpace(out_process_ids, total_copy_size)) {
2005 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2006 out_process_ids, out_process_ids + total_copy_size);
2007 return ERR_INVALID_ADDRESS_STATE;
2008 }
2009
2010 const auto& process_list = kernel.GetProcessList();
2011 const auto num_processes = process_list.size();
2012 const auto copy_amount = std::min(std::size_t{out_process_ids_size}, num_processes);
2013
2014 for (std::size_t i = 0; i < copy_amount; ++i) {
2015 Memory::Write64(out_process_ids, process_list[i]->GetProcessID());
2016 out_process_ids += sizeof(u64);
2017 }
2018
2019 *out_num_processes = static_cast<u32>(num_processes);
2020 return RESULT_SUCCESS;
2021}
2022
2023ResultCode GetThreadList(u32* out_num_threads, VAddr out_thread_ids, u32 out_thread_ids_size,
2024 Handle debug_handle) {
2025 // TODO: Handle this case when debug events are supported.
2026 UNIMPLEMENTED_IF(debug_handle != InvalidHandle);
2027
2028 LOG_DEBUG(Kernel_SVC, "called. out_thread_ids=0x{:016X}, out_thread_ids_size={}",
2029 out_thread_ids, out_thread_ids_size);
2030
2031 // If the size is negative or larger than INT32_MAX / sizeof(u64)
2032 if ((out_thread_ids_size & 0xF0000000) != 0) {
2033 LOG_ERROR(Kernel_SVC, "Supplied size outside [0, 0x0FFFFFFF] range. size={}",
2034 out_thread_ids_size);
2035 return ERR_OUT_OF_RANGE;
2036 }
2037
2038 const auto* const current_process = Core::System::GetInstance().Kernel().CurrentProcess();
2039 const auto& vm_manager = current_process->VMManager();
2040 const auto total_copy_size = out_thread_ids_size * sizeof(u64);
2041
2042 if (out_thread_ids_size > 0 &&
2043 !vm_manager.IsWithinAddressSpace(out_thread_ids, total_copy_size)) {
2044 LOG_ERROR(Kernel_SVC, "Address range outside address space. begin=0x{:016X}, end=0x{:016X}",
2045 out_thread_ids, out_thread_ids + total_copy_size);
2046 return ERR_INVALID_ADDRESS_STATE;
2047 }
2048
2049 const auto& thread_list = current_process->GetThreadList();
2050 const auto num_threads = thread_list.size();
2051 const auto copy_amount = std::min(std::size_t{out_thread_ids_size}, num_threads);
2052
2053 auto list_iter = thread_list.cbegin();
2054 for (std::size_t i = 0; i < copy_amount; ++i, ++list_iter) {
2055 Memory::Write64(out_thread_ids, (*list_iter)->GetThreadID());
2056 out_thread_ids += sizeof(u64);
2057 }
2058
2059 *out_num_threads = static_cast<u32>(num_threads);
2060 return RESULT_SUCCESS;
2061}
2062
1988namespace { 2063namespace {
1989struct FunctionDef { 2064struct FunctionDef {
1990 using Func = void(); 2065 using Func = void();
@@ -2097,8 +2172,8 @@ static const FunctionDef SVC_Table[] = {
2097 {0x62, nullptr, "TerminateDebugProcess"}, 2172 {0x62, nullptr, "TerminateDebugProcess"},
2098 {0x63, nullptr, "GetDebugEvent"}, 2173 {0x63, nullptr, "GetDebugEvent"},
2099 {0x64, nullptr, "ContinueDebugEvent"}, 2174 {0x64, nullptr, "ContinueDebugEvent"},
2100 {0x65, nullptr, "GetProcessList"}, 2175 {0x65, SvcWrap<GetProcessList>, "GetProcessList"},
2101 {0x66, nullptr, "GetThreadList"}, 2176 {0x66, SvcWrap<GetThreadList>, "GetThreadList"},
2102 {0x67, nullptr, "GetDebugThreadContext"}, 2177 {0x67, nullptr, "GetDebugThreadContext"},
2103 {0x68, nullptr, "SetDebugThreadContext"}, 2178 {0x68, nullptr, "SetDebugThreadContext"},
2104 {0x69, nullptr, "QueryDebugProcessMemory"}, 2179 {0x69, nullptr, "QueryDebugProcessMemory"},
@@ -2140,7 +2215,7 @@ void CallSVC(u32 immediate) {
2140 MICROPROFILE_SCOPE(Kernel_SVC); 2215 MICROPROFILE_SCOPE(Kernel_SVC);
2141 2216
2142 // Lock the global kernel mutex when we enter the kernel HLE. 2217 // Lock the global kernel mutex when we enter the kernel HLE.
2143 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 2218 std::lock_guard lock{HLE::g_hle_lock};
2144 2219
2145 const FunctionDef* info = GetSVCInfo(immediate); 2220 const FunctionDef* info = GetSVCInfo(immediate);
2146 if (info) { 2221 if (info) {
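The new GetProcessList and GetThreadList handlers share the same guard on the caller-supplied element count: testing (size & 0xF0000000) != 0 rejects any count above 0x0FFFFFFF, which keeps count * sizeof(u64) within INT32_MAX before the address-space check and the copy loop run. A standalone sketch of that guard, separate from the SVC plumbing:

    #include <cstdint>

    // Same check as in GetProcessList/GetThreadList: the count is accepted only
    // when its top four bits are clear, i.e. count <= 0x0FFFFFFF, so that
    // count * sizeof(std::uint64_t) cannot exceed INT32_MAX.
    constexpr bool IsValidOutputCount(std::uint32_t count) {
        return (count & 0xF0000000) == 0;
    }

    static_assert(IsValidOutputCount(0x0FFFFFFF));
    static_assert(!IsValidOutputCount(0x10000000));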
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 2a2c2c5ea..b3733680f 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -78,6 +78,14 @@ void SvcWrap() {
78 FuncReturn(retval); 78 FuncReturn(retval);
79} 79}
80 80
81template <ResultCode func(u32*, u64, u32)>
82void SvcWrap() {
83 u32 param_1 = 0;
84 const u32 retval = func(&param_1, Param(1), static_cast<u32>(Param(2))).raw;
85 Core::CurrentArmInterface().SetReg(1, param_1);
86 FuncReturn(retval);
87}
88
81template <ResultCode func(u64*, u32)> 89template <ResultCode func(u64*, u32)>
82void SvcWrap() { 90void SvcWrap() {
83 u64 param_1 = 0; 91 u64 param_1 = 0;
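The added overload covers SVCs shaped like ResultCode(u32*, u64, u32), which is what lets the dispatch table bind SvcWrap<GetProcessList> above: X1 and X2 supply the address and count, the produced count is written back through register 1, and the raw result code goes back through FuncReturn. A simplified model of the wrapper pattern, using a fake register file rather than the emulator's ARM interface:

    #include <cstdint>

    using ResultRaw = std::uint32_t;

    // Stand-in for the guest register file; the real code goes through
    // Core::CurrentArmInterface().
    struct FakeRegs {
        std::uint64_t x[31]{};
    };

    // Marshals registers for an SVC with signature ResultRaw(u32*, u64, u32),
    // mirroring the SvcWrap overload added above.
    template <ResultRaw func(std::uint32_t*, std::uint64_t, std::uint32_t)>
    void WrapU32OutU64U32(FakeRegs& regs) {
        std::uint32_t out = 0;
        const ResultRaw retval =
            func(&out, regs.x[1], static_cast<std::uint32_t>(regs.x[2]));
        regs.x[1] = out;     // SetReg(1, param_1)
        regs.x[0] = retval;  // FuncReturn(retval), which lands in the result register
    }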
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 3b22e8e0d..1b891f632 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -28,7 +28,7 @@
28 28
29namespace Kernel { 29namespace Kernel {
30 30
31bool Thread::ShouldWait(Thread* thread) const { 31bool Thread::ShouldWait(const Thread* thread) const {
32 return status != ThreadStatus::Dead; 32 return status != ThreadStatus::Dead;
33} 33}
34 34
@@ -62,6 +62,8 @@ void Thread::Stop() {
62 } 62 }
63 wait_objects.clear(); 63 wait_objects.clear();
64 64
65 owner_process->UnregisterThread(this);
66
65 // Mark the TLS slot in the thread's page as free. 67 // Mark the TLS slot in the thread's page as free.
66 owner_process->FreeTLSSlot(tls_address); 68 owner_process->FreeTLSSlot(tls_address);
67} 69}
@@ -105,6 +107,7 @@ void Thread::ResumeFromWait() {
105 case ThreadStatus::WaitSleep: 107 case ThreadStatus::WaitSleep:
106 case ThreadStatus::WaitIPC: 108 case ThreadStatus::WaitIPC:
107 case ThreadStatus::WaitMutex: 109 case ThreadStatus::WaitMutex:
110 case ThreadStatus::WaitCondVar:
108 case ThreadStatus::WaitArb: 111 case ThreadStatus::WaitArb:
109 break; 112 break;
110 113
@@ -198,9 +201,11 @@ ResultVal<SharedPtr<Thread>> Thread::Create(KernelCore& kernel, std::string name
198 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap(); 201 thread->callback_handle = kernel.ThreadWakeupCallbackHandleTable().Create(thread).Unwrap();
199 thread->owner_process = &owner_process; 202 thread->owner_process = &owner_process;
200 thread->scheduler = &system.Scheduler(processor_id); 203 thread->scheduler = &system.Scheduler(processor_id);
201 thread->scheduler->AddThread(thread, priority); 204 thread->scheduler->AddThread(thread);
202 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread); 205 thread->tls_address = thread->owner_process->MarkNextAvailableTLSSlotAsUsed(*thread);
203 206
207 thread->owner_process->RegisterThread(thread.get());
208
204 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used 209 // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
205 // to initialize the context 210 // to initialize the context
206 ResetThreadContext(thread->context, stack_top, entry_point, arg); 211 ResetThreadContext(thread->context, stack_top, entry_point, arg);
@@ -228,16 +233,16 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
228 context.cpu_registers[1] = output; 233 context.cpu_registers[1] = output;
229} 234}
230 235
231s32 Thread::GetWaitObjectIndex(WaitObject* object) const { 236s32 Thread::GetWaitObjectIndex(const WaitObject* object) const {
232 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything"); 237 ASSERT_MSG(!wait_objects.empty(), "Thread is not waiting for anything");
233 auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object); 238 const auto match = std::find(wait_objects.rbegin(), wait_objects.rend(), object);
234 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1); 239 return static_cast<s32>(std::distance(match, wait_objects.rend()) - 1);
235} 240}
236 241
237VAddr Thread::GetCommandBufferAddress() const { 242VAddr Thread::GetCommandBufferAddress() const {
238 // Offset from the start of TLS at which the IPC command buffer begins. 243 // Offset from the start of TLS at which the IPC command buffer begins.
239 static constexpr int CommandHeaderOffset = 0x80; 244 constexpr u64 command_header_offset = 0x80;
240 return GetTLSAddress() + CommandHeaderOffset; 245 return GetTLSAddress() + command_header_offset;
241} 246}
242 247
243void Thread::SetStatus(ThreadStatus new_status) { 248void Thread::SetStatus(ThreadStatus new_status) {
@@ -351,7 +356,7 @@ void Thread::ChangeScheduler() {
351 if (*new_processor_id != processor_id) { 356 if (*new_processor_id != processor_id) {
352 // Remove thread from previous core's scheduler 357 // Remove thread from previous core's scheduler
353 scheduler->RemoveThread(this); 358 scheduler->RemoveThread(this);
354 next_scheduler.AddThread(this, current_priority); 359 next_scheduler.AddThread(this);
355 } 360 }
356 361
357 processor_id = *new_processor_id; 362 processor_id = *new_processor_id;
@@ -366,7 +371,7 @@ void Thread::ChangeScheduler() {
366 system.CpuCore(processor_id).PrepareReschedule(); 371 system.CpuCore(processor_id).PrepareReschedule();
367} 372}
368 373
369bool Thread::AllWaitObjectsReady() { 374bool Thread::AllWaitObjectsReady() const {
370 return std::none_of( 375 return std::none_of(
371 wait_objects.begin(), wait_objects.end(), 376 wait_objects.begin(), wait_objects.end(),
372 [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); }); 377 [this](const SharedPtr<WaitObject>& object) { return object->ShouldWait(this); });
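Thread::Create now registers the new thread with its owning Process and Thread::Stop unregisters it, which is the bookkeeping svcGetThreadList relies on when it walks Process::GetThreadList(). The Process side of that change is outside this excerpt; a minimal sketch of the shape being assumed, with a hypothetical thread_list member:

    #include <list>

    class Thread;

    // Hypothetical sketch of the registration bookkeeping; the real methods
    // live on Kernel::Process in process.cpp/h.
    class ProcessThreadRegistry {
    public:
        void RegisterThread(Thread* thread) {
            thread_list.push_back(thread);
        }

        void UnregisterThread(Thread* thread) {
            thread_list.remove(thread);
        }

        const std::list<Thread*>& GetThreadList() const {
            return thread_list;
        }

    private:
        std::list<Thread*> thread_list;
    };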
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index faad5f391..73e5d1bb4 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -51,7 +51,8 @@ enum class ThreadStatus {
51 WaitIPC, ///< Waiting for the reply from an IPC request 51 WaitIPC, ///< Waiting for the reply from an IPC request
52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false 52 WaitSynchAny, ///< Waiting due to WaitSynch1 or WaitSynchN with wait_all = false
53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true 53 WaitSynchAll, ///< Waiting due to WaitSynchronizationN with wait_all = true
54 WaitMutex, ///< Waiting due to an ArbitrateLock/WaitProcessWideKey svc 54 WaitMutex, ///< Waiting due to an ArbitrateLock svc
 55 WaitCondVar, ///< Waiting due to a WaitProcessWideKey svc
55 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc 56 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc
56 Dormant, ///< Created but not yet made ready 57 Dormant, ///< Created but not yet made ready
57 Dead ///< Run to completion, or forcefully terminated 58 Dead ///< Run to completion, or forcefully terminated
@@ -110,7 +111,7 @@ public:
110 return HANDLE_TYPE; 111 return HANDLE_TYPE;
111 } 112 }
112 113
113 bool ShouldWait(Thread* thread) const override; 114 bool ShouldWait(const Thread* thread) const override;
114 void Acquire(Thread* thread) override; 115 void Acquire(Thread* thread) override;
115 116
116 /** 117 /**
@@ -204,7 +205,7 @@ public:
204 * object in the list. 205 * object in the list.
205 * @param object Object to query the index of. 206 * @param object Object to query the index of.
206 */ 207 */
207 s32 GetWaitObjectIndex(WaitObject* object) const; 208 s32 GetWaitObjectIndex(const WaitObject* object) const;
208 209
209 /** 210 /**
210 * Stops a thread, invalidating it from further use 211 * Stops a thread, invalidating it from further use
@@ -298,7 +299,7 @@ public:
298 } 299 }
299 300
300 /// Determines whether all the objects this thread is waiting on are ready. 301 /// Determines whether all the objects this thread is waiting on are ready.
301 bool AllWaitObjectsReady(); 302 bool AllWaitObjectsReady() const;
302 303
303 const MutexWaitingThreads& GetMutexWaitingThreads() const { 304 const MutexWaitingThreads& GetMutexWaitingThreads() const {
304 return wait_mutex_threads; 305 return wait_mutex_threads;
diff --git a/src/core/hle/kernel/transfer_memory.cpp b/src/core/hle/kernel/transfer_memory.cpp
index 23228e1b5..26c4e5e67 100644
--- a/src/core/hle/kernel/transfer_memory.cpp
+++ b/src/core/hle/kernel/transfer_memory.cpp
@@ -14,8 +14,8 @@ namespace Kernel {
14TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {} 14TransferMemory::TransferMemory(KernelCore& kernel) : Object{kernel} {}
15TransferMemory::~TransferMemory() = default; 15TransferMemory::~TransferMemory() = default;
16 16
17SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, 17SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_address, u64 size,
18 size_t size, MemoryPermission permissions) { 18 MemoryPermission permissions) {
19 SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)}; 19 SharedPtr<TransferMemory> transfer_memory{new TransferMemory(kernel)};
20 20
21 transfer_memory->base_address = base_address; 21 transfer_memory->base_address = base_address;
@@ -26,7 +26,15 @@ SharedPtr<TransferMemory> TransferMemory::Create(KernelCore& kernel, VAddr base_
26 return transfer_memory; 26 return transfer_memory;
27} 27}
28 28
29ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermission permissions) { 29const u8* TransferMemory::GetPointer() const {
30 return backing_block.get()->data();
31}
32
33u64 TransferMemory::GetSize() const {
34 return memory_size;
35}
36
37ResultCode TransferMemory::MapMemory(VAddr address, u64 size, MemoryPermission permissions) {
30 if (memory_size != size) { 38 if (memory_size != size) {
31 return ERR_INVALID_SIZE; 39 return ERR_INVALID_SIZE;
32 } 40 }
@@ -39,13 +47,13 @@ ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermissio
39 return ERR_INVALID_STATE; 47 return ERR_INVALID_STATE;
40 } 48 }
41 49
50 backing_block = std::make_shared<std::vector<u8>>(size);
51
42 const auto map_state = owner_permissions == MemoryPermission::None 52 const auto map_state = owner_permissions == MemoryPermission::None
43 ? MemoryState::TransferMemoryIsolated 53 ? MemoryState::TransferMemoryIsolated
44 : MemoryState::TransferMemory; 54 : MemoryState::TransferMemory;
45 auto& vm_manager = owner_process->VMManager(); 55 auto& vm_manager = owner_process->VMManager();
46 const auto map_result = vm_manager.MapMemoryBlock( 56 const auto map_result = vm_manager.MapMemoryBlock(address, backing_block, 0, size, map_state);
47 address, std::make_shared<std::vector<u8>>(size), 0, size, map_state);
48
49 if (map_result.Failed()) { 57 if (map_result.Failed()) {
50 return map_result.Code(); 58 return map_result.Code();
51 } 59 }
@@ -54,7 +62,7 @@ ResultCode TransferMemory::MapMemory(VAddr address, size_t size, MemoryPermissio
54 return RESULT_SUCCESS; 62 return RESULT_SUCCESS;
55} 63}
56 64
57ResultCode TransferMemory::UnmapMemory(VAddr address, size_t size) { 65ResultCode TransferMemory::UnmapMemory(VAddr address, u64 size) {
58 if (memory_size != size) { 66 if (memory_size != size) {
59 return ERR_INVALID_SIZE; 67 return ERR_INVALID_SIZE;
60 } 68 }
diff --git a/src/core/hle/kernel/transfer_memory.h b/src/core/hle/kernel/transfer_memory.h
index ec294951e..a140b1e2b 100644
--- a/src/core/hle/kernel/transfer_memory.h
+++ b/src/core/hle/kernel/transfer_memory.h
@@ -4,6 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
8#include <vector>
9
7#include "core/hle/kernel/object.h" 10#include "core/hle/kernel/object.h"
8 11
9union ResultCode; 12union ResultCode;
@@ -25,7 +28,7 @@ class TransferMemory final : public Object {
25public: 28public:
26 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory; 29 static constexpr HandleType HANDLE_TYPE = HandleType::TransferMemory;
27 30
28 static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, size_t size, 31 static SharedPtr<TransferMemory> Create(KernelCore& kernel, VAddr base_address, u64 size,
29 MemoryPermission permissions); 32 MemoryPermission permissions);
30 33
31 TransferMemory(const TransferMemory&) = delete; 34 TransferMemory(const TransferMemory&) = delete;
@@ -46,6 +49,12 @@ public:
46 return HANDLE_TYPE; 49 return HANDLE_TYPE;
47 } 50 }
48 51
52 /// Gets a pointer to the backing block of this instance.
53 const u8* GetPointer() const;
54
55 /// Gets the size of the memory backing this instance in bytes.
56 u64 GetSize() const;
57
49 /// Attempts to map transfer memory with the given range and memory permissions. 58 /// Attempts to map transfer memory with the given range and memory permissions.
50 /// 59 ///
51 /// @param address The base address to being mapping memory at. 60 /// @param address The base address to being mapping memory at.
@@ -56,7 +65,7 @@ public:
56 /// the same values that were given when creating the transfer memory 65 /// the same values that were given when creating the transfer memory
57 /// instance. 66 /// instance.
58 /// 67 ///
59 ResultCode MapMemory(VAddr address, size_t size, MemoryPermission permissions); 68 ResultCode MapMemory(VAddr address, u64 size, MemoryPermission permissions);
60 69
61 /// Unmaps the transfer memory with the given range 70 /// Unmaps the transfer memory with the given range
62 /// 71 ///
@@ -66,17 +75,20 @@ public:
66 /// @pre The given address and size must be the same as the ones used 75 /// @pre The given address and size must be the same as the ones used
67 /// to create the transfer memory instance. 76 /// to create the transfer memory instance.
68 /// 77 ///
69 ResultCode UnmapMemory(VAddr address, size_t size); 78 ResultCode UnmapMemory(VAddr address, u64 size);
70 79
71private: 80private:
72 explicit TransferMemory(KernelCore& kernel); 81 explicit TransferMemory(KernelCore& kernel);
73 ~TransferMemory() override; 82 ~TransferMemory() override;
74 83
84 /// Memory block backing this instance.
85 std::shared_ptr<std::vector<u8>> backing_block;
86
75 /// The base address for the memory managed by this instance. 87 /// The base address for the memory managed by this instance.
76 VAddr base_address = 0; 88 VAddr base_address = 0;
77 89
78 /// Size of the memory, in bytes, that this instance manages. 90 /// Size of the memory, in bytes, that this instance manages.
79 size_t memory_size = 0; 91 u64 memory_size = 0;
80 92
81 /// The memory permissions that are applied to this instance. 93 /// The memory permissions that are applied to this instance.
82 MemoryPermission owner_permissions{}; 94 MemoryPermission owner_permissions{};
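Taken together, the TransferMemory interface now allocates its backing block lazily in MapMemory and requires map and unmap sizes to match the size given at creation, otherwise ERR_INVALID_SIZE is returned. A rough usage sketch: the address, size and permission values are placeholders, error handling is elided, and the association with the current process is assumed to be established by Create as the SVC handlers expect:

    #include "core/hle/kernel/shared_memory.h"   // assumed to provide Kernel::MemoryPermission
    #include "core/hle/kernel/transfer_memory.h"

    // Illustrative round trip: map and unmap must both use the creation size.
    void TransferMemoryRoundTrip(Kernel::KernelCore& kernel) {
        constexpr VAddr base = 0x10000000;  // placeholder guest address
        constexpr u64 size = 0x1000;        // one page

        auto tmem = Kernel::TransferMemory::Create(kernel, base, size,
                                                   Kernel::MemoryPermission::ReadWrite);

        if (tmem->MapMemory(base, size, Kernel::MemoryPermission::ReadWrite).IsSuccess()) {
            // ... guest code uses the region ...
            tmem->UnmapMemory(base, size);
        }
    }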
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index 22bf55ce7..ec0a480ce 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -256,57 +256,50 @@ ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_p
256 return RESULT_SUCCESS; 256 return RESULT_SUCCESS;
257} 257}
258 258
259ResultVal<VAddr> VMManager::HeapAllocate(VAddr target, u64 size, VMAPermission perms) { 259ResultVal<VAddr> VMManager::SetHeapSize(u64 size) {
260 if (!IsWithinHeapRegion(target, size)) { 260 if (size > GetHeapRegionSize()) {
261 return ERR_INVALID_ADDRESS; 261 return ERR_OUT_OF_MEMORY;
262 }
263
264 // No need to do any additional work if the heap is already the given size.
265 if (size == GetCurrentHeapSize()) {
266 return MakeResult(heap_region_base);
262 } 267 }
263 268
264 if (heap_memory == nullptr) { 269 if (heap_memory == nullptr) {
265 // Initialize heap 270 // Initialize heap
266 heap_memory = std::make_shared<std::vector<u8>>(); 271 heap_memory = std::make_shared<std::vector<u8>>(size);
267 heap_start = heap_end = target; 272 heap_end = heap_region_base + size;
268 } else { 273 } else {
269 UnmapRange(heap_start, heap_end - heap_start); 274 UnmapRange(heap_region_base, GetCurrentHeapSize());
270 }
271
272 // If necessary, expand backing vector to cover new heap extents.
273 if (target < heap_start) {
274 heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
275 heap_start = target;
276 RefreshMemoryBlockMappings(heap_memory.get());
277 }
278 if (target + size > heap_end) {
279 heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
280 heap_end = target + size;
281 RefreshMemoryBlockMappings(heap_memory.get());
282 } 275 }
283 ASSERT(heap_end - heap_start == heap_memory->size());
284 276
285 CASCADE_RESULT(auto vma, MapMemoryBlock(target, heap_memory, target - heap_start, size, 277 // If necessary, expand backing vector to cover new heap extents in
286 MemoryState::Heap)); 278 // the case of allocating. Otherwise, shrink the backing memory,
287 Reprotect(vma, perms); 279 // if a smaller heap has been requested.
280 const u64 old_heap_size = GetCurrentHeapSize();
281 if (size > old_heap_size) {
282 const u64 alloc_size = size - old_heap_size;
288 283
289 heap_used = size; 284 heap_memory->insert(heap_memory->end(), alloc_size, 0);
290 285 RefreshMemoryBlockMappings(heap_memory.get());
291 return MakeResult<VAddr>(heap_end - size); 286 } else if (size < old_heap_size) {
292} 287 heap_memory->resize(size);
288 heap_memory->shrink_to_fit();
293 289
294ResultCode VMManager::HeapFree(VAddr target, u64 size) { 290 RefreshMemoryBlockMappings(heap_memory.get());
295 if (!IsWithinHeapRegion(target, size)) {
296 return ERR_INVALID_ADDRESS;
297 } 291 }
298 292
299 if (size == 0) { 293 heap_end = heap_region_base + size;
300 return RESULT_SUCCESS; 294 ASSERT(GetCurrentHeapSize() == heap_memory->size());
301 }
302 295
303 const ResultCode result = UnmapRange(target, size); 296 const auto mapping_result =
304 if (result.IsError()) { 297 MapMemoryBlock(heap_region_base, heap_memory, 0, size, MemoryState::Heap);
305 return result; 298 if (mapping_result.Failed()) {
299 return mapping_result.Code();
306 } 300 }
307 301
308 heap_used -= size; 302 return MakeResult<VAddr>(heap_region_base);
309 return RESULT_SUCCESS;
310} 303}
311 304
312MemoryInfo VMManager::QueryMemory(VAddr address) const { 305MemoryInfo VMManager::QueryMemory(VAddr address) const {
@@ -598,6 +591,7 @@ void VMManager::InitializeMemoryRegionRanges(FileSys::ProgramAddressSpaceType ty
598 591
599 heap_region_base = map_region_end; 592 heap_region_base = map_region_end;
600 heap_region_end = heap_region_base + heap_region_size; 593 heap_region_end = heap_region_base + heap_region_size;
594 heap_end = heap_region_base;
601 595
602 new_map_region_base = heap_region_end; 596 new_map_region_base = heap_region_end;
603 new_map_region_end = new_map_region_base + new_map_region_size; 597 new_map_region_end = new_map_region_base + new_map_region_size;
@@ -692,10 +686,6 @@ u64 VMManager::GetTotalMemoryUsage() const {
692 return 0xF8000000; 686 return 0xF8000000;
693} 687}
694 688
695u64 VMManager::GetTotalHeapUsage() const {
696 return heap_used;
697}
698
699VAddr VMManager::GetAddressSpaceBaseAddress() const { 689VAddr VMManager::GetAddressSpaceBaseAddress() const {
700 return address_space_base; 690 return address_space_base;
701} 691}
@@ -778,6 +768,10 @@ u64 VMManager::GetHeapRegionSize() const {
778 return heap_region_end - heap_region_base; 768 return heap_region_end - heap_region_base;
779} 769}
780 770
771u64 VMManager::GetCurrentHeapSize() const {
772 return heap_end - heap_region_base;
773}
774
781bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const { 775bool VMManager::IsWithinHeapRegion(VAddr address, u64 size) const {
782 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(), 776 return IsInsideAddressRange(address, size, GetHeapRegionBaseAddress(),
783 GetHeapRegionEndAddress()); 777 GetHeapRegionEndAddress());
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 7cdff6094..6f484b7bf 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -380,11 +380,41 @@ public:
380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary. 380 /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms); 381 ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
382 382
383 ResultVal<VAddr> HeapAllocate(VAddr target, u64 size, VMAPermission perms);
384 ResultCode HeapFree(VAddr target, u64 size);
385
386 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state); 383 ResultCode MirrorMemory(VAddr dst_addr, VAddr src_addr, u64 size, MemoryState state);
387 384
385 /// Attempts to allocate a heap with the given size.
386 ///
387 /// @param size The size of the heap to allocate in bytes.
388 ///
389 /// @note If a heap is currently allocated, and this is called
390 /// with a size that is equal to the size of the current heap,
391 /// then this function will do nothing and return the current
392 /// heap's starting address, as there's no need to perform
393 /// any additional heap allocation work.
394 ///
395 /// @note If a heap is currently allocated, and this is called
396 /// with a size less than the current heap's size, then
397 /// this function will attempt to shrink the heap.
398 ///
399 /// @note If a heap is currently allocated, and this is called
400 /// with a size larger than the current heap's size, then
401 /// this function will attempt to extend the size of the heap.
402 ///
403 /// @returns A result indicating either success or failure.
404 /// <p>
405 /// If successful, this function will return a result
 406 /// containing the starting address of the allocated heap.
407 /// <p>
408 /// If unsuccessful, this function will return a result
409 /// containing an error code.
410 ///
411 /// @pre The given size must lie within the allowable heap
412 /// memory region managed by this VMManager instance.
413 /// Failure to abide by this will result in ERR_OUT_OF_MEMORY
414 /// being returned as the result.
415 ///
416 ResultVal<VAddr> SetHeapSize(u64 size);
417
388 /// Queries the memory manager for information about the given address. 418 /// Queries the memory manager for information about the given address.
389 /// 419 ///
390 /// @param address The address to query the memory manager about for information. 420 /// @param address The address to query the memory manager about for information.
@@ -418,9 +448,6 @@ public:
418 /// Gets the total memory usage, used by svcGetInfo 448 /// Gets the total memory usage, used by svcGetInfo
419 u64 GetTotalMemoryUsage() const; 449 u64 GetTotalMemoryUsage() const;
420 450
421 /// Gets the total heap usage, used by svcGetInfo
422 u64 GetTotalHeapUsage() const;
423
424 /// Gets the address space base address 451 /// Gets the address space base address
425 VAddr GetAddressSpaceBaseAddress() const; 452 VAddr GetAddressSpaceBaseAddress() const;
426 453
@@ -469,6 +496,13 @@ public:
469 /// Gets the total size of the heap region in bytes. 496 /// Gets the total size of the heap region in bytes.
470 u64 GetHeapRegionSize() const; 497 u64 GetHeapRegionSize() const;
471 498
499 /// Gets the total size of the current heap in bytes.
500 ///
501 /// @note This is the current allocated heap size, not the size
502 /// of the region it's allowed to exist within.
503 ///
504 u64 GetCurrentHeapSize() const;
505
472 /// Determines whether or not the specified range is within the heap region. 506 /// Determines whether or not the specified range is within the heap region.
473 bool IsWithinHeapRegion(VAddr address, u64 size) const; 507 bool IsWithinHeapRegion(VAddr address, u64 size) const;
474 508
@@ -617,9 +651,6 @@ private:
617 VAddr new_map_region_base = 0; 651 VAddr new_map_region_base = 0;
618 VAddr new_map_region_end = 0; 652 VAddr new_map_region_end = 0;
619 653
620 VAddr main_code_region_base = 0;
621 VAddr main_code_region_end = 0;
622
623 VAddr tls_io_region_base = 0; 654 VAddr tls_io_region_base = 0;
624 VAddr tls_io_region_end = 0; 655 VAddr tls_io_region_end = 0;
625 656
@@ -628,9 +659,9 @@ private:
628 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous 659 // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
629 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe. 660 // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
630 std::shared_ptr<std::vector<u8>> heap_memory; 661 std::shared_ptr<std::vector<u8>> heap_memory;
631 // The left/right bounds of the address space covered by heap_memory. 662
632 VAddr heap_start = 0; 663 // The end of the currently allocated heap. This is not an inclusive
664 // end of the range. This is essentially 'base_address + current_size'.
633 VAddr heap_end = 0; 665 VAddr heap_end = 0;
634 u64 heap_used = 0;
635}; 666};
636} // namespace Kernel 667} // namespace Kernel
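In practice the notes above mean SetHeapSize() compares the request with GetCurrentHeapSize() and then grows, shrinks, or leaves the heap untouched, always reporting the heap region base on success. A hedged usage sketch against an already-initialized VMManager:

    #include "core/hle/kernel/vm_manager.h"

    // Sketch: grow, shrink, then repeat the same size as a no-op.
    void ResizeHeapExample(Kernel::VMManager& vm_manager) {
        // Grow to 16 MiB; on success the result holds the heap region base.
        const auto grown = vm_manager.SetHeapSize(16 * 1024 * 1024);
        if (grown.Failed()) {
            return; // e.g. ERR_OUT_OF_MEMORY when the size exceeds the heap region
        }

        // Shrink to 4 MiB; the backing vector is resized and remapped.
        const auto shrunk = vm_manager.SetHeapSize(4 * 1024 * 1024);

        // Asking for the current size again returns the same base without
        // doing any additional mapping work.
        const auto unchanged = vm_manager.SetHeapSize(4 * 1024 * 1024);
        (void)shrunk;
        (void)unchanged;
    }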
diff --git a/src/core/hle/kernel/wait_object.h b/src/core/hle/kernel/wait_object.h
index 5987fb971..04464a51a 100644
--- a/src/core/hle/kernel/wait_object.h
+++ b/src/core/hle/kernel/wait_object.h
@@ -24,7 +24,7 @@ public:
24 * @param thread The thread about which we're deciding. 24 * @param thread The thread about which we're deciding.
25 * @return True if the current thread should wait due to this object being unavailable 25 * @return True if the current thread should wait due to this object being unavailable
26 */ 26 */
27 virtual bool ShouldWait(Thread* thread) const = 0; 27 virtual bool ShouldWait(const Thread* thread) const = 0;
28 28
29 /// Acquire/lock the object for the specified thread if it is available 29 /// Acquire/lock the object for the specified thread if it is available
30 virtual void Acquire(Thread* thread) = 0; 30 virtual void Acquire(Thread* thread) = 0;
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 9c44e27c6..85271d418 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -13,7 +13,7 @@
13#include "core/hle/kernel/kernel.h" 13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h" 14#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/readable_event.h" 15#include "core/hle/kernel/readable_event.h"
16#include "core/hle/kernel/shared_memory.h" 16#include "core/hle/kernel/transfer_memory.h"
17#include "core/hle/kernel/writable_event.h" 17#include "core/hle/kernel/writable_event.h"
18#include "core/hle/service/acc/profile_manager.h" 18#include "core/hle/service/acc/profile_manager.h"
19#include "core/hle/service/am/am.h" 19#include "core/hle/service/am/am.h"
@@ -239,8 +239,8 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
239 {0, nullptr, "Exit"}, 239 {0, nullptr, "Exit"},
240 {1, &ISelfController::LockExit, "LockExit"}, 240 {1, &ISelfController::LockExit, "LockExit"},
241 {2, &ISelfController::UnlockExit, "UnlockExit"}, 241 {2, &ISelfController::UnlockExit, "UnlockExit"},
242 {3, nullptr, "EnterFatalSection"}, 242 {3, &ISelfController::EnterFatalSection, "EnterFatalSection"},
243 {4, nullptr, "LeaveFatalSection"}, 243 {4, &ISelfController::LeaveFatalSection, "LeaveFatalSection"},
244 {9, &ISelfController::GetLibraryAppletLaunchableEvent, "GetLibraryAppletLaunchableEvent"}, 244 {9, &ISelfController::GetLibraryAppletLaunchableEvent, "GetLibraryAppletLaunchableEvent"},
245 {10, &ISelfController::SetScreenShotPermission, "SetScreenShotPermission"}, 245 {10, &ISelfController::SetScreenShotPermission, "SetScreenShotPermission"},
246 {11, &ISelfController::SetOperationModeChangedNotification, "SetOperationModeChangedNotification"}, 246 {11, &ISelfController::SetOperationModeChangedNotification, "SetOperationModeChangedNotification"},
@@ -285,41 +285,54 @@ ISelfController::ISelfController(std::shared_ptr<NVFlinger::NVFlinger> nvflinger
285 285
286ISelfController::~ISelfController() = default; 286ISelfController::~ISelfController() = default;
287 287
288void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) { 288void ISelfController::LockExit(Kernel::HLERequestContext& ctx) {
289 // Takes 3 input u8s with each field located immediately after the previous
290 // u8, these are bool flags. No output.
291 LOG_WARNING(Service_AM, "(STUBBED) called"); 289 LOG_WARNING(Service_AM, "(STUBBED) called");
292 290
293 IPC::RequestParser rp{ctx}; 291 IPC::ResponseBuilder rb{ctx, 2};
292 rb.Push(RESULT_SUCCESS);
293}
294 294
295 struct FocusHandlingModeParams { 295void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) {
296 u8 unknown0; 296 LOG_WARNING(Service_AM, "(STUBBED) called");
297 u8 unknown1;
298 u8 unknown2;
299 };
300 auto flags = rp.PopRaw<FocusHandlingModeParams>();
301 297
302 IPC::ResponseBuilder rb{ctx, 2}; 298 IPC::ResponseBuilder rb{ctx, 2};
303 rb.Push(RESULT_SUCCESS); 299 rb.Push(RESULT_SUCCESS);
304} 300}
305 301
306void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) { 302void ISelfController::EnterFatalSection(Kernel::HLERequestContext& ctx) {
307 LOG_WARNING(Service_AM, "(STUBBED) called"); 303 ++num_fatal_sections_entered;
304 LOG_DEBUG(Service_AM, "called. Num fatal sections entered: {}", num_fatal_sections_entered);
308 305
309 IPC::ResponseBuilder rb{ctx, 2}; 306 IPC::ResponseBuilder rb{ctx, 2};
310 rb.Push(RESULT_SUCCESS); 307 rb.Push(RESULT_SUCCESS);
311} 308}
312 309
313void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) { 310void ISelfController::LeaveFatalSection(Kernel::HLERequestContext& ctx) {
314 IPC::RequestParser rp{ctx}; 311 LOG_DEBUG(Service_AM, "called.");
315 312
316 bool flag = rp.Pop<bool>(); 313 // Entry and exit of fatal sections must be balanced.
317 LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag); 314 if (num_fatal_sections_entered == 0) {
315 IPC::ResponseBuilder rb{ctx, 2};
316 rb.Push(ResultCode{ErrorModule::AM, 512});
317 return;
318 }
319
320 --num_fatal_sections_entered;
318 321
319 IPC::ResponseBuilder rb{ctx, 2}; 322 IPC::ResponseBuilder rb{ctx, 2};
320 rb.Push(RESULT_SUCCESS); 323 rb.Push(RESULT_SUCCESS);
321} 324}
322 325
326void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) {
327 LOG_WARNING(Service_AM, "(STUBBED) called");
328
329 launchable_event.writable->Signal();
330
331 IPC::ResponseBuilder rb{ctx, 2, 1};
332 rb.Push(RESULT_SUCCESS);
333 rb.PushCopyObjects(launchable_event.readable);
334}
335
323void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) { 336void ISelfController::SetScreenShotPermission(Kernel::HLERequestContext& ctx) {
324 LOG_WARNING(Service_AM, "(STUBBED) called"); 337 LOG_WARNING(Service_AM, "(STUBBED) called");
325 338
@@ -337,40 +350,52 @@ void ISelfController::SetOperationModeChangedNotification(Kernel::HLERequestCont
337 rb.Push(RESULT_SUCCESS); 350 rb.Push(RESULT_SUCCESS);
338} 351}
339 352
340void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) { 353void ISelfController::SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx) {
341 // Takes 3 input u8s with each field located immediately after the previous
342 // u8, these are bool flags. No output.
343 IPC::RequestParser rp{ctx}; 354 IPC::RequestParser rp{ctx};
344 355
345 bool enabled = rp.Pop<bool>(); 356 bool flag = rp.Pop<bool>();
346 LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled); 357 LOG_WARNING(Service_AM, "(STUBBED) called flag={}", flag);
347 358
348 IPC::ResponseBuilder rb{ctx, 2}; 359 IPC::ResponseBuilder rb{ctx, 2};
349 rb.Push(RESULT_SUCCESS); 360 rb.Push(RESULT_SUCCESS);
350} 361}
351 362
352void ISelfController::LockExit(Kernel::HLERequestContext& ctx) { 363void ISelfController::SetFocusHandlingMode(Kernel::HLERequestContext& ctx) {
353 LOG_WARNING(Service_AM, "(STUBBED) called"); 364 // Takes 3 input u8s with each field located immediately after the previous
365 // u8, these are bool flags. No output.
366 IPC::RequestParser rp{ctx};
367
368 struct FocusHandlingModeParams {
369 u8 unknown0;
370 u8 unknown1;
371 u8 unknown2;
372 };
373 const auto flags = rp.PopRaw<FocusHandlingModeParams>();
374
375 LOG_WARNING(Service_AM, "(STUBBED) called. unknown0={}, unknown1={}, unknown2={}",
376 flags.unknown0, flags.unknown1, flags.unknown2);
354 377
355 IPC::ResponseBuilder rb{ctx, 2}; 378 IPC::ResponseBuilder rb{ctx, 2};
356 rb.Push(RESULT_SUCCESS); 379 rb.Push(RESULT_SUCCESS);
357} 380}
358 381
359void ISelfController::UnlockExit(Kernel::HLERequestContext& ctx) { 382void ISelfController::SetRestartMessageEnabled(Kernel::HLERequestContext& ctx) {
360 LOG_WARNING(Service_AM, "(STUBBED) called"); 383 LOG_WARNING(Service_AM, "(STUBBED) called");
361 384
362 IPC::ResponseBuilder rb{ctx, 2}; 385 IPC::ResponseBuilder rb{ctx, 2};
363 rb.Push(RESULT_SUCCESS); 386 rb.Push(RESULT_SUCCESS);
364} 387}
365 388
366void ISelfController::GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx) { 389void ISelfController::SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx) {
367 LOG_WARNING(Service_AM, "(STUBBED) called"); 390 // Takes 3 input u8s with each field located immediately after the previous
391 // u8, these are bool flags. No output.
392 IPC::RequestParser rp{ctx};
368 393
369 launchable_event.writable->Signal(); 394 bool enabled = rp.Pop<bool>();
395 LOG_WARNING(Service_AM, "(STUBBED) called enabled={}", enabled);
370 396
371 IPC::ResponseBuilder rb{ctx, 2, 1}; 397 IPC::ResponseBuilder rb{ctx, 2};
372 rb.Push(RESULT_SUCCESS); 398 rb.Push(RESULT_SUCCESS);
373 rb.PushCopyObjects(launchable_event.readable);
374} 399}
375 400
376void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) { 401void ISelfController::SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx) {
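Note on the hunk above: EnterFatalSection/LeaveFatalSection amount to a balanced counter. Every Enter increments it, every Leave decrements it, and a Leave with the counter already at zero is rejected with an AM error (module AM, description 512). A minimal stand-alone sketch of that pattern, with the IPC plumbing stripped out and the class name invented purely for illustration:

    #include <cstdint>

    // Hypothetical stand-in for ISelfController's fatal-section bookkeeping.
    class FatalSectionTracker {
    public:
        void Enter() {
            ++num_entered;
        }

        // Returns false when exits are not balanced against entries,
        // mirroring the error path in LeaveFatalSection above.
        bool Leave() {
            if (num_entered == 0) {
                return false;
            }
            --num_entered;
            return true;
        }

    private:
        std::uint64_t num_entered = 0;
    };
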
@@ -907,19 +932,19 @@ void ILibraryAppletCreator::CreateTransferMemoryStorage(Kernel::HLERequestContex
907 rp.SetCurrentOffset(3); 932 rp.SetCurrentOffset(3);
908 const auto handle{rp.Pop<Kernel::Handle>()}; 933 const auto handle{rp.Pop<Kernel::Handle>()};
909 934
910 const auto shared_mem = 935 const auto transfer_mem =
911 Core::System::GetInstance().CurrentProcess()->GetHandleTable().Get<Kernel::SharedMemory>( 936 Core::System::GetInstance().CurrentProcess()->GetHandleTable().Get<Kernel::TransferMemory>(
912 handle); 937 handle);
913 938
914 if (shared_mem == nullptr) { 939 if (transfer_mem == nullptr) {
915 LOG_ERROR(Service_AM, "shared_mem is a nullpr for handle={:08X}", handle); 940 LOG_ERROR(Service_AM, "shared_mem is a nullpr for handle={:08X}", handle);
916 IPC::ResponseBuilder rb{ctx, 2}; 941 IPC::ResponseBuilder rb{ctx, 2};
917 rb.Push(ResultCode(-1)); 942 rb.Push(ResultCode(-1));
918 return; 943 return;
919 } 944 }
920 945
921 const u8* mem_begin = shared_mem->GetPointer(); 946 const u8* const mem_begin = transfer_mem->GetPointer();
922 const u8* mem_end = mem_begin + shared_mem->GetSize(); 947 const u8* const mem_end = mem_begin + transfer_mem->GetSize();
923 std::vector<u8> memory{mem_begin, mem_end}; 948 std::vector<u8> memory{mem_begin, mem_end};
924 949
925 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 950 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
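CreateTransferMemoryStorage now resolves the handle as Kernel::TransferMemory rather than Kernel::SharedMemory and copies its backing bytes into the storage it returns. The copy step in isolation, assuming only an object that exposes GetPointer()/GetSize() as in the hunk above (the type here is a simplified stand-in, not the real kernel object):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-in for an object backed by a raw byte range,
    // mirroring the GetPointer()/GetSize() calls used above.
    struct BackingMemory {
        const std::uint8_t* GetPointer() const { return data; }
        std::size_t GetSize() const { return size; }

        const std::uint8_t* data = nullptr;
        std::size_t size = 0;
    };

    std::vector<std::uint8_t> CopyBackingMemory(const BackingMemory& mem) {
        const std::uint8_t* const begin = mem.GetPointer();
        const std::uint8_t* const end = begin + mem.GetSize();
        return std::vector<std::uint8_t>(begin, end);
    }
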
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 565dd8e9e..991b7d47c 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -117,17 +117,19 @@ public:
117 ~ISelfController() override; 117 ~ISelfController() override;
118 118
119private: 119private:
120 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
121 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
122 void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx);
123 void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx);
124 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
125 void LockExit(Kernel::HLERequestContext& ctx); 120 void LockExit(Kernel::HLERequestContext& ctx);
126 void UnlockExit(Kernel::HLERequestContext& ctx); 121 void UnlockExit(Kernel::HLERequestContext& ctx);
122 void EnterFatalSection(Kernel::HLERequestContext& ctx);
123 void LeaveFatalSection(Kernel::HLERequestContext& ctx);
127 void GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx); 124 void GetLibraryAppletLaunchableEvent(Kernel::HLERequestContext& ctx);
125 void SetScreenShotPermission(Kernel::HLERequestContext& ctx);
126 void SetOperationModeChangedNotification(Kernel::HLERequestContext& ctx);
127 void SetPerformanceModeChangedNotification(Kernel::HLERequestContext& ctx);
128 void SetFocusHandlingMode(Kernel::HLERequestContext& ctx);
129 void SetRestartMessageEnabled(Kernel::HLERequestContext& ctx);
130 void SetOutOfFocusSuspendingEnabled(Kernel::HLERequestContext& ctx);
128 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx); 131 void SetScreenShotImageOrientation(Kernel::HLERequestContext& ctx);
129 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx); 132 void CreateManagedDisplayLayer(Kernel::HLERequestContext& ctx);
130 void SetScreenShotPermission(Kernel::HLERequestContext& ctx);
131 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx); 133 void SetHandlesRequestToDisplay(Kernel::HLERequestContext& ctx);
132 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 134 void SetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
133 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx); 135 void GetIdleTimeDetectionExtension(Kernel::HLERequestContext& ctx);
@@ -135,6 +137,7 @@ private:
135 std::shared_ptr<NVFlinger::NVFlinger> nvflinger; 137 std::shared_ptr<NVFlinger::NVFlinger> nvflinger;
136 Kernel::EventPair launchable_event; 138 Kernel::EventPair launchable_event;
137 u32 idle_time_detection_extension = 0; 139 u32 idle_time_detection_extension = 0;
140 u64 num_fatal_sections_entered = 0;
138}; 141};
139 142
140class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> { 143class ICommonStateGetter final : public ServiceFramework<ICommonStateGetter> {
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 21f5e64c7..39acb7b23 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -150,7 +150,6 @@ private:
150 void GetReleasedAudioOutBufferImpl(Kernel::HLERequestContext& ctx) { 150 void GetReleasedAudioOutBufferImpl(Kernel::HLERequestContext& ctx) {
151 LOG_DEBUG(Service_Audio, "called {}", ctx.Description()); 151 LOG_DEBUG(Service_Audio, "called {}", ctx.Description());
152 152
153 IPC::RequestParser rp{ctx};
154 const u64 max_count{ctx.GetWriteBufferSize() / sizeof(u64)}; 153 const u64 max_count{ctx.GetWriteBufferSize() / sizeof(u64)};
155 const auto released_buffers{audio_core.GetTagsAndReleaseBuffers(stream, max_count)}; 154 const auto released_buffers{audio_core.GetTagsAndReleaseBuffers(stream, max_count)};
156 155
@@ -194,12 +193,9 @@ private:
194void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) { 193void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) {
195 LOG_DEBUG(Service_Audio, "called"); 194 LOG_DEBUG(Service_Audio, "called");
196 195
197 IPC::RequestParser rp{ctx};
198
199 ctx.WriteBuffer(DefaultDevice); 196 ctx.WriteBuffer(DefaultDevice);
200 197
201 IPC::ResponseBuilder rb{ctx, 3}; 198 IPC::ResponseBuilder rb{ctx, 3};
202
203 rb.Push(RESULT_SUCCESS); 199 rb.Push(RESULT_SUCCESS);
204 rb.Push<u32>(1); // Amount of audio devices 200 rb.Push<u32>(1); // Amount of audio devices
205} 201}
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index c9de10a24..1dde6edb7 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -10,6 +10,7 @@
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/common_funcs.h" 11#include "common/common_funcs.h"
12#include "common/logging/log.h" 12#include "common/logging/log.h"
13#include "common/string_util.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/hle/ipc_helpers.h" 15#include "core/hle/ipc_helpers.h"
15#include "core/hle/kernel/hle_ipc.h" 16#include "core/hle/kernel/hle_ipc.h"
@@ -184,7 +185,6 @@ public:
184private: 185private:
185 void ListAudioDeviceName(Kernel::HLERequestContext& ctx) { 186 void ListAudioDeviceName(Kernel::HLERequestContext& ctx) {
186 LOG_WARNING(Service_Audio, "(STUBBED) called"); 187 LOG_WARNING(Service_Audio, "(STUBBED) called");
187 IPC::RequestParser rp{ctx};
188 188
189 constexpr std::array<char, 15> audio_interface{{"AudioInterface"}}; 189 constexpr std::array<char, 15> audio_interface{{"AudioInterface"}};
190 ctx.WriteBuffer(audio_interface); 190 ctx.WriteBuffer(audio_interface);
@@ -195,13 +195,13 @@ private:
195 } 195 }
196 196
197 void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { 197 void SetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) {
198 LOG_WARNING(Service_Audio, "(STUBBED) called");
199
200 IPC::RequestParser rp{ctx}; 198 IPC::RequestParser rp{ctx};
201 f32 volume = static_cast<f32>(rp.Pop<u32>()); 199 const f32 volume = rp.Pop<f32>();
202 200
203 auto file_buffer = ctx.ReadBuffer(); 201 const auto device_name_buffer = ctx.ReadBuffer();
204 auto end = std::find(file_buffer.begin(), file_buffer.end(), '\0'); 202 const std::string name = Common::StringFromBuffer(device_name_buffer);
203
204 LOG_WARNING(Service_Audio, "(STUBBED) called. name={}, volume={}", name, volume);
205 205
206 IPC::ResponseBuilder rb{ctx, 2}; 206 IPC::ResponseBuilder rb{ctx, 2};
207 rb.Push(RESULT_SUCCESS); 207 rb.Push(RESULT_SUCCESS);
@@ -209,7 +209,6 @@ private:
209 209
210 void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) { 210 void GetActiveAudioDeviceName(Kernel::HLERequestContext& ctx) {
211 LOG_WARNING(Service_Audio, "(STUBBED) called"); 211 LOG_WARNING(Service_Audio, "(STUBBED) called");
212 IPC::RequestParser rp{ctx};
213 212
214 constexpr std::array<char, 12> audio_interface{{"AudioDevice"}}; 213 constexpr std::array<char, 12> audio_interface{{"AudioDevice"}};
215 ctx.WriteBuffer(audio_interface); 214 ctx.WriteBuffer(audio_interface);
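SetAudioDeviceOutputVolume (like several handlers in fsp_srv.cpp further down) now goes through Common::StringFromBuffer instead of searching for the terminating NUL by hand. The helper's exact implementation is not part of this diff; a plausible minimal equivalent that captures the intended behaviour (truncate at the first '\0', if any) would be:

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative only: build a string from a fixed-size IPC buffer,
    // stopping at the first embedded '\0' if one is present.
    std::string StringFromBufferSketch(const std::vector<std::uint8_t>& buffer) {
        const auto terminator = std::find(buffer.begin(), buffer.end(), '\0');
        return std::string(buffer.begin(), terminator);
    }
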
diff --git a/src/core/hle/service/fatal/fatal.cpp b/src/core/hle/service/fatal/fatal.cpp
index 770590d0b..2c229bcad 100644
--- a/src/core/hle/service/fatal/fatal.cpp
+++ b/src/core/hle/service/fatal/fatal.cpp
@@ -25,21 +25,34 @@ Module::Interface::Interface(std::shared_ptr<Module> module, const char* name)
25Module::Interface::~Interface() = default; 25Module::Interface::~Interface() = default;
26 26
27struct FatalInfo { 27struct FatalInfo {
28 std::array<u64_le, 31> registers{}; // TODO(ogniK): See if this actually is registers or 28 enum class Architecture : s32 {
29 // not(find a game which has non zero valeus) 29 AArch64,
30 u64_le unk0{}; 30 AArch32,
31 u64_le unk1{}; 31 };
32 u64_le unk2{}; 32
33 u64_le unk3{}; 33 const char* ArchAsString() const {
34 u64_le unk4{}; 34 return arch == Architecture::AArch64 ? "AArch64" : "AArch32";
35 u64_le unk5{}; 35 }
36 u64_le unk6{}; 36
37 std::array<u64_le, 31> registers{};
38 u64_le sp{};
39 u64_le pc{};
40 u64_le pstate{};
41 u64_le afsr0{};
42 u64_le afsr1{};
43 u64_le esr{};
44 u64_le far{};
37 45
38 std::array<u64_le, 32> backtrace{}; 46 std::array<u64_le, 32> backtrace{};
39 u64_le unk7{}; 47 u64_le program_entry_point{};
40 u64_le unk8{}; 48
49 // Bit flags that indicate which registers have been set with values
50 // for this context. The service itself uses these to determine which
51 // registers to specifically print out.
52 u64_le set_flags{};
53
41 u32_le backtrace_size{}; 54 u32_le backtrace_size{};
42 u32_le unk9{}; 55 Architecture arch{};
43 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding? 56 u32_le unk10{}; // TODO(ogniK): Is this even used or is it just padding?
44}; 57};
45static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size"); 58static_assert(sizeof(FatalInfo) == 0x250, "FatalInfo is an invalid size");
@@ -52,36 +65,36 @@ enum class FatalType : u32 {
52 65
53static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) { 66static void GenerateErrorReport(ResultCode error_code, const FatalInfo& info) {
54 const auto title_id = Core::CurrentProcess()->GetTitleID(); 67 const auto title_id = Core::CurrentProcess()->GetTitleID();
55 std::string crash_report = 68 std::string crash_report = fmt::format(
56 fmt::format("Yuzu {}-{} crash report\n" 69 "Yuzu {}-{} crash report\n"
57 "Title ID: {:016x}\n" 70 "Title ID: {:016x}\n"
58 "Result: 0x{:X} ({:04}-{:04d})\n" 71 "Result: 0x{:X} ({:04}-{:04d})\n"
59 "\n", 72 "Set flags: 0x{:16X}\n"
60 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw, 73 "Program entry point: 0x{:16X}\n"
61 2000 + static_cast<u32>(error_code.module.Value()), 74 "\n",
62 static_cast<u32>(error_code.description.Value()), info.unk8, info.unk7); 75 Common::g_scm_branch, Common::g_scm_desc, title_id, error_code.raw,
76 2000 + static_cast<u32>(error_code.module.Value()),
77 static_cast<u32>(error_code.description.Value()), info.set_flags, info.program_entry_point);
63 if (info.backtrace_size != 0x0) { 78 if (info.backtrace_size != 0x0) {
64 crash_report += "Registers:\n"; 79 crash_report += "Registers:\n";
65 // TODO(ogniK): This is just a guess, find a game which actually has non zero values
66 for (size_t i = 0; i < info.registers.size(); i++) { 80 for (size_t i = 0; i < info.registers.size(); i++) {
67 crash_report += 81 crash_report +=
68 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]); 82 fmt::format(" X[{:02d}]: {:016x}\n", i, info.registers[i]);
69 } 83 }
70 crash_report += fmt::format(" Unknown 0: {:016x}\n", info.unk0); 84 crash_report += fmt::format(" SP: {:016x}\n", info.sp);
71 crash_report += fmt::format(" Unknown 1: {:016x}\n", info.unk1); 85 crash_report += fmt::format(" PC: {:016x}\n", info.pc);
72 crash_report += fmt::format(" Unknown 2: {:016x}\n", info.unk2); 86 crash_report += fmt::format(" PSTATE: {:016x}\n", info.pstate);
73 crash_report += fmt::format(" Unknown 3: {:016x}\n", info.unk3); 87 crash_report += fmt::format(" AFSR0: {:016x}\n", info.afsr0);
74 crash_report += fmt::format(" Unknown 4: {:016x}\n", info.unk4); 88 crash_report += fmt::format(" AFSR1: {:016x}\n", info.afsr1);
75 crash_report += fmt::format(" Unknown 5: {:016x}\n", info.unk5); 89 crash_report += fmt::format(" ESR: {:016x}\n", info.esr);
76 crash_report += fmt::format(" Unknown 6: {:016x}\n", info.unk6); 90 crash_report += fmt::format(" FAR: {:016x}\n", info.far);
77 crash_report += "\nBacktrace:\n"; 91 crash_report += "\nBacktrace:\n";
78 for (size_t i = 0; i < info.backtrace_size; i++) { 92 for (size_t i = 0; i < info.backtrace_size; i++) {
79 crash_report += 93 crash_report +=
80 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]); 94 fmt::format(" Backtrace[{:02d}]: {:016x}\n", i, info.backtrace[i]);
81 } 95 }
82 crash_report += fmt::format("\nUnknown 7: 0x{:016x}\n", info.unk7); 96
83 crash_report += fmt::format("Unknown 8: 0x{:016x}\n", info.unk8); 97 crash_report += fmt::format("Architecture: {}\n", info.ArchAsString());
84 crash_report += fmt::format("Unknown 9: 0x{:016x}\n", info.unk9);
85 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10); 98 crash_report += fmt::format("Unknown 10: 0x{:016x}\n", info.unk10);
86 } 99 }
87 100
@@ -125,13 +138,13 @@ static void ThrowFatalError(ResultCode error_code, FatalType fatal_type, const F
125 case FatalType::ErrorReport: 138 case FatalType::ErrorReport:
126 GenerateErrorReport(error_code, info); 139 GenerateErrorReport(error_code, info);
127 break; 140 break;
128 }; 141 }
129} 142}
130 143
131void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) { 144void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
132 LOG_ERROR(Service_Fatal, "called"); 145 LOG_ERROR(Service_Fatal, "called");
133 IPC::RequestParser rp{ctx}; 146 IPC::RequestParser rp{ctx};
134 auto error_code = rp.Pop<ResultCode>(); 147 const auto error_code = rp.Pop<ResultCode>();
135 148
136 ThrowFatalError(error_code, FatalType::ErrorScreen, {}); 149 ThrowFatalError(error_code, FatalType::ErrorScreen, {});
137 IPC::ResponseBuilder rb{ctx, 2}; 150 IPC::ResponseBuilder rb{ctx, 2};
@@ -141,8 +154,8 @@ void Module::Interface::ThrowFatal(Kernel::HLERequestContext& ctx) {
141void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) { 154void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
142 LOG_ERROR(Service_Fatal, "called"); 155 LOG_ERROR(Service_Fatal, "called");
143 IPC::RequestParser rp(ctx); 156 IPC::RequestParser rp(ctx);
144 auto error_code = rp.Pop<ResultCode>(); 157 const auto error_code = rp.Pop<ResultCode>();
145 auto fatal_type = rp.PopEnum<FatalType>(); 158 const auto fatal_type = rp.PopEnum<FatalType>();
146 159
147 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy 160 ThrowFatalError(error_code, fatal_type, {}); // No info is passed with ThrowFatalWithPolicy
148 IPC::ResponseBuilder rb{ctx, 2}; 161 IPC::ResponseBuilder rb{ctx, 2};
@@ -152,9 +165,9 @@ void Module::Interface::ThrowFatalWithPolicy(Kernel::HLERequestContext& ctx) {
152void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) { 165void Module::Interface::ThrowFatalWithCpuContext(Kernel::HLERequestContext& ctx) {
153 LOG_ERROR(Service_Fatal, "called"); 166 LOG_ERROR(Service_Fatal, "called");
154 IPC::RequestParser rp(ctx); 167 IPC::RequestParser rp(ctx);
155 auto error_code = rp.Pop<ResultCode>(); 168 const auto error_code = rp.Pop<ResultCode>();
156 auto fatal_type = rp.PopEnum<FatalType>(); 169 const auto fatal_type = rp.PopEnum<FatalType>();
157 auto fatal_info = ctx.ReadBuffer(); 170 const auto fatal_info = ctx.ReadBuffer();
158 FatalInfo info{}; 171 FatalInfo info{};
159 172
160 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!"); 173 ASSERT_MSG(fatal_info.size() == sizeof(FatalInfo), "Invalid fatal info buffer size!");
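ThrowFatalWithCpuContext reads the raw context buffer and asserts that it is exactly sizeof(FatalInfo) (0x250 bytes) before interpreting it. The copy into the struct itself lies outside this hunk, so the following is only an assumed sketch of the step that such a size check normally guards:

    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <type_traits>
    #include <vector>

    // Assumed pattern: copy a byte buffer into a trivially copyable struct,
    // refusing (instead of asserting) when the size does not match.
    template <typename T>
    std::optional<T> ReadPodFromBuffer(const std::vector<std::uint8_t>& buffer) {
        static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
        if (buffer.size() != sizeof(T)) {
            return std::nullopt;
        }
        T value{};
        std::memcpy(&value, buffer.data(), sizeof(T));
        return value;
    }
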
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index f03fb629c..592dce31a 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -315,61 +315,53 @@ public:
315 void CreateFile(Kernel::HLERequestContext& ctx) { 315 void CreateFile(Kernel::HLERequestContext& ctx) {
316 IPC::RequestParser rp{ctx}; 316 IPC::RequestParser rp{ctx};
317 317
318 auto file_buffer = ctx.ReadBuffer(); 318 const auto file_buffer = ctx.ReadBuffer();
319 std::string name = Common::StringFromBuffer(file_buffer); 319 const std::string name = Common::StringFromBuffer(file_buffer);
320 320
321 u64 mode = rp.Pop<u64>(); 321 const u64 mode = rp.Pop<u64>();
322 u32 size = rp.Pop<u32>(); 322 const u32 size = rp.Pop<u32>();
323 323
324 LOG_DEBUG(Service_FS, "called file {} mode 0x{:X} size 0x{:08X}", name, mode, size); 324 LOG_DEBUG(Service_FS, "called. file={}, mode=0x{:X}, size=0x{:08X}", name, mode, size);
325 325
326 IPC::ResponseBuilder rb{ctx, 2}; 326 IPC::ResponseBuilder rb{ctx, 2};
327 rb.Push(backend.CreateFile(name, size)); 327 rb.Push(backend.CreateFile(name, size));
328 } 328 }
329 329
330 void DeleteFile(Kernel::HLERequestContext& ctx) { 330 void DeleteFile(Kernel::HLERequestContext& ctx) {
331 IPC::RequestParser rp{ctx}; 331 const auto file_buffer = ctx.ReadBuffer();
332 332 const std::string name = Common::StringFromBuffer(file_buffer);
333 auto file_buffer = ctx.ReadBuffer();
334 std::string name = Common::StringFromBuffer(file_buffer);
335 333
336 LOG_DEBUG(Service_FS, "called file {}", name); 334 LOG_DEBUG(Service_FS, "called. file={}", name);
337 335
338 IPC::ResponseBuilder rb{ctx, 2}; 336 IPC::ResponseBuilder rb{ctx, 2};
339 rb.Push(backend.DeleteFile(name)); 337 rb.Push(backend.DeleteFile(name));
340 } 338 }
341 339
342 void CreateDirectory(Kernel::HLERequestContext& ctx) { 340 void CreateDirectory(Kernel::HLERequestContext& ctx) {
343 IPC::RequestParser rp{ctx}; 341 const auto file_buffer = ctx.ReadBuffer();
344 342 const std::string name = Common::StringFromBuffer(file_buffer);
345 auto file_buffer = ctx.ReadBuffer();
346 std::string name = Common::StringFromBuffer(file_buffer);
347 343
348 LOG_DEBUG(Service_FS, "called directory {}", name); 344 LOG_DEBUG(Service_FS, "called. directory={}", name);
349 345
350 IPC::ResponseBuilder rb{ctx, 2}; 346 IPC::ResponseBuilder rb{ctx, 2};
351 rb.Push(backend.CreateDirectory(name)); 347 rb.Push(backend.CreateDirectory(name));
352 } 348 }
353 349
354 void DeleteDirectory(Kernel::HLERequestContext& ctx) { 350 void DeleteDirectory(Kernel::HLERequestContext& ctx) {
355 const IPC::RequestParser rp{ctx};
356
357 const auto file_buffer = ctx.ReadBuffer(); 351 const auto file_buffer = ctx.ReadBuffer();
358 std::string name = Common::StringFromBuffer(file_buffer); 352 const std::string name = Common::StringFromBuffer(file_buffer);
359 353
360 LOG_DEBUG(Service_FS, "called directory {}", name); 354 LOG_DEBUG(Service_FS, "called. directory={}", name);
361 355
362 IPC::ResponseBuilder rb{ctx, 2}; 356 IPC::ResponseBuilder rb{ctx, 2};
363 rb.Push(backend.DeleteDirectory(name)); 357 rb.Push(backend.DeleteDirectory(name));
364 } 358 }
365 359
366 void DeleteDirectoryRecursively(Kernel::HLERequestContext& ctx) { 360 void DeleteDirectoryRecursively(Kernel::HLERequestContext& ctx) {
367 const IPC::RequestParser rp{ctx};
368
369 const auto file_buffer = ctx.ReadBuffer(); 361 const auto file_buffer = ctx.ReadBuffer();
370 std::string name = Common::StringFromBuffer(file_buffer); 362 const std::string name = Common::StringFromBuffer(file_buffer);
371 363
372 LOG_DEBUG(Service_FS, "called directory {}", name); 364 LOG_DEBUG(Service_FS, "called. directory={}", name);
373 365
374 IPC::ResponseBuilder rb{ctx, 2}; 366 IPC::ResponseBuilder rb{ctx, 2};
375 rb.Push(backend.DeleteDirectoryRecursively(name)); 367 rb.Push(backend.DeleteDirectoryRecursively(name));
@@ -386,18 +378,16 @@ public:
386 } 378 }
387 379
388 void RenameFile(Kernel::HLERequestContext& ctx) { 380 void RenameFile(Kernel::HLERequestContext& ctx) {
389 IPC::RequestParser rp{ctx};
390
391 std::vector<u8> buffer; 381 std::vector<u8> buffer;
392 buffer.resize(ctx.BufferDescriptorX()[0].Size()); 382 buffer.resize(ctx.BufferDescriptorX()[0].Size());
393 Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size()); 383 Memory::ReadBlock(ctx.BufferDescriptorX()[0].Address(), buffer.data(), buffer.size());
394 std::string src_name = Common::StringFromBuffer(buffer); 384 const std::string src_name = Common::StringFromBuffer(buffer);
395 385
396 buffer.resize(ctx.BufferDescriptorX()[1].Size()); 386 buffer.resize(ctx.BufferDescriptorX()[1].Size());
397 Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size()); 387 Memory::ReadBlock(ctx.BufferDescriptorX()[1].Address(), buffer.data(), buffer.size());
398 std::string dst_name = Common::StringFromBuffer(buffer); 388 const std::string dst_name = Common::StringFromBuffer(buffer);
399 389
400 LOG_DEBUG(Service_FS, "called file '{}' to file '{}'", src_name, dst_name); 390 LOG_DEBUG(Service_FS, "called. file '{}' to file '{}'", src_name, dst_name);
401 391
402 IPC::ResponseBuilder rb{ctx, 2}; 392 IPC::ResponseBuilder rb{ctx, 2};
403 rb.Push(backend.RenameFile(src_name, dst_name)); 393 rb.Push(backend.RenameFile(src_name, dst_name));
@@ -406,12 +396,12 @@ public:
406 void OpenFile(Kernel::HLERequestContext& ctx) { 396 void OpenFile(Kernel::HLERequestContext& ctx) {
407 IPC::RequestParser rp{ctx}; 397 IPC::RequestParser rp{ctx};
408 398
409 auto file_buffer = ctx.ReadBuffer(); 399 const auto file_buffer = ctx.ReadBuffer();
410 std::string name = Common::StringFromBuffer(file_buffer); 400 const std::string name = Common::StringFromBuffer(file_buffer);
411 401
412 auto mode = static_cast<FileSys::Mode>(rp.Pop<u32>()); 402 const auto mode = static_cast<FileSys::Mode>(rp.Pop<u32>());
413 403
414 LOG_DEBUG(Service_FS, "called file {} mode {}", name, static_cast<u32>(mode)); 404 LOG_DEBUG(Service_FS, "called. file={}, mode={}", name, static_cast<u32>(mode));
415 405
416 auto result = backend.OpenFile(name, mode); 406 auto result = backend.OpenFile(name, mode);
417 if (result.Failed()) { 407 if (result.Failed()) {
@@ -430,13 +420,13 @@ public:
430 void OpenDirectory(Kernel::HLERequestContext& ctx) { 420 void OpenDirectory(Kernel::HLERequestContext& ctx) {
431 IPC::RequestParser rp{ctx}; 421 IPC::RequestParser rp{ctx};
432 422
433 auto file_buffer = ctx.ReadBuffer(); 423 const auto file_buffer = ctx.ReadBuffer();
434 std::string name = Common::StringFromBuffer(file_buffer); 424 const std::string name = Common::StringFromBuffer(file_buffer);
435 425
436 // TODO(Subv): Implement this filter. 426 // TODO(Subv): Implement this filter.
437 u32 filter_flags = rp.Pop<u32>(); 427 const u32 filter_flags = rp.Pop<u32>();
438 428
439 LOG_DEBUG(Service_FS, "called directory {} filter {}", name, filter_flags); 429 LOG_DEBUG(Service_FS, "called. directory={}, filter={}", name, filter_flags);
440 430
441 auto result = backend.OpenDirectory(name); 431 auto result = backend.OpenDirectory(name);
442 if (result.Failed()) { 432 if (result.Failed()) {
@@ -453,12 +443,10 @@ public:
453 } 443 }
454 444
455 void GetEntryType(Kernel::HLERequestContext& ctx) { 445 void GetEntryType(Kernel::HLERequestContext& ctx) {
456 IPC::RequestParser rp{ctx}; 446 const auto file_buffer = ctx.ReadBuffer();
457 447 const std::string name = Common::StringFromBuffer(file_buffer);
458 auto file_buffer = ctx.ReadBuffer();
459 std::string name = Common::StringFromBuffer(file_buffer);
460 448
461 LOG_DEBUG(Service_FS, "called file {}", name); 449 LOG_DEBUG(Service_FS, "called. file={}", name);
462 450
463 auto result = backend.GetEntryType(name); 451 auto result = backend.GetEntryType(name);
464 if (result.Failed()) { 452 if (result.Failed()) {
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index 5c62d42ba..ca88bf97f 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -150,7 +150,7 @@ private:
150 150
151 IPC::ResponseBuilder rb{ctx, 3}; 151 IPC::ResponseBuilder rb{ctx, 3};
152 rb.Push(RESULT_SUCCESS); 152 rb.Push(RESULT_SUCCESS);
153 rb.PushRaw<u8>(Settings::values.enable_nfc); 153 rb.PushRaw<u8>(true);
154 } 154 }
155 155
156 void GetStateOld(Kernel::HLERequestContext& ctx) { 156 void GetStateOld(Kernel::HLERequestContext& ctx) {
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 1c4482e47..c6babdd4d 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -335,7 +335,7 @@ void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
335} 335}
336 336
337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) { 337bool Module::Interface::LoadAmiibo(const std::vector<u8>& buffer) {
338 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 338 std::lock_guard lock{HLE::g_hle_lock};
339 if (buffer.size() < sizeof(AmiiboFile)) { 339 if (buffer.size() < sizeof(AmiiboFile)) {
340 return false; 340 return false;
341 } 341 }
diff --git a/src/core/hle/service/sockets/sfdnsres.cpp b/src/core/hle/service/sockets/sfdnsres.cpp
index 13ab1d31e..852e71e4b 100644
--- a/src/core/hle/service/sockets/sfdnsres.cpp
+++ b/src/core/hle/service/sockets/sfdnsres.cpp
@@ -8,12 +8,20 @@
8namespace Service::Sockets { 8namespace Service::Sockets {
9 9
10void SFDNSRES::GetAddrInfo(Kernel::HLERequestContext& ctx) { 10void SFDNSRES::GetAddrInfo(Kernel::HLERequestContext& ctx) {
11 struct Parameters {
12 u8 use_nsd_resolve;
13 u32 unknown;
14 u64 process_id;
15 };
16
11 IPC::RequestParser rp{ctx}; 17 IPC::RequestParser rp{ctx};
18 const auto parameters = rp.PopRaw<Parameters>();
12 19
13 LOG_WARNING(Service, "(STUBBED) called"); 20 LOG_WARNING(Service,
21 "(STUBBED) called. use_nsd_resolve={}, unknown=0x{:08X}, process_id=0x{:016X}",
22 parameters.use_nsd_resolve, parameters.unknown, parameters.process_id);
14 23
15 IPC::ResponseBuilder rb{ctx, 2}; 24 IPC::ResponseBuilder rb{ctx, 2};
16
17 rb.Push(RESULT_SUCCESS); 25 rb.Push(RESULT_SUCCESS);
18} 26}
19 27
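GetAddrInfo (and SetOption in ssl.cpp below) now pops a small parameter struct straight out of the command buffer with PopRaw and logs the fields. Conceptually, PopRaw copies the next sizeof(T) bytes of the raw request into a trivially copyable struct; a simplified stand-alone illustration of that idea (not yuzu's actual RequestParser):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <type_traits>
    #include <vector>

    // Copies the next sizeof(T) bytes of a raw command buffer into T.
    // Assumes the caller has already validated that enough bytes remain.
    template <typename T>
    T PopRawSketch(const std::vector<std::uint8_t>& raw, std::size_t& offset) {
        static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
        T value{};
        std::memcpy(&value, raw.data() + offset, sizeof(T));
        offset += sizeof(T);
        return value;
    }
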
diff --git a/src/core/hle/service/spl/module.cpp b/src/core/hle/service/spl/module.cpp
index 8db0c2f13..e724d4ab8 100644
--- a/src/core/hle/service/spl/module.cpp
+++ b/src/core/hle/service/spl/module.cpp
@@ -26,9 +26,7 @@ Module::Interface::~Interface() = default;
26void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) { 26void Module::Interface::GetRandomBytes(Kernel::HLERequestContext& ctx) {
27 LOG_DEBUG(Service_SPL, "called"); 27 LOG_DEBUG(Service_SPL, "called");
28 28
29 IPC::RequestParser rp{ctx}; 29 const std::size_t size = ctx.GetWriteBufferSize();
30
31 std::size_t size = ctx.GetWriteBufferSize();
32 30
33 std::uniform_int_distribution<u16> distribution(0, std::numeric_limits<u8>::max()); 31 std::uniform_int_distribution<u16> distribution(0, std::numeric_limits<u8>::max());
34 std::vector<u8> data(size); 32 std::vector<u8> data(size);
diff --git a/src/core/hle/service/ssl/ssl.cpp b/src/core/hle/service/ssl/ssl.cpp
index af40a1815..f671f355e 100644
--- a/src/core/hle/service/ssl/ssl.cpp
+++ b/src/core/hle/service/ssl/ssl.cpp
@@ -68,9 +68,16 @@ public:
68 68
69private: 69private:
70 void SetOption(Kernel::HLERequestContext& ctx) { 70 void SetOption(Kernel::HLERequestContext& ctx) {
71 LOG_WARNING(Service_SSL, "(STUBBED) called"); 71 struct Parameters {
72 u8 enable;
73 u32 option;
74 };
72 75
73 IPC::RequestParser rp{ctx}; 76 IPC::RequestParser rp{ctx};
77 const auto parameters = rp.PopRaw<Parameters>();
78
79 LOG_WARNING(Service_SSL, "(STUBBED) called. enable={}, option={}", parameters.enable,
80 parameters.option);
74 81
75 IPC::ResponseBuilder rb{ctx, 2}; 82 IPC::ResponseBuilder rb{ctx, 2};
76 rb.Push(RESULT_SUCCESS); 83 rb.Push(RESULT_SUCCESS);
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 566cd6006..b77cb495d 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -1037,7 +1037,6 @@ private:
1037 void ListDisplays(Kernel::HLERequestContext& ctx) { 1037 void ListDisplays(Kernel::HLERequestContext& ctx) {
1038 LOG_WARNING(Service_VI, "(STUBBED) called"); 1038 LOG_WARNING(Service_VI, "(STUBBED) called");
1039 1039
1040 IPC::RequestParser rp{ctx};
1041 DisplayInfo display_info; 1040 DisplayInfo display_info;
1042 display_info.width *= static_cast<u64>(Settings::values.resolution_factor); 1041 display_info.width *= static_cast<u64>(Settings::values.resolution_factor);
1043 display_info.height *= static_cast<u64>(Settings::values.resolution_factor); 1042 display_info.height *= static_cast<u64>(Settings::values.resolution_factor);
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 8b1920f22..46ac372f6 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -341,7 +341,7 @@ Kernel::CodeSet ElfReader::LoadInto(VAddr vaddr) {
341 } 341 }
342 342
343 codeset.entrypoint = base_addr + header->e_entry; 343 codeset.entrypoint = base_addr + header->e_entry;
344 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 344 codeset.memory = std::move(program_image);
345 345
346 LOG_DEBUG(Loader, "Done loading."); 346 LOG_DEBUG(Loader, "Done loading.");
347 347
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 5de02a94b..31e4a0c84 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -187,7 +187,7 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
187 program_image.resize(static_cast<u32>(program_image.size()) + bss_size); 187 program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
188 188
189 // Load codeset for current process 189 // Load codeset for current process
190 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 190 codeset.memory = std::move(program_image);
191 process.LoadModule(std::move(codeset), load_base); 191 process.LoadModule(std::move(codeset), load_base);
192 192
193 // Register module with GDBStub 193 // Register module with GDBStub
diff --git a/src/core/loader/nso.cpp b/src/core/loader/nso.cpp
index 714d85a59..ffe2eea8a 100644
--- a/src/core/loader/nso.cpp
+++ b/src/core/loader/nso.cpp
@@ -4,11 +4,12 @@
4 4
5#include <cinttypes> 5#include <cinttypes>
6#include <vector> 6#include <vector>
7#include <lz4.h> 7
8#include "common/common_funcs.h" 8#include "common/common_funcs.h"
9#include "common/file_util.h" 9#include "common/file_util.h"
10#include "common/hex_util.h" 10#include "common/hex_util.h"
11#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "common/lz4_compression.h"
12#include "common/swap.h" 13#include "common/swap.h"
13#include "core/core.h" 14#include "core/core.h"
14#include "core/file_sys/patch_manager.h" 15#include "core/file_sys/patch_manager.h"
@@ -35,15 +36,11 @@ static_assert(sizeof(MODHeader) == 0x1c, "MODHeader has incorrect size.");
35 36
36std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data, 37std::vector<u8> DecompressSegment(const std::vector<u8>& compressed_data,
37 const NSOSegmentHeader& header) { 38 const NSOSegmentHeader& header) {
38 std::vector<u8> uncompressed_data(header.size); 39 const std::vector<u8> uncompressed_data =
39 const int bytes_uncompressed = 40 Common::Compression::DecompressDataLZ4(compressed_data, header.size);
40 LZ4_decompress_safe(reinterpret_cast<const char*>(compressed_data.data()),
41 reinterpret_cast<char*>(uncompressed_data.data()),
42 static_cast<int>(compressed_data.size()), header.size);
43 41
44 ASSERT_MSG(bytes_uncompressed == static_cast<int>(header.size) && 42 ASSERT_MSG(uncompressed_data.size() == static_cast<int>(header.size), "{} != {}", header.size,
45 bytes_uncompressed == static_cast<int>(uncompressed_data.size()), 43 uncompressed_data.size());
46 "{} != {} != {}", bytes_uncompressed, header.size, uncompressed_data.size());
47 44
48 return uncompressed_data; 45 return uncompressed_data;
49} 46}
@@ -161,7 +158,7 @@ std::optional<VAddr> AppLoader_NSO::LoadModule(Kernel::Process& process,
161 } 158 }
162 159
163 // Load codeset for current process 160 // Load codeset for current process
164 codeset.memory = std::make_shared<std::vector<u8>>(std::move(program_image)); 161 codeset.memory = std::move(program_image);
165 process.LoadModule(std::move(codeset), load_base); 162 process.LoadModule(std::move(codeset), load_base);
166 163
167 // Register module with GDBStub 164 // Register module with GDBStub
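DecompressSegment now calls Common::Compression::DecompressDataLZ4 instead of invoking LZ4_decompress_safe inline. Judging from the code it replaces, a wrapper with that contract could look roughly like this (sketch only; the real helper lives in common/lz4_compression.cpp):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    #include <lz4.h>

    // Sketch: decompress one LZ4 block whose uncompressed size is known up front.
    // Returns an empty vector on failure instead of asserting.
    std::vector<std::uint8_t> DecompressDataLZ4Sketch(const std::vector<std::uint8_t>& compressed,
                                                      std::uint32_t uncompressed_size) {
        std::vector<std::uint8_t> uncompressed(uncompressed_size);
        const int bytes = LZ4_decompress_safe(reinterpret_cast<const char*>(compressed.data()),
                                              reinterpret_cast<char*>(uncompressed.data()),
                                              static_cast<int>(compressed.size()),
                                              static_cast<int>(uncompressed.size()));
        if (bytes < 0 || static_cast<std::size_t>(bytes) != uncompressed.size()) {
            return {};
        }
        return uncompressed;
    }
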
diff --git a/src/core/perf_stats.cpp b/src/core/perf_stats.cpp
index c716a462b..4afd6c8a3 100644
--- a/src/core/perf_stats.cpp
+++ b/src/core/perf_stats.cpp
@@ -18,13 +18,13 @@ using std::chrono::microseconds;
18namespace Core { 18namespace Core {
19 19
20void PerfStats::BeginSystemFrame() { 20void PerfStats::BeginSystemFrame() {
21 std::lock_guard<std::mutex> lock(object_mutex); 21 std::lock_guard lock{object_mutex};
22 22
23 frame_begin = Clock::now(); 23 frame_begin = Clock::now();
24} 24}
25 25
26void PerfStats::EndSystemFrame() { 26void PerfStats::EndSystemFrame() {
27 std::lock_guard<std::mutex> lock(object_mutex); 27 std::lock_guard lock{object_mutex};
28 28
29 auto frame_end = Clock::now(); 29 auto frame_end = Clock::now();
30 accumulated_frametime += frame_end - frame_begin; 30 accumulated_frametime += frame_end - frame_begin;
@@ -35,13 +35,13 @@ void PerfStats::EndSystemFrame() {
35} 35}
36 36
37void PerfStats::EndGameFrame() { 37void PerfStats::EndGameFrame() {
38 std::lock_guard<std::mutex> lock(object_mutex); 38 std::lock_guard lock{object_mutex};
39 39
40 game_frames += 1; 40 game_frames += 1;
41} 41}
42 42
43PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) { 43PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us) {
44 std::lock_guard<std::mutex> lock(object_mutex); 44 std::lock_guard lock{object_mutex};
45 45
46 const auto now = Clock::now(); 46 const auto now = Clock::now();
47 // Walltime elapsed since stats were reset 47 // Walltime elapsed since stats were reset
@@ -67,7 +67,7 @@ PerfStatsResults PerfStats::GetAndResetStats(microseconds current_system_time_us
67} 67}
68 68
69double PerfStats::GetLastFrameTimeScale() { 69double PerfStats::GetLastFrameTimeScale() {
70 std::lock_guard<std::mutex> lock(object_mutex); 70 std::lock_guard lock{object_mutex};
71 71
72 constexpr double FRAME_LENGTH = 1.0 / 60; 72 constexpr double FRAME_LENGTH = 1.0 / 60;
73 return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH; 73 return duration_cast<DoubleSecs>(previous_frame_length).count() / FRAME_LENGTH;
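The locking changes in this file (and the matching ones in input_common and video_core below) are purely a C++17 cleanup: class template argument deduction lets the mutex type be deduced from the constructor argument, so the explicit template argument is dropped. For example:

    #include <mutex>

    std::mutex object_mutex;

    void OldSpelling() {
        std::lock_guard<std::mutex> lock(object_mutex); // pre-C++17 spelling
    }

    void NewSpelling() {
        std::lock_guard lock{object_mutex}; // CTAD deduces std::lock_guard<std::mutex>
    }
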
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index 6dd3139cc..6d32ebea3 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -82,7 +82,6 @@ void LogSetting(const std::string& name, const T& value) {
82void LogSettings() { 82void LogSettings() {
83 LOG_INFO(Config, "yuzu Configuration:"); 83 LOG_INFO(Config, "yuzu Configuration:");
84 LogSetting("System_UseDockedMode", Settings::values.use_docked_mode); 84 LogSetting("System_UseDockedMode", Settings::values.use_docked_mode);
85 LogSetting("System_EnableNfc", Settings::values.enable_nfc);
86 LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0)); 85 LogSetting("System_RngSeed", Settings::values.rng_seed.value_or(0));
87 LogSetting("System_CurrentUser", Settings::values.current_user); 86 LogSetting("System_CurrentUser", Settings::values.current_user);
88 LogSetting("System_LanguageIndex", Settings::values.language_index); 87 LogSetting("System_LanguageIndex", Settings::values.language_index);
diff --git a/src/core/settings.h b/src/core/settings.h
index cdfb2f742..d543eb32f 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -349,7 +349,6 @@ struct TouchscreenInput {
349struct Values { 349struct Values {
350 // System 350 // System
351 bool use_docked_mode; 351 bool use_docked_mode;
352 bool enable_nfc;
353 std::optional<u32> rng_seed; 352 std::optional<u32> rng_seed;
354 // Measured in seconds since epoch 353 // Measured in seconds since epoch
355 std::optional<std::chrono::seconds> custom_rtc; 354 std::optional<std::chrono::seconds> custom_rtc;
diff --git a/src/input_common/keyboard.cpp b/src/input_common/keyboard.cpp
index 525fe6abc..078374be5 100644
--- a/src/input_common/keyboard.cpp
+++ b/src/input_common/keyboard.cpp
@@ -36,18 +36,18 @@ struct KeyButtonPair {
36class KeyButtonList { 36class KeyButtonList {
37public: 37public:
38 void AddKeyButton(int key_code, KeyButton* key_button) { 38 void AddKeyButton(int key_code, KeyButton* key_button) {
39 std::lock_guard<std::mutex> guard(mutex); 39 std::lock_guard guard{mutex};
40 list.push_back(KeyButtonPair{key_code, key_button}); 40 list.push_back(KeyButtonPair{key_code, key_button});
41 } 41 }
42 42
43 void RemoveKeyButton(const KeyButton* key_button) { 43 void RemoveKeyButton(const KeyButton* key_button) {
44 std::lock_guard<std::mutex> guard(mutex); 44 std::lock_guard guard{mutex};
45 list.remove_if( 45 list.remove_if(
46 [key_button](const KeyButtonPair& pair) { return pair.key_button == key_button; }); 46 [key_button](const KeyButtonPair& pair) { return pair.key_button == key_button; });
47 } 47 }
48 48
49 void ChangeKeyStatus(int key_code, bool pressed) { 49 void ChangeKeyStatus(int key_code, bool pressed) {
50 std::lock_guard<std::mutex> guard(mutex); 50 std::lock_guard guard{mutex};
51 for (const KeyButtonPair& pair : list) { 51 for (const KeyButtonPair& pair : list) {
52 if (pair.key_code == key_code) 52 if (pair.key_code == key_code)
53 pair.key_button->status.store(pressed); 53 pair.key_button->status.store(pressed);
@@ -55,7 +55,7 @@ public:
55 } 55 }
56 56
57 void ChangeAllKeyStatus(bool pressed) { 57 void ChangeAllKeyStatus(bool pressed) {
58 std::lock_guard<std::mutex> guard(mutex); 58 std::lock_guard guard{mutex};
59 for (const KeyButtonPair& pair : list) { 59 for (const KeyButtonPair& pair : list) {
60 pair.key_button->status.store(pressed); 60 pair.key_button->status.store(pressed);
61 } 61 }
diff --git a/src/input_common/motion_emu.cpp b/src/input_common/motion_emu.cpp
index 6d96d4019..868251628 100644
--- a/src/input_common/motion_emu.cpp
+++ b/src/input_common/motion_emu.cpp
@@ -39,7 +39,7 @@ public:
39 void Tilt(int x, int y) { 39 void Tilt(int x, int y) {
40 auto mouse_move = Common::MakeVec(x, y) - mouse_origin; 40 auto mouse_move = Common::MakeVec(x, y) - mouse_origin;
41 if (is_tilting) { 41 if (is_tilting) {
42 std::lock_guard<std::mutex> guard(tilt_mutex); 42 std::lock_guard guard{tilt_mutex};
43 if (mouse_move.x == 0 && mouse_move.y == 0) { 43 if (mouse_move.x == 0 && mouse_move.y == 0) {
44 tilt_angle = 0; 44 tilt_angle = 0;
45 } else { 45 } else {
@@ -51,13 +51,13 @@ public:
51 } 51 }
52 52
53 void EndTilt() { 53 void EndTilt() {
54 std::lock_guard<std::mutex> guard(tilt_mutex); 54 std::lock_guard guard{tilt_mutex};
55 tilt_angle = 0; 55 tilt_angle = 0;
56 is_tilting = false; 56 is_tilting = false;
57 } 57 }
58 58
59 std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() { 59 std::tuple<Common::Vec3<float>, Common::Vec3<float>> GetStatus() {
60 std::lock_guard<std::mutex> guard(status_mutex); 60 std::lock_guard guard{status_mutex};
61 return status; 61 return status;
62 } 62 }
63 63
@@ -93,7 +93,7 @@ private:
93 old_q = q; 93 old_q = q;
94 94
95 { 95 {
96 std::lock_guard<std::mutex> guard(tilt_mutex); 96 std::lock_guard guard{tilt_mutex};
97 97
98 // Find the quaternion describing current 3DS tilting 98 // Find the quaternion describing current 3DS tilting
99 q = Common::MakeQuaternion( 99 q = Common::MakeQuaternion(
@@ -115,7 +115,7 @@ private:
115 115
116 // Update the sensor state 116 // Update the sensor state
117 { 117 {
118 std::lock_guard<std::mutex> guard(status_mutex); 118 std::lock_guard guard{status_mutex};
119 status = std::make_tuple(gravity, angular_rate); 119 status = std::make_tuple(gravity, angular_rate);
120 } 120 }
121 } 121 }
diff --git a/src/input_common/sdl/sdl_impl.cpp b/src/input_common/sdl/sdl_impl.cpp
index b132d77f5..5949ecbae 100644
--- a/src/input_common/sdl/sdl_impl.cpp
+++ b/src/input_common/sdl/sdl_impl.cpp
@@ -55,22 +55,22 @@ public:
55 : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {} 55 : guid{std::move(guid_)}, port{port_}, sdl_joystick{joystick, deleter} {}
56 56
57 void SetButton(int button, bool value) { 57 void SetButton(int button, bool value) {
58 std::lock_guard<std::mutex> lock(mutex); 58 std::lock_guard lock{mutex};
59 state.buttons[button] = value; 59 state.buttons[button] = value;
60 } 60 }
61 61
62 bool GetButton(int button) const { 62 bool GetButton(int button) const {
63 std::lock_guard<std::mutex> lock(mutex); 63 std::lock_guard lock{mutex};
64 return state.buttons.at(button); 64 return state.buttons.at(button);
65 } 65 }
66 66
67 void SetAxis(int axis, Sint16 value) { 67 void SetAxis(int axis, Sint16 value) {
68 std::lock_guard<std::mutex> lock(mutex); 68 std::lock_guard lock{mutex};
69 state.axes[axis] = value; 69 state.axes[axis] = value;
70 } 70 }
71 71
72 float GetAxis(int axis) const { 72 float GetAxis(int axis) const {
73 std::lock_guard<std::mutex> lock(mutex); 73 std::lock_guard lock{mutex};
74 return state.axes.at(axis) / 32767.0f; 74 return state.axes.at(axis) / 32767.0f;
75 } 75 }
76 76
@@ -92,12 +92,12 @@ public:
92 } 92 }
93 93
94 void SetHat(int hat, Uint8 direction) { 94 void SetHat(int hat, Uint8 direction) {
95 std::lock_guard<std::mutex> lock(mutex); 95 std::lock_guard lock{mutex};
96 state.hats[hat] = direction; 96 state.hats[hat] = direction;
97 } 97 }
98 98
99 bool GetHatDirection(int hat, Uint8 direction) const { 99 bool GetHatDirection(int hat, Uint8 direction) const {
100 std::lock_guard<std::mutex> lock(mutex); 100 std::lock_guard lock{mutex};
101 return (state.hats.at(hat) & direction) != 0; 101 return (state.hats.at(hat) & direction) != 0;
102 } 102 }
103 /** 103 /**
@@ -140,7 +140,7 @@ private:
140 * Get the nth joystick with the corresponding GUID 140 * Get the nth joystick with the corresponding GUID
141 */ 141 */
142std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) { 142std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& guid, int port) {
143 std::lock_guard<std::mutex> lock(joystick_map_mutex); 143 std::lock_guard lock{joystick_map_mutex};
144 const auto it = joystick_map.find(guid); 144 const auto it = joystick_map.find(guid);
145 if (it != joystick_map.end()) { 145 if (it != joystick_map.end()) {
146 while (it->second.size() <= port) { 146 while (it->second.size() <= port) {
@@ -161,7 +161,8 @@ std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickByGUID(const std::string& g
161std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) { 161std::shared_ptr<SDLJoystick> SDLState::GetSDLJoystickBySDLID(SDL_JoystickID sdl_id) {
162 auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id); 162 auto sdl_joystick = SDL_JoystickFromInstanceID(sdl_id);
163 const std::string guid = GetGUID(sdl_joystick); 163 const std::string guid = GetGUID(sdl_joystick);
164 std::lock_guard<std::mutex> lock(joystick_map_mutex); 164
165 std::lock_guard lock{joystick_map_mutex};
165 auto map_it = joystick_map.find(guid); 166 auto map_it = joystick_map.find(guid);
166 if (map_it != joystick_map.end()) { 167 if (map_it != joystick_map.end()) {
167 auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(), 168 auto vec_it = std::find_if(map_it->second.begin(), map_it->second.end(),
@@ -198,8 +199,9 @@ void SDLState::InitJoystick(int joystick_index) {
198 LOG_ERROR(Input, "failed to open joystick {}", joystick_index); 199 LOG_ERROR(Input, "failed to open joystick {}", joystick_index);
199 return; 200 return;
200 } 201 }
201 std::string guid = GetGUID(sdl_joystick); 202 const std::string guid = GetGUID(sdl_joystick);
202 std::lock_guard<std::mutex> lock(joystick_map_mutex); 203
204 std::lock_guard lock{joystick_map_mutex};
203 if (joystick_map.find(guid) == joystick_map.end()) { 205 if (joystick_map.find(guid) == joystick_map.end()) {
204 auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick); 206 auto joystick = std::make_shared<SDLJoystick>(guid, 0, sdl_joystick);
205 joystick_map[guid].emplace_back(std::move(joystick)); 207 joystick_map[guid].emplace_back(std::move(joystick));
@@ -221,7 +223,7 @@ void SDLState::CloseJoystick(SDL_Joystick* sdl_joystick) {
221 std::string guid = GetGUID(sdl_joystick); 223 std::string guid = GetGUID(sdl_joystick);
222 std::shared_ptr<SDLJoystick> joystick; 224 std::shared_ptr<SDLJoystick> joystick;
223 { 225 {
224 std::lock_guard<std::mutex> lock(joystick_map_mutex); 226 std::lock_guard lock{joystick_map_mutex};
225 // This call to guid is safe since the joystick is guaranteed to be in the map 227 // This call to guid is safe since the joystick is guaranteed to be in the map
226 auto& joystick_guid_list = joystick_map[guid]; 228 auto& joystick_guid_list = joystick_map[guid];
227 const auto joystick_it = 229 const auto joystick_it =
@@ -274,7 +276,7 @@ void SDLState::HandleGameControllerEvent(const SDL_Event& event) {
274} 276}
275 277
276void SDLState::CloseJoysticks() { 278void SDLState::CloseJoysticks() {
277 std::lock_guard<std::mutex> lock(joystick_map_mutex); 279 std::lock_guard lock{joystick_map_mutex};
278 joystick_map.clear(); 280 joystick_map.clear();
279} 281}
280 282
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index d0284bdf4..c7038b217 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,5 +1,7 @@
1add_executable(tests 1add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp
4 common/multi_level_queue.cpp
3 common/param_package.cpp 5 common/param_package.cpp
4 common/ring_buffer.cpp 6 common/ring_buffer.cpp
5 core/arm/arm_test_common.cpp 7 core/arm/arm_test_common.cpp
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp
new file mode 100644
index 000000000..479b5995a
--- /dev/null
+++ b/src/tests/common/bit_utils.cpp
@@ -0,0 +1,23 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/bit_util.h"
8
9namespace Common {
10
11TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") {
12 REQUIRE(Common::CountTrailingZeroes32(0) == 32);
13 REQUIRE(Common::CountTrailingZeroes64(0) == 64);
14 REQUIRE(Common::CountTrailingZeroes32(9) == 0);
15 REQUIRE(Common::CountTrailingZeroes32(8) == 3);
16 REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12);
17 REQUIRE(Common::CountTrailingZeroes64(9) == 0);
18 REQUIRE(Common::CountTrailingZeroes64(8) == 3);
19 REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12);
20 REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36);
21}
22
23} // namespace Common
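The new test pins down the convention that CountTrailingZeroes32/64 return the full bit width for an input of zero. A portable reference implementation consistent with those expectations (the header under test may well use compiler intrinsics instead):

    #include <cstdint>

    // Number of trailing zero bits; returns the full width (32 or 64) for 0.
    constexpr std::uint32_t CountTrailingZeroes32Ref(std::uint32_t value) {
        if (value == 0) {
            return 32;
        }
        std::uint32_t count = 0;
        while ((value & 1) == 0) {
            value >>= 1;
            ++count;
        }
        return count;
    }

    constexpr std::uint32_t CountTrailingZeroes64Ref(std::uint64_t value) {
        if (value == 0) {
            return 64;
        }
        std::uint32_t count = 0;
        while ((value & 1) == 0) {
            value >>= 1;
            ++count;
        }
        return count;
    }

    static_assert(CountTrailingZeroes32Ref(8) == 3);
    static_assert(CountTrailingZeroes64Ref(0x801000000000ULL) == 36);
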
diff --git a/src/tests/common/multi_level_queue.cpp b/src/tests/common/multi_level_queue.cpp
new file mode 100644
index 000000000..cca7ec7da
--- /dev/null
+++ b/src/tests/common/multi_level_queue.cpp
@@ -0,0 +1,55 @@
1// Copyright 2019 Yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/common_types.h"
8#include "common/multi_level_queue.h"
9
10namespace Common {
11
12TEST_CASE("MultiLevelQueue", "[common]") {
13 std::array<f32, 8> values = {0.0, 5.0, 1.0, 9.0, 8.0, 2.0, 6.0, 7.0};
14 Common::MultiLevelQueue<f32, 64> mlq;
15 REQUIRE(mlq.empty());
16 mlq.add(values[2], 2);
17 mlq.add(values[7], 7);
18 mlq.add(values[3], 3);
19 mlq.add(values[4], 4);
20 mlq.add(values[0], 0);
21 mlq.add(values[5], 5);
22 mlq.add(values[6], 6);
23 mlq.add(values[1], 1);
24 u32 index = 0;
25 bool all_set = true;
26 for (auto& f : mlq) {
27 all_set &= (f == values[index]);
28 index++;
29 }
30 REQUIRE(all_set);
31 REQUIRE(!mlq.empty());
32 f32 v = 8.0;
33 mlq.add(v, 2);
34 v = -7.0;
35 mlq.add(v, 2, false);
36 REQUIRE(mlq.front(2) == -7.0);
37 mlq.yield(2);
38 REQUIRE(mlq.front(2) == values[2]);
39 REQUIRE(mlq.back(2) == -7.0);
40 REQUIRE(mlq.empty(8));
41 v = 10.0;
42 mlq.add(v, 8);
43 mlq.adjust(v, 8, 9);
44 REQUIRE(mlq.front(9) == v);
45 REQUIRE(mlq.empty(8));
46 REQUIRE(!mlq.empty(9));
47 mlq.adjust(values[0], 0, 9);
48 REQUIRE(mlq.highest_priority_set() == 1);
49 REQUIRE(mlq.lowest_priority_set() == 9);
50 mlq.remove(values[1], 1);
51 REQUIRE(mlq.highest_priority_set() == 2);
52 REQUIRE(mlq.empty(1));
53}
54
55} // namespace Common
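The container exercised here presumably keeps one list per priority level plus a bitmask of occupied levels, which is what makes highest_priority_set()/lowest_priority_set() cheap. A heavily simplified sketch of that idea, covering only add/front/empty/highest_priority_set (the real common/multi_level_queue.h additionally supports iteration, yield, adjust, remove and more):

    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <list>

    // Toy illustration of a multilevel queue: per-priority lists plus a
    // bitmask recording which priorities currently hold at least one item.
    template <typename T, std::size_t Depth>
    class MiniMultiLevelQueue {
        static_assert(Depth <= 64, "bitmask sketch only covers up to 64 levels");

    public:
        void add(const T& value, std::size_t priority, bool push_back = true) {
            auto& level = levels[priority];
            if (push_back) {
                level.push_back(value);
            } else {
                level.push_front(value);
            }
            used_priorities |= std::uint64_t{1} << priority;
        }

        const T& front(std::size_t priority) const {
            return levels[priority].front();
        }

        bool empty(std::size_t priority) const {
            return levels[priority].empty();
        }

        // Lowest set bit == most urgent priority; returns Depth when nothing is queued.
        std::size_t highest_priority_set() const {
            for (std::size_t i = 0; i < Depth; ++i) {
                if ((used_priorities >> i) & 1) {
                    return i;
                }
            }
            return Depth;
        }

    private:
        std::array<std::list<T>, Depth> levels{};
        std::uint64_t used_priorities = 0;
    };
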
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 14b76680f..242a0d1cd 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -128,7 +128,9 @@ if (ENABLE_VULKAN)
128 renderer_vulkan/vk_scheduler.cpp 128 renderer_vulkan/vk_scheduler.cpp
129 renderer_vulkan/vk_scheduler.h 129 renderer_vulkan/vk_scheduler.h
130 renderer_vulkan/vk_stream_buffer.cpp 130 renderer_vulkan/vk_stream_buffer.cpp
131 renderer_vulkan/vk_stream_buffer.h) 131 renderer_vulkan/vk_stream_buffer.h
132 renderer_vulkan/vk_swapchain.cpp
133 renderer_vulkan/vk_swapchain.h)
132 134
133 target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include) 135 target_include_directories(video_core PRIVATE ../../externals/Vulkan-Headers/include)
134 target_compile_definitions(video_core PRIVATE HAS_VULKAN) 136 target_compile_definitions(video_core PRIVATE HAS_VULKAN)
@@ -137,4 +139,4 @@ endif()
137create_target_directory_groups(video_core) 139create_target_directory_groups(video_core)
138 140
139target_link_libraries(video_core PUBLIC common core) 141target_link_libraries(video_core PUBLIC common core)
140target_link_libraries(video_core PRIVATE glad lz4_static) 142target_link_libraries(video_core PRIVATE glad)
diff --git a/src/video_core/debug_utils/debug_utils.cpp b/src/video_core/debug_utils/debug_utils.cpp
index 5ffb492ea..f0ef67535 100644
--- a/src/video_core/debug_utils/debug_utils.cpp
+++ b/src/video_core/debug_utils/debug_utils.cpp
@@ -10,7 +10,7 @@ namespace Tegra {
10 10
11void DebugContext::DoOnEvent(Event event, void* data) { 11void DebugContext::DoOnEvent(Event event, void* data) {
12 { 12 {
13 std::unique_lock<std::mutex> lock(breakpoint_mutex); 13 std::unique_lock lock{breakpoint_mutex};
14 14
15 // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will 15 // TODO(Subv): Commit the rasterizer's caches so framebuffers, render targets, etc. will
16 // show on debug widgets 16 // show on debug widgets
@@ -32,7 +32,7 @@ void DebugContext::DoOnEvent(Event event, void* data) {
32 32
33void DebugContext::Resume() { 33void DebugContext::Resume() {
34 { 34 {
35 std::lock_guard<std::mutex> lock(breakpoint_mutex); 35 std::lock_guard lock{breakpoint_mutex};
36 36
37 // Tell all observers that we are about to resume 37 // Tell all observers that we are about to resume
38 for (auto& breakpoint_observer : breakpoint_observers) { 38 for (auto& breakpoint_observer : breakpoint_observers) {
diff --git a/src/video_core/debug_utils/debug_utils.h b/src/video_core/debug_utils/debug_utils.h
index c235faf46..ac3a2eb01 100644
--- a/src/video_core/debug_utils/debug_utils.h
+++ b/src/video_core/debug_utils/debug_utils.h
@@ -40,7 +40,7 @@ public:
40 /// Constructs the object such that it observes events of the given DebugContext. 40 /// Constructs the object such that it observes events of the given DebugContext.
41 explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context) 41 explicit BreakPointObserver(std::shared_ptr<DebugContext> debug_context)
42 : context_weak(debug_context) { 42 : context_weak(debug_context) {
43 std::unique_lock<std::mutex> lock(debug_context->breakpoint_mutex); 43 std::unique_lock lock{debug_context->breakpoint_mutex};
44 debug_context->breakpoint_observers.push_back(this); 44 debug_context->breakpoint_observers.push_back(this);
45 } 45 }
46 46
@@ -48,7 +48,7 @@ public:
48 auto context = context_weak.lock(); 48 auto context = context_weak.lock();
49 if (context) { 49 if (context) {
50 { 50 {
51 std::unique_lock<std::mutex> lock(context->breakpoint_mutex); 51 std::unique_lock lock{context->breakpoint_mutex};
52 context->breakpoint_observers.remove(this); 52 context->breakpoint_observers.remove(this);
53 } 53 }
54 54
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 267a03f2d..30b29e14d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -286,9 +286,10 @@ void GPU::ProcessSemaphoreTriggerMethod() {
286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of 286 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
287 // CoreTiming 287 // CoreTiming
288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks(); 288 block.timestamp = Core::System::GetInstance().CoreTiming().GetTicks();
289 memory_manager->WriteBlock(regs.smaphore_address.SmaphoreAddress(), &block, sizeof(block)); 289 memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
290 sizeof(block));
290 } else { 291 } else {
291 const u32 word{memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress())}; 292 const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
292 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) || 293 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
293 (op == GpuSemaphoreOperation::AcquireGequal && 294 (op == GpuSemaphoreOperation::AcquireGequal &&
294 static_cast<s32>(word - regs.semaphore_sequence) > 0) || 295 static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
@@ -315,11 +316,11 @@ void GPU::ProcessSemaphoreTriggerMethod() {
315} 316}
316 317
317void GPU::ProcessSemaphoreRelease() { 318void GPU::ProcessSemaphoreRelease() {
318 memory_manager->Write<u32>(regs.smaphore_address.SmaphoreAddress(), regs.semaphore_release); 319 memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), regs.semaphore_release);
319} 320}
320 321
321void GPU::ProcessSemaphoreAcquire() { 322void GPU::ProcessSemaphoreAcquire() {
322 const u32 word = memory_manager->Read<u32>(regs.smaphore_address.SmaphoreAddress()); 323 const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress());
323 const auto value = regs.semaphore_acquire; 324 const auto value = regs.semaphore_acquire;
324 if (word != value) { 325 if (word != value) {
325 regs.acquire_active = true; 326 regs.acquire_active = true;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index c1830ac8d..de30ea354 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -177,11 +177,11 @@ public:
177 u32 address_high; 177 u32 address_high;
178 u32 address_low; 178 u32 address_low;
179 179
180 GPUVAddr SmaphoreAddress() const { 180 GPUVAddr SemaphoreAddress() const {
181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | 181 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
182 address_low); 182 address_low);
183 } 183 }
184 } smaphore_address; 184 } semaphore_address;
185 185
186 u32 semaphore_sequence; 186 u32 semaphore_sequence;
187 u32 semaphore_trigger; 187 u32 semaphore_trigger;
@@ -263,7 +263,7 @@ private:
263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \ 263 static_assert(offsetof(GPU::Regs, field_name) == position * 4, \
264 "Field " #field_name " has invalid position") 264 "Field " #field_name " has invalid position")
265 265
266ASSERT_REG_POSITION(smaphore_address, 0x4); 266ASSERT_REG_POSITION(semaphore_address, 0x4);
267ASSERT_REG_POSITION(semaphore_sequence, 0x6); 267ASSERT_REG_POSITION(semaphore_sequence, 0x6);
268ASSERT_REG_POSITION(semaphore_trigger, 0x7); 268ASSERT_REG_POSITION(semaphore_trigger, 0x7);
269ASSERT_REG_POSITION(reference_count, 0x14); 269ASSERT_REG_POSITION(reference_count, 0x14);
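
Besides correcting the "smaphore" spelling, the renamed SemaphoreAddress() still composes a 64-bit GPU virtual address from the two 32-bit registers. A self-contained sketch of that composition (GPUVAddr is assumed to be a 64-bit alias, as in the real header):

#include <cstdint>

using GPUVAddr = std::uint64_t; // assumption: matches the project alias

// The register block stores the semaphore target as two 32-bit words;
// the accessor shifts the high word into the upper half of the address.
GPUVAddr ComposeSemaphoreAddress(std::uint32_t address_high, std::uint32_t address_low) {
    return (static_cast<GPUVAddr>(address_high) << 32) | address_low;
}
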
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 086b2f625..c5dc199c5 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -52,8 +52,8 @@ static void RunThread(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_p
52} 52}
53 53
54ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher) 54ThreadManager::ThreadManager(VideoCore::RendererBase& renderer, Tegra::DmaPusher& dma_pusher)
55 : renderer{renderer}, dma_pusher{dma_pusher}, thread{RunThread, std::ref(renderer), 55 : renderer{renderer}, thread{RunThread, std::ref(renderer), std::ref(dma_pusher),
56 std::ref(dma_pusher), std::ref(state)} {} 56 std::ref(state)} {}
57 57
58ThreadManager::~ThreadManager() { 58ThreadManager::~ThreadManager() {
59 // Notify GPU thread that a shutdown is pending 59 // Notify GPU thread that a shutdown is pending
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 8cd7db1c6..70acb2e79 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -4,10 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <atomic> 7#include <atomic>
9#include <condition_variable> 8#include <condition_variable>
10#include <memory>
11#include <mutex> 9#include <mutex>
12#include <optional> 10#include <optional>
13#include <thread> 11#include <thread>
@@ -97,13 +95,13 @@ struct SynchState final {
97 std::condition_variable frames_condition; 95 std::condition_variable frames_condition;
98 96
99 void IncrementFramesCounter() { 97 void IncrementFramesCounter() {
100 std::lock_guard<std::mutex> lock{frames_mutex}; 98 std::lock_guard lock{frames_mutex};
101 ++queued_frame_count; 99 ++queued_frame_count;
102 } 100 }
103 101
104 void DecrementFramesCounter() { 102 void DecrementFramesCounter() {
105 { 103 {
106 std::lock_guard<std::mutex> lock{frames_mutex}; 104 std::lock_guard lock{frames_mutex};
107 --queued_frame_count; 105 --queued_frame_count;
108 106
109 if (queued_frame_count) { 107 if (queued_frame_count) {
@@ -115,7 +113,7 @@ struct SynchState final {
115 113
116 void WaitForFrames() { 114 void WaitForFrames() {
117 { 115 {
118 std::lock_guard<std::mutex> lock{frames_mutex}; 116 std::lock_guard lock{frames_mutex};
119 if (!queued_frame_count) { 117 if (!queued_frame_count) {
120 return; 118 return;
121 } 119 }
@@ -123,14 +121,14 @@ struct SynchState final {
123 121
124 // Wait for the GPU to be idle (all commands to be executed) 122 // Wait for the GPU to be idle (all commands to be executed)
125 { 123 {
126 std::unique_lock<std::mutex> lock{frames_mutex}; 124 std::unique_lock lock{frames_mutex};
127 frames_condition.wait(lock, [this] { return !queued_frame_count; }); 125 frames_condition.wait(lock, [this] { return !queued_frame_count; });
128 } 126 }
129 } 127 }
130 128
131 void SignalCommands() { 129 void SignalCommands() {
132 { 130 {
133 std::unique_lock<std::mutex> lock{commands_mutex}; 131 std::unique_lock lock{commands_mutex};
134 if (queue.Empty()) { 132 if (queue.Empty()) {
135 return; 133 return;
136 } 134 }
@@ -140,7 +138,7 @@ struct SynchState final {
140 } 138 }
141 139
142 void WaitForCommands() { 140 void WaitForCommands() {
143 std::unique_lock<std::mutex> lock{commands_mutex}; 141 std::unique_lock lock{commands_mutex};
144 commands_condition.wait(lock, [this] { return !queue.Empty(); }); 142 commands_condition.wait(lock, [this] { return !queue.Empty(); });
145 } 143 }
146 144
@@ -177,7 +175,6 @@ private:
177private: 175private:
178 SynchState state; 176 SynchState state;
179 VideoCore::RendererBase& renderer; 177 VideoCore::RendererBase& renderer;
180 Tegra::DmaPusher& dma_pusher;
181 std::thread thread; 178 std::thread thread;
182 std::thread::id thread_id; 179 std::thread::id thread_id;
183}; 180};
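
The ThreadManager constructor keeps forwarding its arguments to the worker through std::ref; without the wrapper, std::thread would try to copy the renderer, DMA pusher and state instead of sharing them. A minimal sketch of that pattern, with SynchState reduced to a placeholder:

#include <functional>
#include <thread>

struct SynchState {}; // placeholder for the real synchronization state

static void RunThread(SynchState& state) {
    // GPU command processing loop would live here.
}

void StartWorker() {
    SynchState state;
    // std::thread decay-copies its arguments; std::ref passes a
    // reference_wrapper so RunThread sees the caller's SynchState.
    std::thread worker{RunThread, std::ref(state)};
    worker.join();
}
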
diff --git a/src/video_core/rasterizer_cache.h b/src/video_core/rasterizer_cache.h
index 9fc9f3056..291772186 100644
--- a/src/video_core/rasterizer_cache.h
+++ b/src/video_core/rasterizer_cache.h
@@ -71,8 +71,8 @@ private:
71 bool is_registered{}; ///< Whether the object is currently registered with the cache 71 bool is_registered{}; ///< Whether the object is currently registered with the cache
72 bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory) 72 bool is_dirty{}; ///< Whether the object is dirty (out of sync with guest memory)
73 u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing 73 u64 last_modified_ticks{}; ///< When the object was last modified, used for in-order flushing
74 CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space
75 const u8* host_ptr{}; ///< Pointer to the memory backing this cached region 74 const u8* host_ptr{}; ///< Pointer to the memory backing this cached region
75 CacheAddr cache_addr{}; ///< Cache address memory, unique from emulated virtual address space
76}; 76};
77 77
78template <class T> 78template <class T>
@@ -84,7 +84,7 @@ public:
84 84
85 /// Write any cached resources overlapping the specified region back to memory 85 /// Write any cached resources overlapping the specified region back to memory
86 void FlushRegion(CacheAddr addr, std::size_t size) { 86 void FlushRegion(CacheAddr addr, std::size_t size) {
87 std::lock_guard<std::recursive_mutex> lock{mutex}; 87 std::lock_guard lock{mutex};
88 88
89 const auto& objects{GetSortedObjectsFromRegion(addr, size)}; 89 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
90 for (auto& object : objects) { 90 for (auto& object : objects) {
@@ -94,7 +94,7 @@ public:
94 94
95 /// Mark the specified region as being invalidated 95 /// Mark the specified region as being invalidated
96 void InvalidateRegion(CacheAddr addr, u64 size) { 96 void InvalidateRegion(CacheAddr addr, u64 size) {
97 std::lock_guard<std::recursive_mutex> lock{mutex}; 97 std::lock_guard lock{mutex};
98 98
99 const auto& objects{GetSortedObjectsFromRegion(addr, size)}; 99 const auto& objects{GetSortedObjectsFromRegion(addr, size)};
100 for (auto& object : objects) { 100 for (auto& object : objects) {
@@ -108,7 +108,7 @@ public:
108 108
109 /// Invalidates everything in the cache 109 /// Invalidates everything in the cache
110 void InvalidateAll() { 110 void InvalidateAll() {
111 std::lock_guard<std::recursive_mutex> lock{mutex}; 111 std::lock_guard lock{mutex};
112 112
113 while (interval_cache.begin() != interval_cache.end()) { 113 while (interval_cache.begin() != interval_cache.end()) {
114 Unregister(*interval_cache.begin()->second.begin()); 114 Unregister(*interval_cache.begin()->second.begin());
@@ -133,7 +133,7 @@ protected:
133 133
134 /// Register an object into the cache 134 /// Register an object into the cache
135 virtual void Register(const T& object) { 135 virtual void Register(const T& object) {
136 std::lock_guard<std::recursive_mutex> lock{mutex}; 136 std::lock_guard lock{mutex};
137 137
138 object->SetIsRegistered(true); 138 object->SetIsRegistered(true);
139 interval_cache.add({GetInterval(object), ObjectSet{object}}); 139 interval_cache.add({GetInterval(object), ObjectSet{object}});
@@ -143,7 +143,7 @@ protected:
143 143
144 /// Unregisters an object from the cache 144 /// Unregisters an object from the cache
145 virtual void Unregister(const T& object) { 145 virtual void Unregister(const T& object) {
146 std::lock_guard<std::recursive_mutex> lock{mutex}; 146 std::lock_guard lock{mutex};
147 147
148 object->SetIsRegistered(false); 148 object->SetIsRegistered(false);
149 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1); 149 rasterizer.UpdatePagesCachedCount(object->GetCpuAddr(), object->GetSizeInBytes(), -1);
@@ -153,14 +153,14 @@ protected:
153 153
154 /// Returns a ticks counter used for tracking when cached objects were last modified 154 /// Returns a ticks counter used for tracking when cached objects were last modified
155 u64 GetModifiedTicks() { 155 u64 GetModifiedTicks() {
156 std::lock_guard<std::recursive_mutex> lock{mutex}; 156 std::lock_guard lock{mutex};
157 157
158 return ++modified_ticks; 158 return ++modified_ticks;
159 } 159 }
160 160
161 /// Flushes the specified object, updating appropriate cache state as needed 161 /// Flushes the specified object, updating appropriate cache state as needed
162 void FlushObject(const T& object) { 162 void FlushObject(const T& object) {
163 std::lock_guard<std::recursive_mutex> lock{mutex}; 163 std::lock_guard lock{mutex};
164 164
165 if (!object->IsDirty()) { 165 if (!object->IsDirty()) {
166 return; 166 return;
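
All of these methods take std::lock_guard on the same std::recursive_mutex because the public entry points call back into other locked helpers on the same thread (FlushRegion() flushes each overlapping object through FlushObject(), for example). A reduced sketch of why a recursive mutex is needed here:

#include <mutex>

class CacheSketch { // hypothetical, reduced from RasterizerCache
public:
    void FlushRegion() {
        std::lock_guard lock{mutex};
        FlushObject(); // re-acquires the same mutex on the same thread
    }

private:
    void FlushObject() {
        std::lock_guard lock{mutex}; // fine with recursive_mutex; deadlocks with std::mutex
        // write the object back to guest memory...
    }

    std::recursive_mutex mutex;
};
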
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index f75c65825..7989ec11b 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -7,7 +7,6 @@
7 7
8#include "common/alignment.h" 8#include "common/alignment.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/memory.h"
11#include "video_core/renderer_opengl/gl_buffer_cache.h" 10#include "video_core/renderer_opengl/gl_buffer_cache.h"
12#include "video_core/renderer_opengl/gl_rasterizer.h" 11#include "video_core/renderer_opengl/gl_rasterizer.h"
13 12
@@ -15,8 +14,8 @@ namespace OpenGL {
15 14
16CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset, 15CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, GLintptr offset,
17 std::size_t alignment, u8* host_ptr) 16 std::size_t alignment, u8* host_ptr)
18 : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ 17 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
19 host_ptr} {} 18 alignment{alignment} {}
20 19
21OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size) 20OGLBufferCache::OGLBufferCache(RasterizerOpenGL& rasterizer, std::size_t size)
22 : RasterizerCache{rasterizer}, stream_buffer(size, true) {} 21 : RasterizerCache{rasterizer}, stream_buffer(size, true) {}
diff --git a/src/video_core/renderer_opengl/gl_global_cache.cpp b/src/video_core/renderer_opengl/gl_global_cache.cpp
index 0fbfbad55..5842d6213 100644
--- a/src/video_core/renderer_opengl/gl_global_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_global_cache.cpp
@@ -4,7 +4,6 @@
4 4
5#include <glad/glad.h> 5#include <glad/glad.h>
6 6
7#include "common/assert.h"
8#include "common/logging/log.h" 7#include "common/logging/log.h"
9#include "core/core.h" 8#include "core/core.h"
10#include "video_core/renderer_opengl/gl_global_cache.h" 9#include "video_core/renderer_opengl/gl_global_cache.h"
@@ -15,7 +14,7 @@
15namespace OpenGL { 14namespace OpenGL {
16 15
17CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr) 16CachedGlobalRegion::CachedGlobalRegion(VAddr cpu_addr, u32 size, u8* host_ptr)
18 : cpu_addr{cpu_addr}, size{size}, RasterizerCacheObject{host_ptr} { 17 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size} {
19 buffer.Create(); 18 buffer.Create();
20 // Bind and unbind the buffer so it gets allocated by the driver 19 // Bind and unbind the buffer so it gets allocated by the driver
21 glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle); 20 glBindBuffer(GL_SHADER_STORAGE_BUFFER, buffer.handle);
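
These constructor changes (and the matching ones in gl_rasterizer_cache.cpp, gl_shader_cache.cpp and the Vulkan buffer cache below) move the RasterizerCacheObject base to the front of the initializer list. Base classes and members are always initialized in declaration order, not in the order they are written, so listing the base first keeps the source consistent with what actually happens and avoids -Wreorder warnings. A reduced sketch with hypothetical names:

#include <cstdint>

struct CacheObjectSketch {
    explicit CacheObjectSketch(const std::uint8_t* host_ptr) : host_ptr{host_ptr} {}
    const std::uint8_t* host_ptr;
};

struct CachedEntrySketch : CacheObjectSketch {
    // The base is constructed first regardless of where it appears in the
    // list; writing it first matches reality and silences -Wreorder.
    CachedEntrySketch(std::uint64_t cpu_addr, std::uint32_t size, std::uint8_t* host_ptr)
        : CacheObjectSketch{host_ptr}, cpu_addr{cpu_addr}, size{size} {}

    std::uint64_t cpu_addr;
    std::uint32_t size;
};
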
diff --git a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
index 2bcbd3da2..c3e94d917 100644
--- a/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
+++ b/src/video_core/renderer_opengl/gl_primitive_assembler.cpp
@@ -7,7 +7,7 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/memory.h" 10#include "video_core/memory_manager.h"
11#include "video_core/renderer_opengl/gl_buffer_cache.h" 11#include "video_core/renderer_opengl/gl_buffer_cache.h"
12#include "video_core/renderer_opengl/gl_primitive_assembler.h" 12#include "video_core/renderer_opengl/gl_primitive_assembler.h"
13 13
diff --git a/src/video_core/renderer_opengl/gl_primitive_assembler.h b/src/video_core/renderer_opengl/gl_primitive_assembler.h
index 0e2e7dc36..4e87ce4d6 100644
--- a/src/video_core/renderer_opengl/gl_primitive_assembler.h
+++ b/src/video_core/renderer_opengl/gl_primitive_assembler.h
@@ -4,11 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <vector>
8#include <glad/glad.h> 7#include <glad/glad.h>
9 8
10#include "common/common_types.h" 9#include "common/common_types.h"
11#include "video_core/memory_manager.h"
12 10
13namespace OpenGL { 11namespace OpenGL {
14 12
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index e06dfe43f..7ff1e6737 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -17,7 +17,6 @@
17#include "common/microprofile.h" 17#include "common/microprofile.h"
18#include "common/scope_exit.h" 18#include "common/scope_exit.h"
19#include "core/core.h" 19#include "core/core.h"
20#include "core/frontend/emu_window.h"
21#include "core/hle/kernel/process.h" 20#include "core/hle/kernel/process.h"
22#include "core/settings.h" 21#include "core/settings.h"
23#include "video_core/engines/maxwell_3d.h" 22#include "video_core/engines/maxwell_3d.h"
@@ -26,7 +25,6 @@
26#include "video_core/renderer_opengl/gl_shader_gen.h" 25#include "video_core/renderer_opengl/gl_shader_gen.h"
27#include "video_core/renderer_opengl/maxwell_to_gl.h" 26#include "video_core/renderer_opengl/maxwell_to_gl.h"
28#include "video_core/renderer_opengl/renderer_opengl.h" 27#include "video_core/renderer_opengl/renderer_opengl.h"
29#include "video_core/video_core.h"
30 28
31namespace OpenGL { 29namespace OpenGL {
32 30
@@ -100,11 +98,9 @@ struct FramebufferCacheKey {
100 } 98 }
101}; 99};
102 100
103RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, 101RasterizerOpenGL::RasterizerOpenGL(Core::System& system, ScreenInfo& info)
104 ScreenInfo& info) 102 : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, system{system},
105 : res_cache{*this}, shader_cache{*this, system}, global_cache{*this}, 103 screen_info{info}, buffer_cache(*this, STREAM_BUFFER_SIZE) {
106 emu_window{window}, system{system}, screen_info{info},
107 buffer_cache(*this, STREAM_BUFFER_SIZE) {
108 // Create sampler objects 104 // Create sampler objects
109 for (std::size_t i = 0; i < texture_samplers.size(); ++i) { 105 for (std::size_t i = 0; i < texture_samplers.size(); ++i) {
110 texture_samplers[i].Create(); 106 texture_samplers[i].Create();
@@ -320,7 +316,7 @@ void RasterizerOpenGL::SetupShaders(GLenum primitive_mode) {
320 const std::size_t stage{index == 0 ? 0 : index - 1}; // Stage indices are 0 - 5 316 const std::size_t stage{index == 0 ? 0 : index - 1}; // Stage indices are 0 - 5
321 317
322 GLShader::MaxwellUniformData ubo{}; 318 GLShader::MaxwellUniformData ubo{};
323 ubo.SetFromRegs(gpu.state.shader_stages[stage]); 319 ubo.SetFromRegs(gpu, stage);
324 const GLintptr offset = buffer_cache.UploadHostMemory( 320 const GLintptr offset = buffer_cache.UploadHostMemory(
325 &ubo, sizeof(ubo), static_cast<std::size_t>(uniform_buffer_alignment)); 321 &ubo, sizeof(ubo), static_cast<std::size_t>(uniform_buffer_alignment));
326 322
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 30f3e8acb..54fbf48aa 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -12,15 +12,12 @@
12#include <optional> 12#include <optional>
13#include <tuple> 13#include <tuple>
14#include <utility> 14#include <utility>
15#include <vector>
16 15
17#include <boost/icl/interval_map.hpp> 16#include <boost/icl/interval_map.hpp>
18#include <boost/range/iterator_range.hpp>
19#include <glad/glad.h> 17#include <glad/glad.h>
20 18
21#include "common/common_types.h" 19#include "common/common_types.h"
22#include "video_core/engines/maxwell_3d.h" 20#include "video_core/engines/maxwell_3d.h"
23#include "video_core/memory_manager.h"
24#include "video_core/rasterizer_cache.h" 21#include "video_core/rasterizer_cache.h"
25#include "video_core/rasterizer_interface.h" 22#include "video_core/rasterizer_interface.h"
26#include "video_core/renderer_opengl/gl_buffer_cache.h" 23#include "video_core/renderer_opengl/gl_buffer_cache.h"
@@ -29,10 +26,8 @@
29#include "video_core/renderer_opengl/gl_rasterizer_cache.h" 26#include "video_core/renderer_opengl/gl_rasterizer_cache.h"
30#include "video_core/renderer_opengl/gl_resource_manager.h" 27#include "video_core/renderer_opengl/gl_resource_manager.h"
31#include "video_core/renderer_opengl/gl_shader_cache.h" 28#include "video_core/renderer_opengl/gl_shader_cache.h"
32#include "video_core/renderer_opengl/gl_shader_gen.h"
33#include "video_core/renderer_opengl/gl_shader_manager.h" 29#include "video_core/renderer_opengl/gl_shader_manager.h"
34#include "video_core/renderer_opengl/gl_state.h" 30#include "video_core/renderer_opengl/gl_state.h"
35#include "video_core/renderer_opengl/gl_stream_buffer.h"
36 31
37namespace Core { 32namespace Core {
38class System; 33class System;
@@ -50,8 +45,7 @@ struct FramebufferCacheKey;
50 45
51class RasterizerOpenGL : public VideoCore::RasterizerInterface { 46class RasterizerOpenGL : public VideoCore::RasterizerInterface {
52public: 47public:
53 explicit RasterizerOpenGL(Core::Frontend::EmuWindow& window, Core::System& system, 48 explicit RasterizerOpenGL(Core::System& system, ScreenInfo& info);
54 ScreenInfo& info);
55 ~RasterizerOpenGL() override; 49 ~RasterizerOpenGL() override;
56 50
57 void DrawArrays() override; 51 void DrawArrays() override;
@@ -214,7 +208,6 @@ private:
214 ShaderCacheOpenGL shader_cache; 208 ShaderCacheOpenGL shader_cache;
215 GlobalRegionCacheOpenGL global_cache; 209 GlobalRegionCacheOpenGL global_cache;
216 210
217 Core::Frontend::EmuWindow& emu_window;
218 Core::System& system; 211 Core::System& system;
219 212
220 ScreenInfo& screen_info; 213 ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
index 0235317c0..7a3280620 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.cpp
@@ -13,7 +13,6 @@
13#include "common/scope_exit.h" 13#include "common/scope_exit.h"
14#include "core/core.h" 14#include "core/core.h"
15#include "core/hle/kernel/process.h" 15#include "core/hle/kernel/process.h"
16#include "core/memory.h"
17#include "core/settings.h" 16#include "core/settings.h"
18#include "video_core/engines/maxwell_3d.h" 17#include "video_core/engines/maxwell_3d.h"
19#include "video_core/morton.h" 18#include "video_core/morton.h"
@@ -562,8 +561,8 @@ void RasterizerCacheOpenGL::CopySurface(const Surface& src_surface, const Surfac
562} 561}
563 562
564CachedSurface::CachedSurface(const SurfaceParams& params) 563CachedSurface::CachedSurface(const SurfaceParams& params)
565 : params{params}, gl_target{SurfaceTargetToGL(params.target)}, 564 : RasterizerCacheObject{params.host_ptr}, params{params},
566 cached_size_in_bytes{params.size_in_bytes}, RasterizerCacheObject{params.host_ptr} { 565 gl_target{SurfaceTargetToGL(params.target)}, cached_size_in_bytes{params.size_in_bytes} {
567 566
568 const auto optional_cpu_addr{ 567 const auto optional_cpu_addr{
569 Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)}; 568 Core::System::GetInstance().GPU().MemoryManager().GpuToCpuAddress(params.gpu_addr)};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer_cache.h b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
index c644271d0..ad4fd3ad2 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer_cache.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer_cache.h
@@ -5,10 +5,9 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <map>
9#include <memory> 8#include <memory>
10#include <string> 9#include <string>
11#include <unordered_set> 10#include <tuple>
12#include <vector> 11#include <vector>
13 12
14#include "common/alignment.h" 13#include "common/alignment.h"
@@ -538,12 +537,12 @@ private:
538 return nullptr; 537 return nullptr;
539 } 538 }
540 539
541 void Register(const Surface& object) { 540 void Register(const Surface& object) override {
542 RasterizerCache<Surface>::Register(object); 541 RasterizerCache<Surface>::Register(object);
543 } 542 }
544 543
545 /// Unregisters an object from the cache 544 /// Unregisters an object from the cache
546 void Unregister(const Surface& object) { 545 void Unregister(const Surface& object) override {
547 if (object->IsReinterpreted()) { 546 if (object->IsReinterpreted()) {
548 auto interval = GetReinterpretInterval(object); 547 auto interval = GetReinterpretInterval(object);
549 reinterpreted_surfaces.erase(interval); 548 reinterpreted_surfaces.erase(interval);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 1f8eca6f0..7030db365 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -6,13 +6,11 @@
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/hash.h" 7#include "common/hash.h"
8#include "core/core.h" 8#include "core/core.h"
9#include "core/memory.h"
10#include "video_core/engines/maxwell_3d.h" 9#include "video_core/engines/maxwell_3d.h"
11#include "video_core/renderer_opengl/gl_rasterizer.h" 10#include "video_core/renderer_opengl/gl_rasterizer.h"
12#include "video_core/renderer_opengl/gl_shader_cache.h" 11#include "video_core/renderer_opengl/gl_shader_cache.h"
13#include "video_core/renderer_opengl/gl_shader_decompiler.h" 12#include "video_core/renderer_opengl/gl_shader_decompiler.h"
14#include "video_core/renderer_opengl/gl_shader_disk_cache.h" 13#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
15#include "video_core/renderer_opengl/gl_shader_manager.h"
16#include "video_core/renderer_opengl/utils.h" 14#include "video_core/renderer_opengl/utils.h"
17#include "video_core/shader/shader_ir.h" 15#include "video_core/shader/shader_ir.h"
18 16
@@ -215,9 +213,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
215 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, 213 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
216 const PrecompiledPrograms& precompiled_programs, 214 const PrecompiledPrograms& precompiled_programs,
217 ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr) 215 ProgramCode&& program_code, ProgramCode&& program_code_b, u8* host_ptr)
218 : host_ptr{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, 216 : RasterizerCacheObject{host_ptr}, host_ptr{host_ptr}, cpu_addr{cpu_addr},
219 program_type{program_type}, disk_cache{disk_cache}, 217 unique_identifier{unique_identifier}, program_type{program_type}, disk_cache{disk_cache},
220 precompiled_programs{precompiled_programs}, RasterizerCacheObject{host_ptr} { 218 precompiled_programs{precompiled_programs} {
221 219
222 const std::size_t code_size = CalculateProgramSize(program_code); 220 const std::size_t code_size = CalculateProgramSize(program_code);
223 const std::size_t code_size_b = 221 const std::size_t code_size_b =
@@ -245,9 +243,9 @@ CachedShader::CachedShader(VAddr cpu_addr, u64 unique_identifier,
245 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache, 243 Maxwell::ShaderProgram program_type, ShaderDiskCacheOpenGL& disk_cache,
246 const PrecompiledPrograms& precompiled_programs, 244 const PrecompiledPrograms& precompiled_programs,
247 GLShader::ProgramResult result, u8* host_ptr) 245 GLShader::ProgramResult result, u8* host_ptr)
248 : cpu_addr{cpu_addr}, unique_identifier{unique_identifier}, program_type{program_type}, 246 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, unique_identifier{unique_identifier},
249 disk_cache{disk_cache}, precompiled_programs{precompiled_programs}, RasterizerCacheObject{ 247 program_type{program_type}, disk_cache{disk_cache}, precompiled_programs{
250 host_ptr} { 248 precompiled_programs} {
251 249
252 code = std::move(result.first); 250 code = std::move(result.first);
253 entries = result.second; 251 entries = result.second;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index fd1c85115..0cf8e0b3d 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -5,21 +5,20 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <atomic>
8#include <memory> 9#include <memory>
9#include <set> 10#include <set>
10#include <tuple> 11#include <tuple>
11#include <unordered_map> 12#include <unordered_map>
13#include <vector>
12 14
13#include <glad/glad.h> 15#include <glad/glad.h>
14 16
15#include "common/assert.h"
16#include "common/common_types.h" 17#include "common/common_types.h"
17#include "video_core/rasterizer_cache.h" 18#include "video_core/rasterizer_cache.h"
18#include "video_core/renderer_base.h"
19#include "video_core/renderer_opengl/gl_resource_manager.h" 19#include "video_core/renderer_opengl/gl_resource_manager.h"
20#include "video_core/renderer_opengl/gl_shader_decompiler.h" 20#include "video_core/renderer_opengl/gl_shader_decompiler.h"
21#include "video_core/renderer_opengl/gl_shader_disk_cache.h" 21#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
22#include "video_core/renderer_opengl/gl_shader_gen.h"
23 22
24namespace Core { 23namespace Core {
25class System; 24class System;
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 11d1169f0..7300a4037 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -1196,11 +1196,12 @@ private:
1196 switch (meta->element) { 1196 switch (meta->element) {
1197 case 0: 1197 case 0:
1198 case 1: 1198 case 1:
1199 return "textureSize(" + sampler + ", " + lod + ')' + GetSwizzle(meta->element); 1199 return "itof(int(textureSize(" + sampler + ", " + lod + ')' +
1200 GetSwizzle(meta->element) + "))";
1200 case 2: 1201 case 2:
1201 return "0"; 1202 return "0";
1202 case 3: 1203 case 3:
1203 return "textureQueryLevels(" + sampler + ')'; 1204 return "itof(textureQueryLevels(" + sampler + "))";
1204 } 1205 }
1205 UNREACHABLE(); 1206 UNREACHABLE();
1206 return "0"; 1207 return "0";
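
The decompiler stores every value in float registers, so the integer results of textureSize() and textureQueryLevels() are now routed through an itof() conversion before being written back. A hypothetical helper mirroring the string the decompiler emits (EmitTextureSizeQuery is not a real function in the codebase):

#include <string>

std::string EmitTextureSizeQuery(const std::string& sampler, const std::string& lod,
                                 const std::string& swizzle) {
    // e.g. sampler="sampler0", lod="0", swizzle=".x"
    // -> itof(int(textureSize(sampler0, 0).x))
    return "itof(int(textureSize(" + sampler + ", " + lod + ')' + swizzle + "))";
}
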
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index 72aca4938..4e04ab2f8 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <set>
9#include <string> 8#include <string>
10#include <utility> 9#include <utility>
11#include <vector> 10#include <vector>
diff --git a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
index 82fc4d44b..d2d979997 100644
--- a/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_disk_cache.cpp
@@ -4,13 +4,13 @@
4 4
5#include <cstring> 5#include <cstring>
6#include <fmt/format.h> 6#include <fmt/format.h>
7#include <lz4.h>
8 7
9#include "common/assert.h" 8#include "common/assert.h"
10#include "common/common_paths.h" 9#include "common/common_paths.h"
11#include "common/common_types.h" 10#include "common/common_types.h"
12#include "common/file_util.h" 11#include "common/file_util.h"
13#include "common/logging/log.h" 12#include "common/logging/log.h"
13#include "common/lz4_compression.h"
14#include "common/scm_rev.h" 14#include "common/scm_rev.h"
15 15
16#include "core/core.h" 16#include "core/core.h"
@@ -49,39 +49,6 @@ ShaderCacheVersionHash GetShaderCacheVersionHash() {
49 return hash; 49 return hash;
50} 50}
51 51
52template <typename T>
53std::vector<u8> CompressData(const T* source, std::size_t source_size) {
54 if (source_size > LZ4_MAX_INPUT_SIZE) {
55 // Source size exceeds LZ4 maximum input size
56 return {};
57 }
58 const auto source_size_int = static_cast<int>(source_size);
59 const int max_compressed_size = LZ4_compressBound(source_size_int);
60 std::vector<u8> compressed(max_compressed_size);
61 const int compressed_size = LZ4_compress_default(reinterpret_cast<const char*>(source),
62 reinterpret_cast<char*>(compressed.data()),
63 source_size_int, max_compressed_size);
64 if (compressed_size <= 0) {
65 // Compression failed
66 return {};
67 }
68 compressed.resize(compressed_size);
69 return compressed;
70}
71
72std::vector<u8> DecompressData(const std::vector<u8>& compressed, std::size_t uncompressed_size) {
73 std::vector<u8> uncompressed(uncompressed_size);
74 const int size_check = LZ4_decompress_safe(reinterpret_cast<const char*>(compressed.data()),
75 reinterpret_cast<char*>(uncompressed.data()),
76 static_cast<int>(compressed.size()),
77 static_cast<int>(uncompressed.size()));
78 if (static_cast<int>(uncompressed_size) != size_check) {
79 // Decompression failed
80 return {};
81 }
82 return uncompressed;
83}
84
85} // namespace 52} // namespace
86 53
87ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type, 54ShaderDiskCacheRaw::ShaderDiskCacheRaw(u64 unique_identifier, Maxwell::ShaderProgram program_type,
@@ -292,7 +259,7 @@ ShaderDiskCacheOpenGL::LoadPrecompiledFile(FileUtil::IOFile& file) {
292 return {}; 259 return {};
293 } 260 }
294 261
295 dump.binary = DecompressData(compressed_binary, binary_length); 262 dump.binary = Common::Compression::DecompressDataLZ4(compressed_binary, binary_length);
296 if (dump.binary.empty()) { 263 if (dump.binary.empty()) {
297 return {}; 264 return {};
298 } 265 }
@@ -321,7 +288,7 @@ std::optional<ShaderDiskCacheDecompiled> ShaderDiskCacheOpenGL::LoadDecompiledEn
321 return {}; 288 return {};
322 } 289 }
323 290
324 const std::vector<u8> code = DecompressData(compressed_code, code_size); 291 const std::vector<u8> code = Common::Compression::DecompressDataLZ4(compressed_code, code_size);
325 if (code.empty()) { 292 if (code.empty()) {
326 return {}; 293 return {};
327 } 294 }
@@ -507,7 +474,8 @@ void ShaderDiskCacheOpenGL::SaveDecompiled(u64 unique_identifier, const std::str
507 if (!IsUsable()) 474 if (!IsUsable())
508 return; 475 return;
509 476
510 const std::vector<u8> compressed_code{CompressData(code.data(), code.size())}; 477 const std::vector<u8> compressed_code{Common::Compression::CompressDataLZ4HC(
478 reinterpret_cast<const u8*>(code.data()), code.size(), 9)};
511 if (compressed_code.empty()) { 479 if (compressed_code.empty()) {
512 LOG_ERROR(Render_OpenGL, "Failed to compress GLSL code - skipping shader {:016x}", 480 LOG_ERROR(Render_OpenGL, "Failed to compress GLSL code - skipping shader {:016x}",
513 unique_identifier); 481 unique_identifier);
@@ -537,7 +505,9 @@ void ShaderDiskCacheOpenGL::SaveDump(const ShaderDiskCacheUsage& usage, GLuint p
537 std::vector<u8> binary(binary_length); 505 std::vector<u8> binary(binary_length);
538 glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data()); 506 glGetProgramBinary(program, binary_length, nullptr, &binary_format, binary.data());
539 507
540 const std::vector<u8> compressed_binary = CompressData(binary.data(), binary.size()); 508 const std::vector<u8> compressed_binary =
509 Common::Compression::CompressDataLZ4HC(binary.data(), binary.size(), 9);
510
541 if (compressed_binary.empty()) { 511 if (compressed_binary.empty()) {
542 LOG_ERROR(Render_OpenGL, "Failed to compress binary program in shader={:016x}", 512 LOG_ERROR(Render_OpenGL, "Failed to compress binary program in shader={:016x}",
543 usage.unique_identifier); 513 usage.unique_identifier);
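
The local CompressData()/DecompressData() helpers are replaced with the shared wrappers from common/lz4_compression.h; the disk cache now compresses with the high-compression variant at level 9. A hedged round-trip sketch based on the call sites above (parameter types are inferred from those calls, not from the header itself):

#include <cstdint>
#include <vector>

#include "common/lz4_compression.h"

std::vector<std::uint8_t> RoundTripLZ4(const std::vector<std::uint8_t>& data) {
    // Level 9 matches the value used for decompiled shaders and program dumps.
    const std::vector<std::uint8_t> compressed =
        Common::Compression::CompressDataLZ4HC(data.data(), data.size(), 9);
    if (compressed.empty()) {
        return {}; // compression failed (or the input exceeded LZ4's limit)
    }
    // Decompression needs the original size, so callers store it alongside
    // the compressed blob, as the precompiled cache file format does.
    return Common::Compression::DecompressDataLZ4(compressed, data.size());
}
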
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.cpp b/src/video_core/renderer_opengl/gl_shader_gen.cpp
index 7d96649af..8763d9c71 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_gen.cpp
@@ -3,7 +3,6 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <fmt/format.h> 5#include <fmt/format.h>
6#include "common/assert.h"
7#include "video_core/engines/maxwell_3d.h" 6#include "video_core/engines/maxwell_3d.h"
8#include "video_core/renderer_opengl/gl_shader_decompiler.h" 7#include "video_core/renderer_opengl/gl_shader_decompiler.h"
9#include "video_core/renderer_opengl/gl_shader_gen.h" 8#include "video_core/renderer_opengl/gl_shader_gen.h"
diff --git a/src/video_core/renderer_opengl/gl_shader_gen.h b/src/video_core/renderer_opengl/gl_shader_gen.h
index fba8e681b..fad346b48 100644
--- a/src/video_core/renderer_opengl/gl_shader_gen.h
+++ b/src/video_core/renderer_opengl/gl_shader_gen.h
@@ -4,12 +4,9 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <string>
9#include <vector> 7#include <vector>
10 8
11#include "common/common_types.h" 9#include "common/common_types.h"
12#include "video_core/engines/shader_bytecode.h"
13#include "video_core/renderer_opengl/gl_shader_decompiler.h" 10#include "video_core/renderer_opengl/gl_shader_decompiler.h"
14#include "video_core/shader/shader_ir.h" 11#include "video_core/shader/shader_ir.h"
15 12
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.cpp b/src/video_core/renderer_opengl/gl_shader_manager.cpp
index 6a30c28d2..eaf3e03a0 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_manager.cpp
@@ -2,15 +2,15 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "core/core.h"
6#include "video_core/renderer_opengl/gl_shader_manager.h" 5#include "video_core/renderer_opengl/gl_shader_manager.h"
7 6
8namespace OpenGL::GLShader { 7namespace OpenGL::GLShader {
9 8
10void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& shader_stage) { 9using Tegra::Engines::Maxwell3D;
11 const auto& gpu = Core::System::GetInstance().GPU().Maxwell3D(); 10
12 const auto& regs = gpu.regs; 11void MaxwellUniformData::SetFromRegs(const Maxwell3D& maxwell, std::size_t shader_stage) {
13 const auto& state = gpu.state; 12 const auto& regs = maxwell.regs;
13 const auto& state = maxwell.state;
14 14
15 // TODO(bunnei): Support more than one viewport 15 // TODO(bunnei): Support more than one viewport
16 viewport_flip[0] = regs.viewport_transform[0].scale_x < 0.0 ? -1.0f : 1.0f; 16 viewport_flip[0] = regs.viewport_transform[0].scale_x < 0.0 ? -1.0f : 1.0f;
@@ -18,7 +18,7 @@ void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& sh
18 18
19 u32 func = static_cast<u32>(regs.alpha_test_func); 19 u32 func = static_cast<u32>(regs.alpha_test_func);
20 // Normalize the gl variants of opCompare to be the same as the normal variants 20 // Normalize the gl variants of opCompare to be the same as the normal variants
21 u32 op_gl_variant_base = static_cast<u32>(Tegra::Engines::Maxwell3D::Regs::ComparisonOp::Never); 21 const u32 op_gl_variant_base = static_cast<u32>(Maxwell3D::Regs::ComparisonOp::Never);
22 if (func >= op_gl_variant_base) { 22 if (func >= op_gl_variant_base) {
23 func = func - op_gl_variant_base + 1U; 23 func = func - op_gl_variant_base + 1U;
24 } 24 }
@@ -31,8 +31,9 @@ void MaxwellUniformData::SetFromRegs(const Maxwell3D::State::ShaderStageInfo& sh
31 31
32 // Assign in which stage the position has to be flipped 32 // Assign in which stage the position has to be flipped
33 // (the last stage before the fragment shader). 33 // (the last stage before the fragment shader).
34 if (gpu.regs.shader_config[static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry)].enable) { 34 constexpr u32 geometry_index = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry);
35 flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::Geometry); 35 if (maxwell.regs.shader_config[geometry_index].enable) {
36 flip_stage = geometry_index;
36 } else { 37 } else {
37 flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::VertexB); 38 flip_stage = static_cast<u32>(Maxwell3D::Regs::ShaderProgram::VertexB);
38 } 39 }
diff --git a/src/video_core/renderer_opengl/gl_shader_manager.h b/src/video_core/renderer_opengl/gl_shader_manager.h
index 4970aafed..8eef2a920 100644
--- a/src/video_core/renderer_opengl/gl_shader_manager.h
+++ b/src/video_core/renderer_opengl/gl_shader_manager.h
@@ -12,14 +12,13 @@
12 12
13namespace OpenGL::GLShader { 13namespace OpenGL::GLShader {
14 14
15using Tegra::Engines::Maxwell3D;
16
17/// Uniform structure for the Uniform Buffer Object, all vectors must be 16-byte aligned 15/// Uniform structure for the Uniform Buffer Object, all vectors must be 16-byte aligned
18// NOTE: Always keep a vec4 at the end. The GL spec is not clear whether the alignment at 16/// @note Always keep a vec4 at the end. The GL spec is not clear whether the alignment at
19// the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not. 17/// the end of a uniform block is included in UNIFORM_BLOCK_DATA_SIZE or not.
20// Not following that rule will cause problems on some AMD drivers. 18/// Not following that rule will cause problems on some AMD drivers.
21struct MaxwellUniformData { 19struct MaxwellUniformData {
22 void SetFromRegs(const Maxwell3D::State::ShaderStageInfo& shader_stage); 20 void SetFromRegs(const Tegra::Engines::Maxwell3D& maxwell, std::size_t shader_stage);
21
23 alignas(16) GLvec4 viewport_flip; 22 alignas(16) GLvec4 viewport_flip;
24 struct alignas(16) { 23 struct alignas(16) {
25 GLuint instance_id; 24 GLuint instance_id;
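
MaxwellUniformData::SetFromRegs() no longer reaches for Core::System::GetInstance() internally; the caller passes the Maxwell3D engine and the stage index explicitly, which is what the gl_rasterizer.cpp hunk above does with ubo.SetFromRegs(gpu, stage). A call-site sketch (UploadStageConstants is a hypothetical wrapper):

#include <cstddef>

#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_opengl/gl_shader_manager.h"

void UploadStageConstants(const Tegra::Engines::Maxwell3D& maxwell, std::size_t stage) {
    OpenGL::GLShader::MaxwellUniformData ubo{};
    ubo.SetFromRegs(maxwell, stage);
    // ...upload &ubo, sizeof(ubo) through the buffer cache as the rasterizer does...
}
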
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 5e3d862c6..d69cba9c3 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -5,7 +5,6 @@
5#include <algorithm> 5#include <algorithm>
6#include <cstddef> 6#include <cstddef>
7#include <cstdlib> 7#include <cstdlib>
8#include <cstring>
9#include <memory> 8#include <memory>
10#include <glad/glad.h> 9#include <glad/glad.h>
11#include "common/assert.h" 10#include "common/assert.h"
@@ -266,7 +265,7 @@ void RendererOpenGL::CreateRasterizer() {
266 } 265 }
267 // Initialize sRGB Usage 266 // Initialize sRGB Usage
268 OpenGLState::ClearsRGBUsed(); 267 OpenGLState::ClearsRGBUsed();
269 rasterizer = std::make_unique<RasterizerOpenGL>(render_window, system, screen_info); 268 rasterizer = std::make_unique<RasterizerOpenGL>(system, screen_info);
270} 269}
271 270
272void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture, 271void RendererOpenGL::ConfigureFramebufferTexture(TextureInfo& texture,
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index eac51ecb3..388b5ffd5 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -19,8 +19,8 @@ namespace Vulkan {
19 19
20CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset, 20CachedBufferEntry::CachedBufferEntry(VAddr cpu_addr, std::size_t size, u64 offset,
21 std::size_t alignment, u8* host_ptr) 21 std::size_t alignment, u8* host_ptr)
22 : cpu_addr{cpu_addr}, size{size}, offset{offset}, alignment{alignment}, RasterizerCacheObject{ 22 : RasterizerCacheObject{host_ptr}, cpu_addr{cpu_addr}, size{size}, offset{offset},
23 host_ptr} {} 23 alignment{alignment} {}
24 24
25VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager, 25VKBufferCache::VKBufferCache(Tegra::MemoryManager& tegra_memory_manager,
26 VideoCore::RasterizerInterface& rasterizer, const VKDevice& device, 26 VideoCore::RasterizerInterface& rasterizer, const VKDevice& device,
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index a1e117443..13c46e5b8 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -21,7 +21,7 @@ public:
21 CommandBufferPool(const VKDevice& device) 21 CommandBufferPool(const VKDevice& device)
22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} 22 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
23 23
24 void Allocate(std::size_t begin, std::size_t end) { 24 void Allocate(std::size_t begin, std::size_t end) override {
25 const auto dev = device.GetLogical(); 25 const auto dev = device.GetLogical();
26 const auto& dld = device.GetDispatchLoader(); 26 const auto& dld = device.GetDispatchLoader();
27 const u32 graphics_family = device.GetGraphicsFamily(); 27 const u32 graphics_family = device.GetGraphicsFamily();
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index 5bfe4cead..08ee86fa6 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -97,7 +97,7 @@ private:
97class VKFenceWatch final : public VKResource { 97class VKFenceWatch final : public VKResource {
98public: 98public:
99 explicit VKFenceWatch(); 99 explicit VKFenceWatch();
100 ~VKFenceWatch(); 100 ~VKFenceWatch() override;
101 101
102 /// Waits for the fence to be released. 102 /// Waits for the fence to be released.
103 void Wait(); 103 void Wait();
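
These hunks add missing override specifiers. Marking every overriding function (including destructors of derived classes) lets the compiler reject accidental signature mismatches instead of silently introducing a new virtual. A minimal sketch with hypothetical names:

#include <cstddef>

struct FencedPoolSketch {
    virtual ~FencedPoolSketch() = default;
    virtual void Allocate(std::size_t begin, std::size_t end) = 0;
};

struct CommandBufferPoolSketch final : FencedPoolSketch {
    ~CommandBufferPoolSketch() override = default;

    // A typo such as Allocate(std::size_t, int) would now fail to compile
    // instead of becoming an unrelated overload.
    void Allocate(std::size_t begin, std::size_t end) override {
        // allocate command buffers for indices [begin, end)...
    }
};
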
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
new file mode 100644
index 000000000..08279e562
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -0,0 +1,210 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <array>
7#include <limits>
8#include <vector>
9
10#include "common/assert.h"
11#include "common/logging/log.h"
12#include "core/core.h"
13#include "core/frontend/framebuffer_layout.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_device.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h"
17#include "video_core/renderer_vulkan/vk_swapchain.h"
18
19namespace Vulkan {
20
21namespace {
22vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats) {
23 if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) {
24 return {vk::Format::eB8G8R8A8Unorm, vk::ColorSpaceKHR::eSrgbNonlinear};
25 }
26 const auto& found = std::find_if(formats.begin(), formats.end(), [](const auto& format) {
27 return format.format == vk::Format::eB8G8R8A8Unorm &&
28 format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear;
29 });
30 return found != formats.end() ? *found : formats[0];
31}
32
33vk::PresentModeKHR ChooseSwapPresentMode(const std::vector<vk::PresentModeKHR>& modes) {
34 // Mailbox doesn't lock the application like fifo (vsync), prefer it
35 const auto& found = std::find_if(modes.begin(), modes.end(), [](const auto& mode) {
36 return mode == vk::PresentModeKHR::eMailbox;
37 });
38 return found != modes.end() ? *found : vk::PresentModeKHR::eFifo;
39}
40
41vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
42 u32 height) {
43 constexpr auto undefined_size{std::numeric_limits<u32>::max()};
44 if (capabilities.currentExtent.width != undefined_size) {
45 return capabilities.currentExtent;
46 }
47 vk::Extent2D extent = {width, height};
48 extent.width = std::max(capabilities.minImageExtent.width,
49 std::min(capabilities.maxImageExtent.width, extent.width));
50 extent.height = std::max(capabilities.minImageExtent.height,
51 std::min(capabilities.maxImageExtent.height, extent.height));
52 return extent;
53}
54} // namespace
55
56VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device)
57 : surface{surface}, device{device} {}
58
59VKSwapchain::~VKSwapchain() = default;
60
61void VKSwapchain::Create(u32 width, u32 height) {
62 const auto dev = device.GetLogical();
63 const auto& dld = device.GetDispatchLoader();
64 const auto physical_device = device.GetPhysical();
65
66 const vk::SurfaceCapabilitiesKHR capabilities{
67 physical_device.getSurfaceCapabilitiesKHR(surface, dld)};
68 if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
69 return;
70 }
71
72 dev.waitIdle(dld);
73 Destroy();
74
75 CreateSwapchain(capabilities, width, height);
76 CreateSemaphores();
77 CreateImageViews();
78
79 fences.resize(image_count, nullptr);
80}
81
82void VKSwapchain::AcquireNextImage() {
83 const auto dev{device.GetLogical()};
84 const auto& dld{device.GetDispatchLoader()};
85 dev.acquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
86 *present_semaphores[frame_index], {}, &image_index, dld);
87
88 if (auto& fence = fences[image_index]; fence) {
89 fence->Wait();
90 fence->Release();
91 fence = nullptr;
92 }
93}
94
95bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) {
96 const vk::Semaphore present_semaphore{*present_semaphores[frame_index]};
97 const std::array<vk::Semaphore, 2> semaphores{present_semaphore, render_semaphore};
98 const u32 wait_semaphore_count{render_semaphore ? 2U : 1U};
99 const auto& dld{device.GetDispatchLoader()};
100 const auto present_queue{device.GetPresentQueue()};
101 bool recreated = false;
102
103 const vk::PresentInfoKHR present_info(wait_semaphore_count, semaphores.data(), 1,
104 &swapchain.get(), &image_index, {});
105 switch (const auto result = present_queue.presentKHR(&present_info, dld); result) {
106 case vk::Result::eSuccess:
107 break;
108 case vk::Result::eErrorOutOfDateKHR:
109 if (current_width > 0 && current_height > 0) {
110 Create(current_width, current_height);
111 recreated = true;
112 }
113 break;
114 default:
115 LOG_CRITICAL(Render_Vulkan, "Vulkan failed to present swapchain due to {}!",
116 vk::to_string(result));
117 UNREACHABLE();
118 }
119
120 ASSERT(fences[image_index] == nullptr);
121 fences[image_index] = &fence;
122 frame_index = (frame_index + 1) % image_count;
123 return recreated;
124}
125
126bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const {
127 // TODO(Rodrigo): Handle framebuffer pixel format changes
128 return framebuffer.width != current_width || framebuffer.height != current_height;
129}
130
131void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width,
132 u32 height) {
133 const auto dev{device.GetLogical()};
134 const auto& dld{device.GetDispatchLoader()};
135 const auto physical_device{device.GetPhysical()};
136
137 const std::vector<vk::SurfaceFormatKHR> formats{
138 physical_device.getSurfaceFormatsKHR(surface, dld)};
139
140 const std::vector<vk::PresentModeKHR> present_modes{
141 physical_device.getSurfacePresentModesKHR(surface, dld)};
142
143 const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats)};
144 const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};
145 extent = ChooseSwapExtent(capabilities, width, height);
146
147 current_width = extent.width;
148 current_height = extent.height;
149
150 u32 requested_image_count{capabilities.minImageCount + 1};
151 if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
152 requested_image_count = capabilities.maxImageCount;
153 }
154
155 vk::SwapchainCreateInfoKHR swapchain_ci(
156 {}, surface, requested_image_count, surface_format.format, surface_format.colorSpace,
157 extent, 1, vk::ImageUsageFlagBits::eColorAttachment, {}, {}, {},
158 capabilities.currentTransform, vk::CompositeAlphaFlagBitsKHR::eOpaque, present_mode, false,
159 {});
160
161 const u32 graphics_family{device.GetGraphicsFamily()};
162 const u32 present_family{device.GetPresentFamily()};
163 const std::array<u32, 2> queue_indices{graphics_family, present_family};
164 if (graphics_family != present_family) {
165 swapchain_ci.imageSharingMode = vk::SharingMode::eConcurrent;
166 swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
167 swapchain_ci.pQueueFamilyIndices = queue_indices.data();
168 } else {
169 swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive;
170 }
171
172 swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld);
173
174 images = dev.getSwapchainImagesKHR(*swapchain, dld);
175 image_count = static_cast<u32>(images.size());
176 image_format = surface_format.format;
177}
178
179void VKSwapchain::CreateSemaphores() {
180 const auto dev{device.GetLogical()};
181 const auto& dld{device.GetDispatchLoader()};
182
183 present_semaphores.resize(image_count);
184 for (std::size_t i = 0; i < image_count; i++) {
185 present_semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld);
186 }
187}
188
189void VKSwapchain::CreateImageViews() {
190 const auto dev{device.GetLogical()};
191 const auto& dld{device.GetDispatchLoader()};
192
193 image_views.resize(image_count);
194 for (std::size_t i = 0; i < image_count; i++) {
195 const vk::ImageViewCreateInfo image_view_ci({}, images[i], vk::ImageViewType::e2D,
196 image_format, {},
197 {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1});
198 image_views[i] = dev.createImageViewUnique(image_view_ci, nullptr, dld);
199 }
200}
201
202void VKSwapchain::Destroy() {
203 frame_index = 0;
204 present_semaphores.clear();
205 framebuffers.clear();
206 image_views.clear();
207 swapchain.reset();
208}
209
210} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
new file mode 100644
index 000000000..2ad84f185
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -0,0 +1,92 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <vector>
8
9#include "common/common_types.h"
10#include "video_core/renderer_vulkan/declarations.h"
11
12namespace Layout {
13struct FramebufferLayout;
14}
15
16namespace Vulkan {
17
18class VKDevice;
19class VKFence;
20
21class VKSwapchain {
22public:
23 explicit VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device);
24 ~VKSwapchain();
25
26 /// Creates (or recreates) the swapchain with a given size.
27 void Create(u32 width, u32 height);
28
29 /// Acquires the next image in the swapchain, waits as needed.
30 void AcquireNextImage();
31
32 /// Presents the rendered image to the swapchain. Returns true when the swapchains had to be
33 /// recreated. Takes responsability for the ownership of fence.
34 bool Present(vk::Semaphore render_semaphore, VKFence& fence);
35
36 /// Returns true when the framebuffer layout has changed.
37 bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
38
39 const vk::Extent2D& GetSize() const {
40 return extent;
41 }
42
43 u32 GetImageCount() const {
44 return image_count;
45 }
46
47 u32 GetImageIndex() const {
48 return image_index;
49 }
50
51 vk::Image GetImageIndex(u32 index) const {
52 return images[index];
53 }
54
55 vk::ImageView GetImageViewIndex(u32 index) const {
56 return *image_views[index];
57 }
58
59 vk::Format GetImageFormat() const {
60 return image_format;
61 }
62
63private:
64 void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height);
65 void CreateSemaphores();
66 void CreateImageViews();
67
68 void Destroy();
69
70 const vk::SurfaceKHR surface;
71 const VKDevice& device;
72
73 UniqueSwapchainKHR swapchain;
74
75 u32 image_count{};
76 std::vector<vk::Image> images;
77 std::vector<UniqueImageView> image_views;
78 std::vector<UniqueFramebuffer> framebuffers;
79 std::vector<VKFence*> fences;
80 std::vector<UniqueSemaphore> present_semaphores;
81
82 u32 image_index{};
83 u32 frame_index{};
84
85 vk::Format image_format{};
86 vk::Extent2D extent{};
87
88 u32 current_width{};
89 u32 current_height{};
90};
91
92} // namespace Vulkan
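
The new VKSwapchain is not wired into a renderer by this change, so the following per-frame loop is only a hypothetical sketch of how the interface above is meant to be driven (DrawFrame and its parameters are assumptions):

#include "core/frontend/framebuffer_layout.h"
#include "video_core/renderer_vulkan/declarations.h"
#include "video_core/renderer_vulkan/vk_resource_manager.h"
#include "video_core/renderer_vulkan/vk_swapchain.h"

void DrawFrame(Vulkan::VKSwapchain& swapchain, const Layout::FramebufferLayout& layout,
               vk::Semaphore render_semaphore, Vulkan::VKFence& fence) {
    // Recreate the swapchain when the window size no longer matches it.
    if (swapchain.HasFramebufferChanged(layout)) {
        swapchain.Create(layout.width, layout.height);
    }

    swapchain.AcquireNextImage();
    // ...record and submit work that renders to the acquired image and
    // signals render_semaphore...

    if (swapchain.Present(render_semaphore, fence)) {
        // Present() returning true means the swapchain was recreated
        // (e.g. eErrorOutOfDateKHR); image views cached elsewhere are stale.
    }
}
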
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 40da1a4e2..dc149d2ed 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -24,7 +24,7 @@ constexpr u32 TIMEOUT_SECONDS = 30;
24struct Client::Impl { 24struct Client::Impl {
25 Impl(std::string host, std::string username, std::string token) 25 Impl(std::string host, std::string username, std::string token)
26 : host{std::move(host)}, username{std::move(username)}, token{std::move(token)} { 26 : host{std::move(host)}, username{std::move(username)}, token{std::move(token)} {
27 std::lock_guard<std::mutex> lock(jwt_cache.mutex); 27 std::lock_guard lock{jwt_cache.mutex};
28 if (this->username == jwt_cache.username && this->token == jwt_cache.token) { 28 if (this->username == jwt_cache.username && this->token == jwt_cache.token) {
29 jwt = jwt_cache.jwt; 29 jwt = jwt_cache.jwt;
30 } 30 }
@@ -151,7 +151,7 @@ struct Client::Impl {
151 if (result.result_code != Common::WebResult::Code::Success) { 151 if (result.result_code != Common::WebResult::Code::Success) {
152 LOG_ERROR(WebService, "UpdateJWT failed"); 152 LOG_ERROR(WebService, "UpdateJWT failed");
153 } else { 153 } else {
154 std::lock_guard<std::mutex> lock(jwt_cache.mutex); 154 std::lock_guard lock{jwt_cache.mutex};
155 jwt_cache.username = username; 155 jwt_cache.username = username;
156 jwt_cache.token = token; 156 jwt_cache.token = token;
157 jwt_cache.jwt = jwt = result.returned_data; 157 jwt_cache.jwt = jwt = result.returned_data;
diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp
index 5c1b65a2c..f95f7fe3c 100644
--- a/src/yuzu/applets/profile_select.cpp
+++ b/src/yuzu/applets/profile_select.cpp
@@ -58,10 +58,7 @@ QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent)
58 58
59 scroll_area = new QScrollArea; 59 scroll_area = new QScrollArea;
60 60
61 buttons = new QDialogButtonBox; 61 buttons = new QDialogButtonBox(QDialogButtonBox::Cancel | QDialogButtonBox::Ok);
62 buttons->addButton(tr("Cancel"), QDialogButtonBox::RejectRole);
63 buttons->addButton(tr("OK"), QDialogButtonBox::AcceptRole);
64
65 connect(buttons, &QDialogButtonBox::accepted, this, &QtProfileSelectionDialog::accept); 62 connect(buttons, &QDialogButtonBox::accepted, this, &QtProfileSelectionDialog::accept);
66 connect(buttons, &QDialogButtonBox::rejected, this, &QtProfileSelectionDialog::reject); 63 connect(buttons, &QDialogButtonBox::rejected, this, &QtProfileSelectionDialog::reject);
67 64
@@ -163,6 +160,6 @@ void QtProfileSelector::SelectProfile(
163 160
164void QtProfileSelector::MainWindowFinishedSelection(std::optional<Service::Account::UUID> uuid) { 161void QtProfileSelector::MainWindowFinishedSelection(std::optional<Service::Account::UUID> uuid) {
165 // Acquire the HLE mutex 162 // Acquire the HLE mutex
166 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 163 std::lock_guard lock{HLE::g_hle_lock};
167 callback(uuid); 164 callback(uuid);
168} 165}
diff --git a/src/yuzu/applets/software_keyboard.cpp b/src/yuzu/applets/software_keyboard.cpp
index 8a26fdff1..f3eb29b25 100644
--- a/src/yuzu/applets/software_keyboard.cpp
+++ b/src/yuzu/applets/software_keyboard.cpp
@@ -75,13 +75,13 @@ QtSoftwareKeyboardDialog::QtSoftwareKeyboardDialog(
75 length_label->setText(QStringLiteral("%1/%2").arg(text.size()).arg(parameters.max_length)); 75 length_label->setText(QStringLiteral("%1/%2").arg(text.size()).arg(parameters.max_length));
76 }); 76 });
77 77
78 buttons = new QDialogButtonBox; 78 buttons = new QDialogButtonBox(QDialogButtonBox::Cancel);
79 buttons->addButton(tr("Cancel"), QDialogButtonBox::RejectRole); 79 if (parameters.submit_text.empty()) {
80 buttons->addButton(parameters.submit_text.empty() 80 buttons->addButton(QDialogButtonBox::Ok);
81 ? tr("OK") 81 } else {
82 : QString::fromStdU16String(parameters.submit_text), 82 buttons->addButton(QString::fromStdU16String(parameters.submit_text),
83 QDialogButtonBox::AcceptRole); 83 QDialogButtonBox::AcceptRole);
84 84 }
85 connect(buttons, &QDialogButtonBox::accepted, this, &QtSoftwareKeyboardDialog::accept); 85 connect(buttons, &QDialogButtonBox::accepted, this, &QtSoftwareKeyboardDialog::accept);
86 connect(buttons, &QDialogButtonBox::rejected, this, &QtSoftwareKeyboardDialog::reject); 86 connect(buttons, &QDialogButtonBox::rejected, this, &QtSoftwareKeyboardDialog::reject);
87 layout->addWidget(header_label); 87 layout->addWidget(header_label);
@@ -141,12 +141,12 @@ void QtSoftwareKeyboard::SendTextCheckDialog(std::u16string error_message,
141 141
142void QtSoftwareKeyboard::MainWindowFinishedText(std::optional<std::u16string> text) { 142void QtSoftwareKeyboard::MainWindowFinishedText(std::optional<std::u16string> text) {
143 // Acquire the HLE mutex 143 // Acquire the HLE mutex
144 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 144 std::lock_guard lock{HLE::g_hle_lock};
145 text_output(text); 145 text_output(text);
146} 146}
147 147
148void QtSoftwareKeyboard::MainWindowFinishedCheckDialog() { 148void QtSoftwareKeyboard::MainWindowFinishedCheckDialog() {
149 // Acquire the HLE mutex 149 // Acquire the HLE mutex
150 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 150 std::lock_guard lock{HLE::g_hle_lock};
151 finished_check(); 151 finished_check();
152} 152}
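
The dialog changes above switch from manually added, translated buttons to QDialogButtonBox's standard buttons, which Qt localizes and orders per platform; only a custom caption still goes through addButton() with an explicit role. A small standalone sketch of the same pattern, with MakeButtons() as a hypothetical helper name:

#include <QDialogButtonBox>
#include <QString>
#include <QWidget>

QDialogButtonBox* MakeButtons(QWidget* parent, const QString& submit_text) {
    // Start with a localized, platform-ordered Cancel button.
    auto* buttons = new QDialogButtonBox(QDialogButtonBox::Cancel, parent);
    if (submit_text.isEmpty()) {
        buttons->addButton(QDialogButtonBox::Ok);                       // standard "OK"
    } else {
        buttons->addButton(submit_text, QDialogButtonBox::AcceptRole);  // custom caption
    }
    return buttons;
}
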
diff --git a/src/yuzu/applets/web_browser.cpp b/src/yuzu/applets/web_browser.cpp
index 979b9ec14..ac80b2fa2 100644
--- a/src/yuzu/applets/web_browser.cpp
+++ b/src/yuzu/applets/web_browser.cpp
@@ -104,12 +104,12 @@ void QtWebBrowser::OpenPage(std::string_view url, std::function<void()> unpack_r
104 104
105void QtWebBrowser::MainWindowUnpackRomFS() { 105void QtWebBrowser::MainWindowUnpackRomFS() {
106 // Acquire the HLE mutex 106 // Acquire the HLE mutex
107 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 107 std::lock_guard lock{HLE::g_hle_lock};
108 unpack_romfs_callback(); 108 unpack_romfs_callback();
109} 109}
110 110
111void QtWebBrowser::MainWindowFinishedBrowsing() { 111void QtWebBrowser::MainWindowFinishedBrowsing() {
112 // Acquire the HLE mutex 112 // Acquire the HLE mutex
113 std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock); 113 std::lock_guard lock{HLE::g_hle_lock};
114 finished_callback(); 114 finished_callback();
115} 115}
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 05ad19e1d..7438fbc0a 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -67,7 +67,7 @@ void EmuThread::run() {
67 67
68 was_active = false; 68 was_active = false;
69 } else { 69 } else {
70 std::unique_lock<std::mutex> lock(running_mutex); 70 std::unique_lock lock{running_mutex};
71 running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; }); 71 running_cv.wait(lock, [this] { return IsRunning() || exec_step || stop_run; });
72 } 72 }
73 } 73 }
diff --git a/src/yuzu/bootmanager.h b/src/yuzu/bootmanager.h
index 7226e690e..3183621bc 100644
--- a/src/yuzu/bootmanager.h
+++ b/src/yuzu/bootmanager.h
@@ -53,7 +53,7 @@ public:
53 * @note This function is thread-safe 53 * @note This function is thread-safe
54 */ 54 */
55 void SetRunning(bool running) { 55 void SetRunning(bool running) {
56 std::unique_lock<std::mutex> lock(running_mutex); 56 std::unique_lock lock{running_mutex};
57 this->running = running; 57 this->running = running;
58 lock.unlock(); 58 lock.unlock();
59 running_cv.notify_all(); 59 running_cv.notify_all();
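
The SetRunning()/run() pair touched above follows the usual condition-variable pattern: update the flag under the lock, unlock, then notify, while the waiter uses the predicate overload of wait() to cope with spurious wakeups. A minimal standalone sketch, not tied to EmuThread:

#include <condition_variable>
#include <mutex>

std::mutex running_mutex;
std::condition_variable running_cv;
bool running = false;

void SetRunning(bool value) {
    std::unique_lock lock{running_mutex};
    running = value;
    lock.unlock();            // release first so woken waiters can take the lock immediately
    running_cv.notify_all();
}

void WaitUntilRunning() {
    std::unique_lock lock{running_mutex};
    running_cv.wait(lock, [] { return running; });  // predicate guards against spurious wakeups
}
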
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 4650f96a3..dead9f807 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -413,7 +413,6 @@ void Config::ReadValues() {
413 413
414 qt_config->beginGroup("System"); 414 qt_config->beginGroup("System");
415 Settings::values.use_docked_mode = ReadSetting("use_docked_mode", false).toBool(); 415 Settings::values.use_docked_mode = ReadSetting("use_docked_mode", false).toBool();
416 Settings::values.enable_nfc = ReadSetting("enable_nfc", true).toBool();
417 416
418 Settings::values.current_user = 417 Settings::values.current_user =
419 std::clamp<int>(ReadSetting("current_user", 0).toInt(), 0, Service::Account::MAX_USERS - 1); 418 std::clamp<int>(ReadSetting("current_user", 0).toInt(), 0, Service::Account::MAX_USERS - 1);
@@ -675,7 +674,6 @@ void Config::SaveValues() {
675 674
676 qt_config->beginGroup("System"); 675 qt_config->beginGroup("System");
677 WriteSetting("use_docked_mode", Settings::values.use_docked_mode, false); 676 WriteSetting("use_docked_mode", Settings::values.use_docked_mode, false);
678 WriteSetting("enable_nfc", Settings::values.enable_nfc, true);
679 WriteSetting("current_user", Settings::values.current_user, 0); 677 WriteSetting("current_user", Settings::values.current_user, 0);
680 WriteSetting("language_index", Settings::values.language_index, 1); 678 WriteSetting("language_index", Settings::values.language_index, 1);
681 679
diff --git a/src/yuzu/configuration/configure_general.cpp b/src/yuzu/configuration/configure_general.cpp
index 4116b6cd7..389fcf667 100644
--- a/src/yuzu/configuration/configure_general.cpp
+++ b/src/yuzu/configuration/configure_general.cpp
@@ -33,7 +33,6 @@ void ConfigureGeneral::setConfiguration() {
33 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot); 33 ui->toggle_user_on_boot->setChecked(UISettings::values.select_user_on_boot);
34 ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme)); 34 ui->theme_combobox->setCurrentIndex(ui->theme_combobox->findData(UISettings::values.theme));
35 ui->use_cpu_jit->setChecked(Settings::values.use_cpu_jit); 35 ui->use_cpu_jit->setChecked(Settings::values.use_cpu_jit);
36 ui->enable_nfc->setChecked(Settings::values.enable_nfc);
37} 36}
38 37
39void ConfigureGeneral::PopulateHotkeyList(const HotkeyRegistry& registry) { 38void ConfigureGeneral::PopulateHotkeyList(const HotkeyRegistry& registry) {
@@ -48,5 +47,4 @@ void ConfigureGeneral::applyConfiguration() {
48 ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString(); 47 ui->theme_combobox->itemData(ui->theme_combobox->currentIndex()).toString();
49 48
50 Settings::values.use_cpu_jit = ui->use_cpu_jit->isChecked(); 49 Settings::values.use_cpu_jit = ui->use_cpu_jit->isChecked();
51 Settings::values.enable_nfc = ui->enable_nfc->isChecked();
52} 50}
diff --git a/src/yuzu/configuration/configure_general.ui b/src/yuzu/configuration/configure_general.ui
index dff0ad5d0..01d1c0b8e 100644
--- a/src/yuzu/configuration/configure_general.ui
+++ b/src/yuzu/configuration/configure_general.ui
@@ -71,26 +71,6 @@
71 </widget> 71 </widget>
72 </item> 72 </item>
73 <item> 73 <item>
74 <widget class="QGroupBox" name="EmulationGroupBox">
75 <property name="title">
76 <string>Emulation</string>
77 </property>
78 <layout class="QHBoxLayout" name="EmulationHorizontalLayout">
79 <item>
80 <layout class="QVBoxLayout" name="EmulationVerticalLayout">
81 <item>
82 <widget class="QCheckBox" name="enable_nfc">
83 <property name="text">
84 <string>Enable NFC</string>
85 </property>
86 </widget>
87 </item>
88 </layout>
89 </item>
90 </layout>
91 </widget>
92 </item>
93 <item>
94 <widget class="QGroupBox" name="theme_group_box"> 74 <widget class="QGroupBox" name="theme_group_box">
95 <property name="title"> 75 <property name="title">
96 <string>Theme</string> 76 <string>Theme</string>
diff --git a/src/yuzu/debugger/profiler.cpp b/src/yuzu/debugger/profiler.cpp
index 8b30e0a85..86e03e46d 100644
--- a/src/yuzu/debugger/profiler.cpp
+++ b/src/yuzu/debugger/profiler.cpp
@@ -7,6 +7,7 @@
7#include <QMouseEvent> 7#include <QMouseEvent>
8#include <QPainter> 8#include <QPainter>
9#include <QString> 9#include <QString>
10#include <QTimer>
10#include "common/common_types.h" 11#include "common/common_types.h"
11#include "common/microprofile.h" 12#include "common/microprofile.h"
12#include "yuzu/debugger/profiler.h" 13#include "yuzu/debugger/profiler.h"
diff --git a/src/yuzu/debugger/profiler.h b/src/yuzu/debugger/profiler.h
index eae1e9e3c..8e69fdb06 100644
--- a/src/yuzu/debugger/profiler.h
+++ b/src/yuzu/debugger/profiler.h
@@ -4,10 +4,11 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <QAbstractItemModel> 7#include <QWidget>
8#include <QDockWidget> 8
9#include <QTimer> 9class QAction;
10#include "common/microprofile.h" 10class QHideEvent;
11class QShowEvent;
11 12
12class MicroProfileDialog : public QWidget { 13class MicroProfileDialog : public QWidget {
13 Q_OBJECT 14 Q_OBJECT
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 06ad74ffe..593bb681f 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -234,6 +234,9 @@ QString WaitTreeThread::GetText() const {
234 case Kernel::ThreadStatus::WaitMutex: 234 case Kernel::ThreadStatus::WaitMutex:
235 status = tr("waiting for mutex"); 235 status = tr("waiting for mutex");
236 break; 236 break;
237 case Kernel::ThreadStatus::WaitCondVar:
238 status = tr("waiting for condition variable");
239 break;
237 case Kernel::ThreadStatus::WaitArb: 240 case Kernel::ThreadStatus::WaitArb:
238 status = tr("waiting for address arbiter"); 241 status = tr("waiting for address arbiter");
239 break; 242 break;
@@ -269,6 +272,7 @@ QColor WaitTreeThread::GetColor() const {
269 case Kernel::ThreadStatus::WaitSynchAll: 272 case Kernel::ThreadStatus::WaitSynchAll:
270 case Kernel::ThreadStatus::WaitSynchAny: 273 case Kernel::ThreadStatus::WaitSynchAny:
271 case Kernel::ThreadStatus::WaitMutex: 274 case Kernel::ThreadStatus::WaitMutex:
275 case Kernel::ThreadStatus::WaitCondVar:
272 case Kernel::ThreadStatus::WaitArb: 276 case Kernel::ThreadStatus::WaitArb:
273 return QColor(Qt::GlobalColor::red); 277 return QColor(Qt::GlobalColor::red);
274 case Kernel::ThreadStatus::Dormant: 278 case Kernel::ThreadStatus::Dormant:
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index c0e3c5fa9..4422a572b 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -329,6 +329,8 @@ void GameList::PopupContextMenu(const QPoint& menu_location) {
329 QMenu context_menu; 329 QMenu context_menu;
330 QAction* open_save_location = context_menu.addAction(tr("Open Save Data Location")); 330 QAction* open_save_location = context_menu.addAction(tr("Open Save Data Location"));
331 QAction* open_lfs_location = context_menu.addAction(tr("Open Mod Data Location")); 331 QAction* open_lfs_location = context_menu.addAction(tr("Open Mod Data Location"));
332 QAction* open_transferable_shader_cache =
333 context_menu.addAction(tr("Open Transferable Shader Cache"));
332 context_menu.addSeparator(); 334 context_menu.addSeparator();
333 QAction* dump_romfs = context_menu.addAction(tr("Dump RomFS")); 335 QAction* dump_romfs = context_menu.addAction(tr("Dump RomFS"));
334 QAction* copy_tid = context_menu.addAction(tr("Copy Title ID to Clipboard")); 336 QAction* copy_tid = context_menu.addAction(tr("Copy Title ID to Clipboard"));
@@ -344,6 +346,8 @@ void GameList::PopupContextMenu(const QPoint& menu_location) {
344 [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::SaveData); }); 346 [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::SaveData); });
345 connect(open_lfs_location, &QAction::triggered, 347 connect(open_lfs_location, &QAction::triggered,
346 [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::ModData); }); 348 [&]() { emit OpenFolderRequested(program_id, GameListOpenTarget::ModData); });
349 connect(open_transferable_shader_cache, &QAction::triggered,
350 [&]() { emit OpenTransferableShaderCacheRequested(program_id); });
347 connect(dump_romfs, &QAction::triggered, [&]() { emit DumpRomFSRequested(program_id, path); }); 351 connect(dump_romfs, &QAction::triggered, [&]() { emit DumpRomFSRequested(program_id, path); });
348 connect(copy_tid, &QAction::triggered, [&]() { emit CopyTIDRequested(program_id); }); 352 connect(copy_tid, &QAction::triggered, [&]() { emit CopyTIDRequested(program_id); });
349 connect(navigate_to_gamedb_entry, &QAction::triggered, 353 connect(navigate_to_gamedb_entry, &QAction::triggered,
diff --git a/src/yuzu/game_list.h b/src/yuzu/game_list.h
index b317eb2fc..8ea5cbaaa 100644
--- a/src/yuzu/game_list.h
+++ b/src/yuzu/game_list.h
@@ -66,6 +66,7 @@ signals:
66 void GameChosen(QString game_path); 66 void GameChosen(QString game_path);
67 void ShouldCancelWorker(); 67 void ShouldCancelWorker();
68 void OpenFolderRequested(u64 program_id, GameListOpenTarget target); 68 void OpenFolderRequested(u64 program_id, GameListOpenTarget target);
69 void OpenTransferableShaderCacheRequested(u64 program_id);
69 void DumpRomFSRequested(u64 program_id, const std::string& game_path); 70 void DumpRomFSRequested(u64 program_id, const std::string& game_path);
70 void CopyTIDRequested(u64 program_id); 71 void CopyTIDRequested(u64 program_id);
71 void NavigateToGamedbEntryRequested(u64 program_id, 72 void NavigateToGamedbEntryRequested(u64 program_id,
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 41ba3c4c6..2b9db69a3 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -37,14 +37,20 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
37#include <glad/glad.h> 37#include <glad/glad.h>
38 38
39#define QT_NO_OPENGL 39#define QT_NO_OPENGL
40#include <QClipboard>
41#include <QDesktopServices>
40#include <QDesktopWidget> 42#include <QDesktopWidget>
41#include <QDialogButtonBox> 43#include <QDialogButtonBox>
42#include <QFile> 44#include <QFile>
43#include <QFileDialog> 45#include <QFileDialog>
46#include <QInputDialog>
44#include <QMessageBox> 47#include <QMessageBox>
48#include <QProgressBar>
49#include <QProgressDialog>
50#include <QShortcut>
51#include <QStatusBar>
45#include <QtConcurrent/QtConcurrent> 52#include <QtConcurrent/QtConcurrent>
46#include <QtGui> 53
47#include <QtWidgets>
48#include <fmt/format.h> 54#include <fmt/format.h>
49#include "common/common_paths.h" 55#include "common/common_paths.h"
50#include "common/detached_tasks.h" 56#include "common/detached_tasks.h"
@@ -55,11 +61,9 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
55#include "common/microprofile.h" 61#include "common/microprofile.h"
56#include "common/scm_rev.h" 62#include "common/scm_rev.h"
57#include "common/scope_exit.h" 63#include "common/scope_exit.h"
58#include "common/string_util.h"
59#include "common/telemetry.h" 64#include "common/telemetry.h"
60#include "core/core.h" 65#include "core/core.h"
61#include "core/crypto/key_manager.h" 66#include "core/crypto/key_manager.h"
62#include "core/file_sys/bis_factory.h"
63#include "core/file_sys/card_image.h" 67#include "core/file_sys/card_image.h"
64#include "core/file_sys/content_archive.h" 68#include "core/file_sys/content_archive.h"
65#include "core/file_sys/control_metadata.h" 69#include "core/file_sys/control_metadata.h"
@@ -71,7 +75,6 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
71#include "core/frontend/applets/software_keyboard.h" 75#include "core/frontend/applets/software_keyboard.h"
72#include "core/hle/kernel/process.h" 76#include "core/hle/kernel/process.h"
73#include "core/hle/service/filesystem/filesystem.h" 77#include "core/hle/service/filesystem/filesystem.h"
74#include "core/hle/service/filesystem/fsp_ldr.h"
75#include "core/hle/service/nfp/nfp.h" 78#include "core/hle/service/nfp/nfp.h"
76#include "core/hle/service/sm/sm.h" 79#include "core/hle/service/sm/sm.h"
77#include "core/loader/loader.h" 80#include "core/loader/loader.h"
@@ -648,6 +651,8 @@ void GMainWindow::RestoreUIState() {
648void GMainWindow::ConnectWidgetEvents() { 651void GMainWindow::ConnectWidgetEvents() {
649 connect(game_list, &GameList::GameChosen, this, &GMainWindow::OnGameListLoadFile); 652 connect(game_list, &GameList::GameChosen, this, &GMainWindow::OnGameListLoadFile);
650 connect(game_list, &GameList::OpenFolderRequested, this, &GMainWindow::OnGameListOpenFolder); 653 connect(game_list, &GameList::OpenFolderRequested, this, &GMainWindow::OnGameListOpenFolder);
654 connect(game_list, &GameList::OpenTransferableShaderCacheRequested, this,
655 &GMainWindow::OnTransferableShaderCacheOpenFile);
651 connect(game_list, &GameList::DumpRomFSRequested, this, &GMainWindow::OnGameListDumpRomFS); 656 connect(game_list, &GameList::DumpRomFSRequested, this, &GMainWindow::OnGameListDumpRomFS);
652 connect(game_list, &GameList::CopyTIDRequested, this, &GMainWindow::OnGameListCopyTID); 657 connect(game_list, &GameList::CopyTIDRequested, this, &GMainWindow::OnGameListCopyTID);
653 connect(game_list, &GameList::NavigateToGamedbEntryRequested, this, 658 connect(game_list, &GameList::NavigateToGamedbEntryRequested, this,
@@ -1082,6 +1087,39 @@ void GMainWindow::OnGameListOpenFolder(u64 program_id, GameListOpenTarget target
1082 QDesktopServices::openUrl(QUrl::fromLocalFile(qpath)); 1087 QDesktopServices::openUrl(QUrl::fromLocalFile(qpath));
1083} 1088}
1084 1089
1090void GMainWindow::OnTransferableShaderCacheOpenFile(u64 program_id) {
1091 ASSERT(program_id != 0);
1092
1093 const QString transferable_shader_cache_folder_path =
1094 QString::fromStdString(FileUtil::GetUserPath(FileUtil::UserPath::ShaderDir)) + "opengl" +
1095 DIR_SEP + "transferable";
1096
1097 const QString transferable_shader_cache_file_path =
1098 transferable_shader_cache_folder_path + DIR_SEP +
1099 QString::fromStdString(fmt::format("{:016X}.bin", program_id));
1100
1101 if (!QFile::exists(transferable_shader_cache_file_path)) {
1102 QMessageBox::warning(this, tr("Error Opening Transferable Shader Cache"),
1103 tr("A shader cache for this title does not exist."));
1104 return;
1105 }
1106
1107 // Windows supports opening a folder in Explorer with a specified file preselected. On every other
1108 // OS we just open the transferable shader cache folder without preselecting the transferable
1109 // shader cache file for the selected game.
1110#if defined(Q_OS_WIN)
1111 const QString explorer = QStringLiteral("explorer");
1112 QStringList param;
1113 if (!QFileInfo(transferable_shader_cache_file_path).isDir()) {
1114 param << QStringLiteral("/select,");
1115 }
1116 param << QDir::toNativeSeparators(transferable_shader_cache_file_path);
1117 QProcess::startDetached(explorer, param);
1118#else
1119 QDesktopServices::openUrl(QUrl::fromLocalFile(transferable_shader_cache_folder_path));
1120#endif
1121}
1122
1085static std::size_t CalculateRomFSEntrySize(const FileSys::VirtualDir& dir, bool full) { 1123static std::size_t CalculateRomFSEntrySize(const FileSys::VirtualDir& dir, bool full) {
1086 std::size_t out = 0; 1124 std::size_t out = 0;
1087 1125
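
The comment in OnTransferableShaderCacheOpenFile describes the usual "reveal in file manager" trick: on Windows, explorer accepts a /select, argument to open the containing folder with the file highlighted, while other platforms simply open the folder. The same idea in isolation, with RevealInFileManager() as a hypothetical helper:

#include <QDesktopServices>
#include <QDir>
#include <QFileInfo>
#include <QProcess>
#include <QString>
#include <QStringList>
#include <QUrl>

void RevealInFileManager(const QString& path) {
#if defined(Q_OS_WIN)
    QStringList args;
    if (!QFileInfo(path).isDir()) {
        args << QStringLiteral("/select,");   // explorer highlights the given file
    }
    args << QDir::toNativeSeparators(path);
    QProcess::startDetached(QStringLiteral("explorer"), args);
#else
    // No portable preselection; just open the containing folder.
    QDesktopServices::openUrl(QUrl::fromLocalFile(QFileInfo(path).absolutePath()));
#endif
}
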
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index e07c892cf..7f3aa998e 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -176,6 +176,7 @@ private slots:
176 /// Called whenever a user selects a game in the game list widget. 176 /// Called whenever a user selects a game in the game list widget.
177 void OnGameListLoadFile(QString game_path); 177 void OnGameListLoadFile(QString game_path);
178 void OnGameListOpenFolder(u64 program_id, GameListOpenTarget target); 178 void OnGameListOpenFolder(u64 program_id, GameListOpenTarget target);
179 void OnTransferableShaderCacheOpenFile(u64 program_id);
179 void OnGameListDumpRomFS(u64 program_id, const std::string& game_path); 180 void OnGameListDumpRomFS(u64 program_id, const std::string& game_path);
180 void OnGameListCopyTID(u64 program_id); 181 void OnGameListCopyTID(u64 program_id);
181 void OnGameListNavigateToGamedbEntry(u64 program_id, 182 void OnGameListNavigateToGamedbEntry(u64 program_id,
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 32e78049c..f24cc77fe 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -319,7 +319,6 @@ void Config::ReadValues() {
319 319
320 // System 320 // System
321 Settings::values.use_docked_mode = sdl2_config->GetBoolean("System", "use_docked_mode", false); 321 Settings::values.use_docked_mode = sdl2_config->GetBoolean("System", "use_docked_mode", false);
322 Settings::values.enable_nfc = sdl2_config->GetBoolean("System", "enable_nfc", true);
323 const auto size = sdl2_config->GetInteger("System", "users_size", 0); 322 const auto size = sdl2_config->GetInteger("System", "users_size", 0);
324 323
325 Settings::values.current_user = std::clamp<int>( 324 Settings::values.current_user = std::clamp<int>(