summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/CMakeLists.txt4
-rw-r--r--src/audio_core/sink_context.h6
-rw-r--r--src/audio_core/voice_context.h36
-rw-r--r--src/common/CMakeLists.txt7
-rw-r--r--src/common/alignment.h29
-rw-r--r--src/common/atomic_ops.cpp75
-rw-r--r--src/common/atomic_ops.h71
-rw-r--r--src/common/bit_util.h125
-rw-r--r--src/common/color.h271
-rw-r--r--src/common/common_funcs.h16
-rw-r--r--src/common/intrusive_red_black_tree.h602
-rw-r--r--src/common/logging/backend.cpp16
-rw-r--r--src/common/parent_of_member.h191
-rw-r--r--src/common/timer.cpp159
-rw-r--r--src/common/timer.h41
-rw-r--r--src/common/tree.h674
-rw-r--r--src/common/uuid.h4
-rw-r--r--src/common/x64/native_clock.cpp110
-rw-r--r--src/common/x64/native_clock.h21
-rw-r--r--src/core/CMakeLists.txt19
-rw-r--r--src/core/arm/arm_interface.h7
-rw-r--r--src/core/core_timing.cpp1
-rw-r--r--src/core/file_sys/content_archive.cpp24
-rw-r--r--src/core/file_sys/savedata_factory.h4
-rw-r--r--src/core/frontend/emu_window.cpp43
-rw-r--r--src/core/frontend/emu_window.h13
-rw-r--r--src/core/frontend/input.h7
-rw-r--r--src/core/frontend/input_interpreter.cpp4
-rw-r--r--src/core/frontend/input_interpreter.h29
-rw-r--r--src/core/hle/ipc.h8
-rw-r--r--src/core/hle/kernel/address_arbiter.cpp317
-rw-r--r--src/core/hle/kernel/address_arbiter.h91
-rw-r--r--src/core/hle/kernel/client_port.cpp3
-rw-r--r--src/core/hle/kernel/client_session.cpp11
-rw-r--r--src/core/hle/kernel/client_session.h8
-rw-r--r--src/core/hle/kernel/errors.h3
-rw-r--r--src/core/hle/kernel/k_address_arbiter.cpp367
-rw-r--r--src/core/hle/kernel/k_address_arbiter.h70
-rw-r--r--src/core/hle/kernel/k_condition_variable.cpp349
-rw-r--r--src/core/hle/kernel/k_condition_variable.h59
-rw-r--r--src/core/hle/kernel/k_priority_queue.h4
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp45
-rw-r--r--src/core/hle/kernel/k_scheduler.h5
-rw-r--r--src/core/hle/kernel/k_scheduler_lock.h2
-rw-r--r--src/core/hle/kernel/k_synchronization_object.cpp172
-rw-r--r--src/core/hle/kernel/k_synchronization_object.h58
-rw-r--r--src/core/hle/kernel/kernel.cpp19
-rw-r--r--src/core/hle/kernel/kernel.h7
-rw-r--r--src/core/hle/kernel/memory/memory_layout.h19
-rw-r--r--src/core/hle/kernel/memory/page_heap.h4
-rw-r--r--src/core/hle/kernel/mutex.cpp170
-rw-r--r--src/core/hle/kernel/mutex.h42
-rw-r--r--src/core/hle/kernel/object.h5
-rw-r--r--src/core/hle/kernel/process.cpp67
-rw-r--r--src/core/hle/kernel/process.h64
-rw-r--r--src/core/hle/kernel/process_capability.cpp4
-rw-r--r--src/core/hle/kernel/readable_event.cpp18
-rw-r--r--src/core/hle/kernel/readable_event.h12
-rw-r--r--src/core/hle/kernel/server_port.cpp14
-rw-r--r--src/core/hle/kernel/server_port.h7
-rw-r--r--src/core/hle/kernel/server_session.cpp23
-rw-r--r--src/core/hle/kernel/server_session.h12
-rw-r--r--src/core/hle/kernel/session.cpp11
-rw-r--r--src/core/hle/kernel/session.h8
-rw-r--r--src/core/hle/kernel/svc.cpp397
-rw-r--r--src/core/hle/kernel/svc_common.h14
-rw-r--r--src/core/hle/kernel/svc_results.h20
-rw-r--r--src/core/hle/kernel/svc_types.h12
-rw-r--r--src/core/hle/kernel/svc_wrap.h47
-rw-r--r--src/core/hle/kernel/synchronization.cpp116
-rw-r--r--src/core/hle/kernel/synchronization.h44
-rw-r--r--src/core/hle/kernel/synchronization_object.cpp49
-rw-r--r--src/core/hle/kernel/synchronization_object.h77
-rw-r--r--src/core/hle/kernel/thread.cpp328
-rw-r--r--src/core/hle/kernel/thread.h497
-rw-r--r--src/core/hle/kernel/time_manager.cpp9
-rw-r--r--src/core/hle/service/acc/acc.cpp63
-rw-r--r--src/core/hle/service/acc/acc.h5
-rw-r--r--src/core/hle/service/acc/acc_su.cpp2
-rw-r--r--src/core/hle/service/acc/acc_u0.cpp2
-rw-r--r--src/core/hle/service/acc/acc_u1.cpp2
-rw-r--r--src/core/hle/service/acc/profile_manager.cpp10
-rw-r--r--src/core/hle/service/acc/profile_manager.h18
-rw-r--r--src/core/hle/service/am/applets/error.cpp10
-rw-r--r--src/core/hle/service/audio/audout_u.cpp2
-rw-r--r--src/core/hle/service/hid/controllers/npad.cpp8
-rw-r--r--src/core/hle/service/hid/controllers/npad.h23
-rw-r--r--src/core/hle/service/hid/controllers/touchscreen.cpp127
-rw-r--r--src/core/hle/service/hid/controllers/touchscreen.h32
-rw-r--r--src/core/hle/service/hid/hid.cpp148
-rw-r--r--src/core/hle/service/mii/manager.cpp1
-rw-r--r--src/core/hle/service/mii/manager.h106
-rw-r--r--src/core/hle/service/nfp/nfp.cpp6
-rw-r--r--src/core/hle/service/nvflinger/nvflinger.cpp4
-rw-r--r--src/core/hle/service/sm/sm.cpp3
-rw-r--r--src/core/hle/service/time/clock_types.h26
-rw-r--r--src/core/hle/service/time/time_zone_types.h22
-rw-r--r--src/input_common/touch_from_button.cpp15
-rw-r--r--src/input_common/udp/client.cpp118
-rw-r--r--src/input_common/udp/client.h24
-rw-r--r--src/input_common/udp/protocol.h16
-rw-r--r--src/input_common/udp/udp.cpp32
-rw-r--r--src/tests/CMakeLists.txt2
-rw-r--r--src/tests/common/bit_utils.cpp23
-rw-r--r--src/tests/video_core/buffer_base.cpp473
-rw-r--r--src/video_core/CMakeLists.txt7
-rw-r--r--src/video_core/buffer_cache/buffer_base.h495
-rw-r--r--src/video_core/cdma_pusher.cpp4
-rw-r--r--src/video_core/cdma_pusher.h2
-rw-r--r--src/video_core/command_classes/codecs/h264.cpp4
-rw-r--r--src/video_core/engines/fermi_2d.h30
-rw-r--r--src/video_core/engines/kepler_compute.h16
-rw-r--r--src/video_core/engines/kepler_memory.h4
-rw-r--r--src/video_core/engines/maxwell_3d.h150
-rw-r--r--src/video_core/engines/shader_header.h38
-rw-r--r--src/video_core/gpu.h8
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.cpp10
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.h4
-rw-r--r--src/video_core/renderer_vulkan/vk_blit_screen.cpp22
-rw-r--r--src/video_core/renderer_vulkan/vk_blit_screen.h10
-rw-r--r--src/video_core/renderer_vulkan/vk_buffer_cache.cpp44
-rw-r--r--src/video_core/renderer_vulkan/vk_buffer_cache.h22
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pass.cpp32
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pass.h15
-rw-r--r--src/video_core/renderer_vulkan/vk_fence_manager.cpp54
-rw-r--r--src/video_core/renderer_vulkan/vk_fence_manager.h15
-rw-r--r--src/video_core/renderer_vulkan/vk_memory_manager.cpp230
-rw-r--r--src/video_core/renderer_vulkan/vk_memory_manager.h132
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp17
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.h10
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_decompiler.cpp5
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp126
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.h62
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.cpp22
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.h22
-rw-r--r--src/video_core/texture_cache/accelerated_swizzle.cpp4
-rw-r--r--src/video_core/texture_cache/util.cpp12
-rw-r--r--src/video_core/textures/astc.cpp41
-rw-r--r--src/video_core/textures/decoders.cpp8
-rw-r--r--src/video_core/vulkan_common/vulkan_debug_callback.h2
-rw-r--r--src/video_core/vulkan_common/vulkan_device.cpp11
-rw-r--r--src/video_core/vulkan_common/vulkan_device.h35
-rw-r--r--src/video_core/vulkan_common/vulkan_memory_allocator.cpp268
-rw-r--r--src/video_core/vulkan_common/vulkan_memory_allocator.h117
-rw-r--r--src/video_core/vulkan_common/vulkan_wrapper.h284
-rw-r--r--src/yuzu/applets/profile_select.cpp2
-rw-r--r--src/yuzu/bootmanager.cpp78
-rw-r--r--src/yuzu/bootmanager.h8
-rw-r--r--src/yuzu/configuration/config.cpp15
-rw-r--r--src/yuzu/configuration/configure_dialog.cpp22
-rw-r--r--src/yuzu/configuration/configure_input_player.cpp10
-rw-r--r--src/yuzu/configuration/configure_motion_touch.cpp63
-rw-r--r--src/yuzu/configuration/configure_motion_touch.ui16
-rw-r--r--src/yuzu/configuration/configure_profile_manager.cpp6
-rw-r--r--src/yuzu/configuration/configure_service.cpp2
-rw-r--r--src/yuzu/configuration/configure_touchscreen_advanced.cpp3
-rw-r--r--src/yuzu/configuration/configure_touchscreen_advanced.ui29
-rw-r--r--src/yuzu/debugger/wait_tree.cpp128
-rw-r--r--src/yuzu/debugger/wait_tree.h17
-rw-r--r--src/yuzu/game_list.cpp60
-rw-r--r--src/yuzu/game_list_p.h2
-rw-r--r--src/yuzu_cmd/config.cpp4
-rw-r--r--src/yuzu_cmd/emu_window/emu_window_sdl2.cpp12
-rw-r--r--src/yuzu_tester/CMakeLists.txt32
-rw-r--r--src/yuzu_tester/config.cpp194
-rw-r--r--src/yuzu_tester/config.h24
-rw-r--r--src/yuzu_tester/default_ini.h182
-rw-r--r--src/yuzu_tester/emu_window/emu_window_sdl2_hide.cpp146
-rw-r--r--src/yuzu_tester/emu_window/emu_window_sdl2_hide.h37
-rw-r--r--src/yuzu_tester/resource.h16
-rw-r--r--src/yuzu_tester/service/yuzutest.cpp115
-rw-r--r--src/yuzu_tester/service/yuzutest.h25
-rw-r--r--src/yuzu_tester/yuzu.cpp268
-rw-r--r--src/yuzu_tester/yuzu.rc17
174 files changed, 6335 insertions, 5212 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 61adbef28..478246b6f 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -67,8 +67,11 @@ else()
67 -Werror=implicit-fallthrough 67 -Werror=implicit-fallthrough
68 -Werror=missing-declarations 68 -Werror=missing-declarations
69 -Werror=reorder 69 -Werror=reorder
70 -Werror=switch
70 -Werror=uninitialized 71 -Werror=uninitialized
72 -Werror=unused-function
71 -Werror=unused-result 73 -Werror=unused-result
74 -Werror=unused-variable
72 -Wextra 75 -Wextra
73 -Wmissing-declarations 76 -Wmissing-declarations
74 -Wno-attributes 77 -Wno-attributes
@@ -127,7 +130,6 @@ add_subdirectory(tests)
127 130
128if (ENABLE_SDL2) 131if (ENABLE_SDL2)
129 add_subdirectory(yuzu_cmd) 132 add_subdirectory(yuzu_cmd)
130 add_subdirectory(yuzu_tester)
131endif() 133endif()
132 134
133if (ENABLE_QT) 135if (ENABLE_QT)
diff --git a/src/audio_core/sink_context.h b/src/audio_core/sink_context.h
index 05541becb..66ee4e8a0 100644
--- a/src/audio_core/sink_context.h
+++ b/src/audio_core/sink_context.h
@@ -40,17 +40,17 @@ public:
40 SinkSampleFormat sample_format; 40 SinkSampleFormat sample_format;
41 std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input; 41 std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
42 bool in_use; 42 bool in_use;
43 INSERT_UNION_PADDING_BYTES(5); 43 INSERT_PADDING_BYTES_NOINIT(5);
44 }; 44 };
45 static_assert(sizeof(CircularBufferIn) == 0x28, 45 static_assert(sizeof(CircularBufferIn) == 0x28,
46 "SinkInfo::CircularBufferIn is in invalid size"); 46 "SinkInfo::CircularBufferIn is in invalid size");
47 47
48 struct DeviceIn { 48 struct DeviceIn {
49 std::array<u8, 255> device_name; 49 std::array<u8, 255> device_name;
50 INSERT_UNION_PADDING_BYTES(1); 50 INSERT_PADDING_BYTES_NOINIT(1);
51 s32_le input_count; 51 s32_le input_count;
52 std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input; 52 std::array<u8, AudioCommon::MAX_CHANNEL_COUNT> input;
53 INSERT_UNION_PADDING_BYTES(1); 53 INSERT_PADDING_BYTES_NOINIT(1);
54 bool down_matrix_enabled; 54 bool down_matrix_enabled;
55 DownmixCoefficients down_matrix_coef; 55 DownmixCoefficients down_matrix_coef;
56 }; 56 };
diff --git a/src/audio_core/voice_context.h b/src/audio_core/voice_context.h
index 863248761..70359cadb 100644
--- a/src/audio_core/voice_context.h
+++ b/src/audio_core/voice_context.h
@@ -86,28 +86,28 @@ struct BehaviorFlags {
86static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size"); 86static_assert(sizeof(BehaviorFlags) == 0x4, "BehaviorFlags is an invalid size");
87 87
88struct ADPCMContext { 88struct ADPCMContext {
89 u16 header{}; 89 u16 header;
90 s16 yn1{}; 90 s16 yn1;
91 s16 yn2{}; 91 s16 yn2;
92}; 92};
93static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size"); 93static_assert(sizeof(ADPCMContext) == 0x6, "ADPCMContext is an invalid size");
94 94
95struct VoiceState { 95struct VoiceState {
96 s64 played_sample_count{}; 96 s64 played_sample_count;
97 s32 offset{}; 97 s32 offset;
98 s32 wave_buffer_index{}; 98 s32 wave_buffer_index;
99 std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid{}; 99 std::array<bool, AudioCommon::MAX_WAVE_BUFFERS> is_wave_buffer_valid;
100 s32 wave_buffer_consumed{}; 100 s32 wave_buffer_consumed;
101 std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history{}; 101 std::array<s32, AudioCommon::MAX_SAMPLE_HISTORY> sample_history;
102 s32 fraction{}; 102 s32 fraction;
103 VAddr context_address{}; 103 VAddr context_address;
104 Codec::ADPCM_Coeff coeff{}; 104 Codec::ADPCM_Coeff coeff;
105 ADPCMContext context{}; 105 ADPCMContext context;
106 std::array<s64, 2> biquad_filter_state{}; 106 std::array<s64, 2> biquad_filter_state;
107 std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples{}; 107 std::array<s32, AudioCommon::MAX_MIX_BUFFERS> previous_samples;
108 u32 external_context_size{}; 108 u32 external_context_size;
109 bool is_external_context_used{}; 109 bool is_external_context_used;
110 bool voice_dropped{}; 110 bool voice_dropped;
111}; 111};
112 112
113class VoiceChannelResource { 113class VoiceChannelResource {
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 2c2bd2ee8..f77575a00 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -98,7 +98,6 @@ add_library(common STATIC
98 algorithm.h 98 algorithm.h
99 alignment.h 99 alignment.h
100 assert.h 100 assert.h
101 atomic_ops.cpp
102 atomic_ops.h 101 atomic_ops.h
103 detached_tasks.cpp 102 detached_tasks.cpp
104 detached_tasks.h 103 detached_tasks.h
@@ -108,7 +107,6 @@ add_library(common STATIC
108 bit_util.h 107 bit_util.h
109 cityhash.cpp 108 cityhash.cpp
110 cityhash.h 109 cityhash.h
111 color.h
112 common_funcs.h 110 common_funcs.h
113 common_paths.h 111 common_paths.h
114 common_types.h 112 common_types.h
@@ -123,6 +121,7 @@ add_library(common STATIC
123 hash.h 121 hash.h
124 hex_util.cpp 122 hex_util.cpp
125 hex_util.h 123 hex_util.h
124 intrusive_red_black_tree.h
126 logging/backend.cpp 125 logging/backend.cpp
127 logging/backend.h 126 logging/backend.h
128 logging/filter.cpp 127 logging/filter.cpp
@@ -143,6 +142,7 @@ add_library(common STATIC
143 page_table.h 142 page_table.h
144 param_package.cpp 143 param_package.cpp
145 param_package.h 144 param_package.h
145 parent_of_member.h
146 quaternion.h 146 quaternion.h
147 ring_buffer.h 147 ring_buffer.h
148 scm_rev.cpp 148 scm_rev.cpp
@@ -165,8 +165,7 @@ add_library(common STATIC
165 threadsafe_queue.h 165 threadsafe_queue.h
166 time_zone.cpp 166 time_zone.cpp
167 time_zone.h 167 time_zone.h
168 timer.cpp 168 tree.h
169 timer.h
170 uint128.cpp 169 uint128.cpp
171 uint128.h 170 uint128.h
172 uuid.cpp 171 uuid.cpp
diff --git a/src/common/alignment.h b/src/common/alignment.h
index 5040043de..fb81f10d8 100644
--- a/src/common/alignment.h
+++ b/src/common/alignment.h
@@ -9,50 +9,45 @@
9namespace Common { 9namespace Common {
10 10
11template <typename T> 11template <typename T>
12[[nodiscard]] constexpr T AlignUp(T value, std::size_t size) { 12requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
13 static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
14 auto mod{static_cast<T>(value % size)}; 13 auto mod{static_cast<T>(value % size)};
15 value -= mod; 14 value -= mod;
16 return static_cast<T>(mod == T{0} ? value : value + size); 15 return static_cast<T>(mod == T{0} ? value : value + size);
17} 16}
18 17
19template <typename T> 18template <typename T>
20[[nodiscard]] constexpr T AlignDown(T value, std::size_t size) { 19requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignUpLog2(T value, size_t align_log2) {
21 static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); 20 return static_cast<T>((value + ((1ULL << align_log2) - 1)) >> align_log2 << align_log2);
22 return static_cast<T>(value - value % size);
23} 21}
24 22
25template <typename T> 23template <typename T>
26[[nodiscard]] constexpr T AlignBits(T value, std::size_t align) { 24requires std::is_unsigned_v<T>[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
27 static_assert(std::is_unsigned_v<T>, "T must be an unsigned value."); 25 return static_cast<T>(value - value % size);
28 return static_cast<T>((value + ((1ULL << align) - 1)) >> align << align);
29} 26}
30 27
31template <typename T> 28template <typename T>
32[[nodiscard]] constexpr bool Is4KBAligned(T value) { 29requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool Is4KBAligned(T value) {
33 static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
34 return (value & 0xFFF) == 0; 30 return (value & 0xFFF) == 0;
35} 31}
36 32
37template <typename T> 33template <typename T>
38[[nodiscard]] constexpr bool IsWordAligned(T value) { 34requires std::is_unsigned_v<T>[[nodiscard]] constexpr bool IsWordAligned(T value) {
39 static_assert(std::is_unsigned_v<T>, "T must be an unsigned value.");
40 return (value & 0b11) == 0; 35 return (value & 0b11) == 0;
41} 36}
42 37
43template <typename T> 38template <typename T>
44[[nodiscard]] constexpr bool IsAligned(T value, std::size_t alignment) { 39requires std::is_integral_v<T>[[nodiscard]] constexpr bool IsAligned(T value, size_t alignment) {
45 using U = typename std::make_unsigned<T>::type; 40 using U = typename std::make_unsigned_t<T>;
46 const U mask = static_cast<U>(alignment - 1); 41 const U mask = static_cast<U>(alignment - 1);
47 return (value & mask) == 0; 42 return (value & mask) == 0;
48} 43}
49 44
50template <typename T, std::size_t Align = 16> 45template <typename T, size_t Align = 16>
51class AlignmentAllocator { 46class AlignmentAllocator {
52public: 47public:
53 using value_type = T; 48 using value_type = T;
54 using size_type = std::size_t; 49 using size_type = size_t;
55 using difference_type = std::ptrdiff_t; 50 using difference_type = ptrdiff_t;
56 51
57 using propagate_on_container_copy_assignment = std::true_type; 52 using propagate_on_container_copy_assignment = std::true_type;
58 using propagate_on_container_move_assignment = std::true_type; 53 using propagate_on_container_move_assignment = std::true_type;
diff --git a/src/common/atomic_ops.cpp b/src/common/atomic_ops.cpp
deleted file mode 100644
index 1612d0e67..000000000
--- a/src/common/atomic_ops.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cstring>
6
7#include "common/atomic_ops.h"
8
9#if _MSC_VER
10#include <intrin.h>
11#endif
12
13namespace Common {
14
15#if _MSC_VER
16
17bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
18 const u8 result =
19 _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
20 return result == expected;
21}
22
23bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
24 const u16 result =
25 _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
26 return result == expected;
27}
28
29bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
30 const u32 result =
31 _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
32 return result == expected;
33}
34
35bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
36 const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
37 value, expected);
38 return result == expected;
39}
40
41bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
42 return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
43 value[0],
44 reinterpret_cast<__int64*>(expected.data())) != 0;
45}
46
47#else
48
49bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
50 return __sync_bool_compare_and_swap(pointer, expected, value);
51}
52
53bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
54 return __sync_bool_compare_and_swap(pointer, expected, value);
55}
56
57bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
58 return __sync_bool_compare_and_swap(pointer, expected, value);
59}
60
61bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
62 return __sync_bool_compare_and_swap(pointer, expected, value);
63}
64
65bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
66 unsigned __int128 value_a;
67 unsigned __int128 expected_a;
68 std::memcpy(&value_a, value.data(), sizeof(u128));
69 std::memcpy(&expected_a, expected.data(), sizeof(u128));
70 return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
71}
72
73#endif
74
75} // namespace Common
diff --git a/src/common/atomic_ops.h b/src/common/atomic_ops.h
index b46888589..2b1f515e8 100644
--- a/src/common/atomic_ops.h
+++ b/src/common/atomic_ops.h
@@ -4,14 +4,75 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <cstring>
8#include <memory>
9
7#include "common/common_types.h" 10#include "common/common_types.h"
8 11
12#if _MSC_VER
13#include <intrin.h>
14#endif
15
9namespace Common { 16namespace Common {
10 17
11[[nodiscard]] bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected); 18#if _MSC_VER
12[[nodiscard]] bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected); 19
13[[nodiscard]] bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected); 20[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
14[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected); 21 const u8 result =
15[[nodiscard]] bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected); 22 _InterlockedCompareExchange8(reinterpret_cast<volatile char*>(pointer), value, expected);
23 return result == expected;
24}
25
26[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
27 const u16 result =
28 _InterlockedCompareExchange16(reinterpret_cast<volatile short*>(pointer), value, expected);
29 return result == expected;
30}
31
32[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
33 const u32 result =
34 _InterlockedCompareExchange(reinterpret_cast<volatile long*>(pointer), value, expected);
35 return result == expected;
36}
37
38[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
39 const u64 result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64*>(pointer),
40 value, expected);
41 return result == expected;
42}
43
44[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
45 return _InterlockedCompareExchange128(reinterpret_cast<volatile __int64*>(pointer), value[1],
46 value[0],
47 reinterpret_cast<__int64*>(expected.data())) != 0;
48}
49
50#else
51
52[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u8* pointer, u8 value, u8 expected) {
53 return __sync_bool_compare_and_swap(pointer, expected, value);
54}
55
56[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u16* pointer, u16 value, u16 expected) {
57 return __sync_bool_compare_and_swap(pointer, expected, value);
58}
59
60[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u32* pointer, u32 value, u32 expected) {
61 return __sync_bool_compare_and_swap(pointer, expected, value);
62}
63
64[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u64 value, u64 expected) {
65 return __sync_bool_compare_and_swap(pointer, expected, value);
66}
67
68[[nodiscard]] inline bool AtomicCompareAndSwap(volatile u64* pointer, u128 value, u128 expected) {
69 unsigned __int128 value_a;
70 unsigned __int128 expected_a;
71 std::memcpy(&value_a, value.data(), sizeof(u128));
72 std::memcpy(&expected_a, expected.data(), sizeof(u128));
73 return __sync_bool_compare_and_swap((unsigned __int128*)pointer, expected_a, value_a);
74}
75
76#endif
16 77
17} // namespace Common 78} // namespace Common
diff --git a/src/common/bit_util.h b/src/common/bit_util.h
index 29f59a9a3..64520ca4e 100644
--- a/src/common/bit_util.h
+++ b/src/common/bit_util.h
@@ -4,13 +4,10 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <bit>
7#include <climits> 8#include <climits>
8#include <cstddef> 9#include <cstddef>
9 10
10#ifdef _MSC_VER
11#include <intrin.h>
12#endif
13
14#include "common/common_types.h" 11#include "common/common_types.h"
15 12
16namespace Common { 13namespace Common {
@@ -21,124 +18,30 @@ template <typename T>
21 return sizeof(T) * CHAR_BIT; 18 return sizeof(T) * CHAR_BIT;
22} 19}
23 20
24#ifdef _MSC_VER 21[[nodiscard]] constexpr u32 MostSignificantBit32(const u32 value) {
25[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) { 22 return 31U - static_cast<u32>(std::countl_zero(value));
26 unsigned long leading_zero = 0;
27
28 if (_BitScanReverse(&leading_zero, value) != 0) {
29 return 31 - leading_zero;
30 }
31
32 return 32;
33}
34
35[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
36 unsigned long leading_zero = 0;
37
38 if (_BitScanReverse64(&leading_zero, value) != 0) {
39 return 63 - leading_zero;
40 }
41
42 return 64;
43}
44#else
45[[nodiscard]] inline u32 CountLeadingZeroes32(u32 value) {
46 if (value == 0) {
47 return 32;
48 }
49
50 return static_cast<u32>(__builtin_clz(value));
51}
52
53[[nodiscard]] inline u32 CountLeadingZeroes64(u64 value) {
54 if (value == 0) {
55 return 64;
56 }
57
58 return static_cast<u32>(__builtin_clzll(value));
59}
60#endif
61
62#ifdef _MSC_VER
63[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
64 unsigned long trailing_zero = 0;
65
66 if (_BitScanForward(&trailing_zero, value) != 0) {
67 return trailing_zero;
68 }
69
70 return 32;
71}
72
73[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
74 unsigned long trailing_zero = 0;
75
76 if (_BitScanForward64(&trailing_zero, value) != 0) {
77 return trailing_zero;
78 }
79
80 return 64;
81}
82#else
83[[nodiscard]] inline u32 CountTrailingZeroes32(u32 value) {
84 if (value == 0) {
85 return 32;
86 }
87
88 return static_cast<u32>(__builtin_ctz(value));
89}
90
91[[nodiscard]] inline u32 CountTrailingZeroes64(u64 value) {
92 if (value == 0) {
93 return 64;
94 }
95
96 return static_cast<u32>(__builtin_ctzll(value));
97} 23}
98#endif
99
100#ifdef _MSC_VER
101 24
102[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) { 25[[nodiscard]] constexpr u32 MostSignificantBit64(const u64 value) {
103 unsigned long result; 26 return 63U - static_cast<u32>(std::countl_zero(value));
104 _BitScanReverse(&result, value);
105 return static_cast<u32>(result);
106} 27}
107 28
108[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) { 29[[nodiscard]] constexpr u32 Log2Floor32(const u32 value) {
109 unsigned long result;
110 _BitScanReverse64(&result, value);
111 return static_cast<u32>(result);
112}
113
114#else
115
116[[nodiscard]] inline u32 MostSignificantBit32(const u32 value) {
117 return 31U - static_cast<u32>(__builtin_clz(value));
118}
119
120[[nodiscard]] inline u32 MostSignificantBit64(const u64 value) {
121 return 63U - static_cast<u32>(__builtin_clzll(value));
122}
123
124#endif
125
126[[nodiscard]] inline u32 Log2Floor32(const u32 value) {
127 return MostSignificantBit32(value); 30 return MostSignificantBit32(value);
128} 31}
129 32
130[[nodiscard]] inline u32 Log2Ceil32(const u32 value) { 33[[nodiscard]] constexpr u32 Log2Floor64(const u64 value) {
131 const u32 log2_f = Log2Floor32(value); 34 return MostSignificantBit64(value);
132 return log2_f + ((value ^ (1U << log2_f)) != 0U);
133} 35}
134 36
135[[nodiscard]] inline u32 Log2Floor64(const u64 value) { 37[[nodiscard]] constexpr u32 Log2Ceil32(const u32 value) {
136 return MostSignificantBit64(value); 38 const u32 log2_f = Log2Floor32(value);
39 return log2_f + static_cast<u32>((value ^ (1U << log2_f)) != 0U);
137} 40}
138 41
139[[nodiscard]] inline u32 Log2Ceil64(const u64 value) { 42[[nodiscard]] constexpr u32 Log2Ceil64(const u64 value) {
140 const u64 log2_f = static_cast<u64>(Log2Floor64(value)); 43 const u64 log2_f = Log2Floor64(value);
141 return static_cast<u32>(log2_f + ((value ^ (1ULL << log2_f)) != 0ULL)); 44 return static_cast<u32>(log2_f + static_cast<u64>((value ^ (1ULL << log2_f)) != 0ULL));
142} 45}
143 46
144} // namespace Common 47} // namespace Common
diff --git a/src/common/color.h b/src/common/color.h
deleted file mode 100644
index bbcac858e..000000000
--- a/src/common/color.h
+++ /dev/null
@@ -1,271 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstring>
8
9#include "common/common_types.h"
10#include "common/swap.h"
11#include "common/vector_math.h"
12
13namespace Common::Color {
14
15/// Convert a 1-bit color component to 8 bit
16[[nodiscard]] constexpr u8 Convert1To8(u8 value) {
17 return value * 255;
18}
19
20/// Convert a 4-bit color component to 8 bit
21[[nodiscard]] constexpr u8 Convert4To8(u8 value) {
22 return (value << 4) | value;
23}
24
25/// Convert a 5-bit color component to 8 bit
26[[nodiscard]] constexpr u8 Convert5To8(u8 value) {
27 return (value << 3) | (value >> 2);
28}
29
30/// Convert a 6-bit color component to 8 bit
31[[nodiscard]] constexpr u8 Convert6To8(u8 value) {
32 return (value << 2) | (value >> 4);
33}
34
35/// Convert a 8-bit color component to 1 bit
36[[nodiscard]] constexpr u8 Convert8To1(u8 value) {
37 return value >> 7;
38}
39
40/// Convert a 8-bit color component to 4 bit
41[[nodiscard]] constexpr u8 Convert8To4(u8 value) {
42 return value >> 4;
43}
44
45/// Convert a 8-bit color component to 5 bit
46[[nodiscard]] constexpr u8 Convert8To5(u8 value) {
47 return value >> 3;
48}
49
50/// Convert a 8-bit color component to 6 bit
51[[nodiscard]] constexpr u8 Convert8To6(u8 value) {
52 return value >> 2;
53}
54
55/**
56 * Decode a color stored in RGBA8 format
57 * @param bytes Pointer to encoded source color
58 * @return Result color decoded as Common::Vec4<u8>
59 */
60[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA8(const u8* bytes) {
61 return {bytes[3], bytes[2], bytes[1], bytes[0]};
62}
63
64/**
65 * Decode a color stored in RGB8 format
66 * @param bytes Pointer to encoded source color
67 * @return Result color decoded as Common::Vec4<u8>
68 */
69[[nodiscard]] inline Common::Vec4<u8> DecodeRGB8(const u8* bytes) {
70 return {bytes[2], bytes[1], bytes[0], 255};
71}
72
73/**
74 * Decode a color stored in RG8 (aka HILO8) format
75 * @param bytes Pointer to encoded source color
76 * @return Result color decoded as Common::Vec4<u8>
77 */
78[[nodiscard]] inline Common::Vec4<u8> DecodeRG8(const u8* bytes) {
79 return {bytes[1], bytes[0], 0, 255};
80}
81
82/**
83 * Decode a color stored in RGB565 format
84 * @param bytes Pointer to encoded source color
85 * @return Result color decoded as Common::Vec4<u8>
86 */
87[[nodiscard]] inline Common::Vec4<u8> DecodeRGB565(const u8* bytes) {
88 u16_le pixel;
89 std::memcpy(&pixel, bytes, sizeof(pixel));
90 return {Convert5To8((pixel >> 11) & 0x1F), Convert6To8((pixel >> 5) & 0x3F),
91 Convert5To8(pixel & 0x1F), 255};
92}
93
94/**
95 * Decode a color stored in RGB5A1 format
96 * @param bytes Pointer to encoded source color
97 * @return Result color decoded as Common::Vec4<u8>
98 */
99[[nodiscard]] inline Common::Vec4<u8> DecodeRGB5A1(const u8* bytes) {
100 u16_le pixel;
101 std::memcpy(&pixel, bytes, sizeof(pixel));
102 return {Convert5To8((pixel >> 11) & 0x1F), Convert5To8((pixel >> 6) & 0x1F),
103 Convert5To8((pixel >> 1) & 0x1F), Convert1To8(pixel & 0x1)};
104}
105
106/**
107 * Decode a color stored in RGBA4 format
108 * @param bytes Pointer to encoded source color
109 * @return Result color decoded as Common::Vec4<u8>
110 */
111[[nodiscard]] inline Common::Vec4<u8> DecodeRGBA4(const u8* bytes) {
112 u16_le pixel;
113 std::memcpy(&pixel, bytes, sizeof(pixel));
114 return {Convert4To8((pixel >> 12) & 0xF), Convert4To8((pixel >> 8) & 0xF),
115 Convert4To8((pixel >> 4) & 0xF), Convert4To8(pixel & 0xF)};
116}
117
118/**
119 * Decode a depth value stored in D16 format
120 * @param bytes Pointer to encoded source value
121 * @return Depth value as an u32
122 */
123[[nodiscard]] inline u32 DecodeD16(const u8* bytes) {
124 u16_le data;
125 std::memcpy(&data, bytes, sizeof(data));
126 return data;
127}
128
129/**
130 * Decode a depth value stored in D24 format
131 * @param bytes Pointer to encoded source value
132 * @return Depth value as an u32
133 */
134[[nodiscard]] inline u32 DecodeD24(const u8* bytes) {
135 return (bytes[2] << 16) | (bytes[1] << 8) | bytes[0];
136}
137
138/**
139 * Decode a depth value and a stencil value stored in D24S8 format
140 * @param bytes Pointer to encoded source values
141 * @return Resulting values stored as a Common::Vec2
142 */
143[[nodiscard]] inline Common::Vec2<u32> DecodeD24S8(const u8* bytes) {
144 return {static_cast<u32>((bytes[2] << 16) | (bytes[1] << 8) | bytes[0]), bytes[3]};
145}
146
147/**
148 * Encode a color as RGBA8 format
149 * @param color Source color to encode
150 * @param bytes Destination pointer to store encoded color
151 */
152inline void EncodeRGBA8(const Common::Vec4<u8>& color, u8* bytes) {
153 bytes[3] = color.r();
154 bytes[2] = color.g();
155 bytes[1] = color.b();
156 bytes[0] = color.a();
157}
158
159/**
160 * Encode a color as RGB8 format
161 * @param color Source color to encode
162 * @param bytes Destination pointer to store encoded color
163 */
164inline void EncodeRGB8(const Common::Vec4<u8>& color, u8* bytes) {
165 bytes[2] = color.r();
166 bytes[1] = color.g();
167 bytes[0] = color.b();
168}
169
170/**
171 * Encode a color as RG8 (aka HILO8) format
172 * @param color Source color to encode
173 * @param bytes Destination pointer to store encoded color
174 */
175inline void EncodeRG8(const Common::Vec4<u8>& color, u8* bytes) {
176 bytes[1] = color.r();
177 bytes[0] = color.g();
178}
179/**
180 * Encode a color as RGB565 format
181 * @param color Source color to encode
182 * @param bytes Destination pointer to store encoded color
183 */
184inline void EncodeRGB565(const Common::Vec4<u8>& color, u8* bytes) {
185 const u16_le data =
186 (Convert8To5(color.r()) << 11) | (Convert8To6(color.g()) << 5) | Convert8To5(color.b());
187
188 std::memcpy(bytes, &data, sizeof(data));
189}
190
191/**
192 * Encode a color as RGB5A1 format
193 * @param color Source color to encode
194 * @param bytes Destination pointer to store encoded color
195 */
196inline void EncodeRGB5A1(const Common::Vec4<u8>& color, u8* bytes) {
197 const u16_le data = (Convert8To5(color.r()) << 11) | (Convert8To5(color.g()) << 6) |
198 (Convert8To5(color.b()) << 1) | Convert8To1(color.a());
199
200 std::memcpy(bytes, &data, sizeof(data));
201}
202
203/**
204 * Encode a color as RGBA4 format
205 * @param color Source color to encode
206 * @param bytes Destination pointer to store encoded color
207 */
208inline void EncodeRGBA4(const Common::Vec4<u8>& color, u8* bytes) {
209 const u16 data = (Convert8To4(color.r()) << 12) | (Convert8To4(color.g()) << 8) |
210 (Convert8To4(color.b()) << 4) | Convert8To4(color.a());
211
212 std::memcpy(bytes, &data, sizeof(data));
213}
214
215/**
216 * Encode a 16 bit depth value as D16 format
217 * @param value 16 bit source depth value to encode
218 * @param bytes Pointer where to store the encoded value
219 */
220inline void EncodeD16(u32 value, u8* bytes) {
221 const u16_le data = static_cast<u16>(value);
222 std::memcpy(bytes, &data, sizeof(data));
223}
224
225/**
226 * Encode a 24 bit depth value as D24 format
227 * @param value 24 bit source depth value to encode
228 * @param bytes Pointer where to store the encoded value
229 */
230inline void EncodeD24(u32 value, u8* bytes) {
231 bytes[0] = value & 0xFF;
232 bytes[1] = (value >> 8) & 0xFF;
233 bytes[2] = (value >> 16) & 0xFF;
234}
235
236/**
237 * Encode a 24 bit depth and 8 bit stencil values as D24S8 format
238 * @param depth 24 bit source depth value to encode
239 * @param stencil 8 bit source stencil value to encode
240 * @param bytes Pointer where to store the encoded value
241 */
242inline void EncodeD24S8(u32 depth, u8 stencil, u8* bytes) {
243 bytes[0] = depth & 0xFF;
244 bytes[1] = (depth >> 8) & 0xFF;
245 bytes[2] = (depth >> 16) & 0xFF;
246 bytes[3] = stencil;
247}
248
249/**
250 * Encode a 24 bit depth value as D24X8 format (32 bits per pixel with 8 bits unused)
251 * @param depth 24 bit source depth value to encode
252 * @param bytes Pointer where to store the encoded value
253 * @note unused bits will not be modified
254 */
255inline void EncodeD24X8(u32 depth, u8* bytes) {
256 bytes[0] = depth & 0xFF;
257 bytes[1] = (depth >> 8) & 0xFF;
258 bytes[2] = (depth >> 16) & 0xFF;
259}
260
261/**
262 * Encode an 8 bit stencil value as X24S8 format (32 bits per pixel with 24 bits unused)
263 * @param stencil 8 bit source stencil value to encode
264 * @param bytes Pointer where to store the encoded value
265 * @note unused bits will not be modified
266 */
267inline void EncodeX24S8(u8 stencil, u8* bytes) {
268 bytes[3] = stencil;
269}
270
271} // namespace Common::Color
diff --git a/src/common/common_funcs.h b/src/common/common_funcs.h
index 367b6bf6e..75f3027fb 100644
--- a/src/common/common_funcs.h
+++ b/src/common/common_funcs.h
@@ -24,10 +24,10 @@
24#define INSERT_PADDING_WORDS(num_words) \ 24#define INSERT_PADDING_WORDS(num_words) \
25 std::array<u32, num_words> CONCAT2(pad, __LINE__) {} 25 std::array<u32, num_words> CONCAT2(pad, __LINE__) {}
26 26
27/// These are similar to the INSERT_PADDING_* macros, but are needed for padding unions. This is 27/// These are similar to the INSERT_PADDING_* macros but do not zero-initialize the contents.
28/// because unions can only be initialized by one member. 28/// This keeps the structure trivial to construct.
29#define INSERT_UNION_PADDING_BYTES(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__) 29#define INSERT_PADDING_BYTES_NOINIT(num_bytes) std::array<u8, num_bytes> CONCAT2(pad, __LINE__)
30#define INSERT_UNION_PADDING_WORDS(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__) 30#define INSERT_PADDING_WORDS_NOINIT(num_words) std::array<u32, num_words> CONCAT2(pad, __LINE__)
31 31
32#ifndef _MSC_VER 32#ifndef _MSC_VER
33 33
@@ -93,6 +93,14 @@ __declspec(dllimport) void __stdcall DebugBreak(void);
93 return static_cast<T>(key) == 0; \ 93 return static_cast<T>(key) == 0; \
94 } 94 }
95 95
96/// Evaluates a boolean expression, and returns a result unless that expression is true.
97#define R_UNLESS(expr, res) \
98 { \
99 if (!(expr)) { \
100 return res; \
101 } \
102 }
103
96namespace Common { 104namespace Common {
97 105
98[[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) { 106[[nodiscard]] constexpr u32 MakeMagic(char a, char b, char c, char d) {
diff --git a/src/common/intrusive_red_black_tree.h b/src/common/intrusive_red_black_tree.h
new file mode 100644
index 000000000..c0bbcd457
--- /dev/null
+++ b/src/common/intrusive_red_black_tree.h
@@ -0,0 +1,602 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/parent_of_member.h"
8#include "common/tree.h"
9
10namespace Common {
11
12namespace impl {
13
14class IntrusiveRedBlackTreeImpl;
15
16}
17
// Node embedded inside client objects to link them into an intrusive red-black
// tree. The tree performs no allocations of its own; each element carries its
// bookkeeping in this node.
struct IntrusiveRedBlackTreeNode {
public:
    // Tree bookkeeping entry; its layout is defined by RBEntry in common/tree.h.
    using EntryType = RBEntry<IntrusiveRedBlackTreeNode>;

    constexpr IntrusiveRedBlackTreeNode() = default;

    // Replaces the bookkeeping wholesale; intended for the tree implementation only.
    void SetEntry(const EntryType& new_entry) {
        entry = new_entry;
    }

    [[nodiscard]] EntryType& GetEntry() {
        return entry;
    }

    [[nodiscard]] const EntryType& GetEntry() const {
        return entry;
    }

private:
    EntryType entry{};

    // Only the tree types may manipulate the entry directly.
    friend class impl::IntrusiveRedBlackTreeImpl;

    template <class, class, class>
    friend class IntrusiveRedBlackTree;
};
44
45template <class T, class Traits, class Comparator>
46class IntrusiveRedBlackTree;
47
48namespace impl {
49
// Untyped core of the intrusive red-black tree: owns the RBHead root and wraps
// the RB_* free functions from common/tree.h behind an STL-like container
// surface operating on raw IntrusiveRedBlackTreeNode pointers. The typed
// IntrusiveRedBlackTree below layers element types and comparators on top.
class IntrusiveRedBlackTreeImpl {
private:
    template <class, class, class>
    friend class ::Common::IntrusiveRedBlackTree;

    using RootType = RBHead<IntrusiveRedBlackTreeNode>;
    RootType root;

public:
    template <bool Const>
    class Iterator;

    using value_type = IntrusiveRedBlackTreeNode;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using const_pointer = const value_type*;
    using reference = value_type&;
    using const_reference = const value_type&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    // Bidirectional iterator over tree nodes; end() is represented by a null node.
    template <bool Const>
    class Iterator {
    public:
        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveRedBlackTreeImpl::value_type;
        using difference_type = typename IntrusiveRedBlackTreeImpl::difference_type;
        using pointer = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_pointer,
                                           IntrusiveRedBlackTreeImpl::pointer>;
        using reference = std::conditional_t<Const, IntrusiveRedBlackTreeImpl::const_reference,
                                             IntrusiveRedBlackTreeImpl::reference>;

    private:
        pointer node;

    public:
        explicit Iterator(pointer n) : node(n) {}

        bool operator==(const Iterator& rhs) const {
            return this->node == rhs.node;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return this->node;
        }

        reference operator*() const {
            return *this->node;
        }

        Iterator& operator++() {
            this->node = GetNext(this->node);
            return *this;
        }

        Iterator& operator--() {
            this->node = GetPrev(this->node);
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++(*this);
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --(*this);
            return it;
        }

        // Implicit conversion from mutable to const iterator.
        operator Iterator<true>() const {
            return Iterator<true>(this->node);
        }
    };

private:
    // Define accessors using RB_* functions. The const_casts are needed because
    // the RB_* helpers take mutable pointers even for read-only traversal.
    bool EmptyImpl() const {
        return root.IsEmpty();
    }

    IntrusiveRedBlackTreeNode* GetMinImpl() const {
        return RB_MIN(const_cast<RootType*>(&root));
    }

    IntrusiveRedBlackTreeNode* GetMaxImpl() const {
        return RB_MAX(const_cast<RootType*>(&root));
    }

    IntrusiveRedBlackTreeNode* RemoveImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_REMOVE(&root, node);
    }

public:
    // In-order successor/predecessor of a node (null at the ends).
    static IntrusiveRedBlackTreeNode* GetNext(IntrusiveRedBlackTreeNode* node) {
        return RB_NEXT(node);
    }

    static IntrusiveRedBlackTreeNode* GetPrev(IntrusiveRedBlackTreeNode* node) {
        return RB_PREV(node);
    }

    static const IntrusiveRedBlackTreeNode* GetNext(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetNext(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

    static const IntrusiveRedBlackTreeNode* GetPrev(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(
            GetPrev(const_cast<IntrusiveRedBlackTreeNode*>(node)));
    }

public:
    constexpr IntrusiveRedBlackTreeImpl() {}

    // Iterator accessors.
    iterator begin() {
        return iterator(this->GetMinImpl());
    }

    const_iterator begin() const {
        return const_iterator(this->GetMinImpl());
    }

    iterator end() {
        return iterator(static_cast<IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator end() const {
        return const_iterator(static_cast<const IntrusiveRedBlackTreeNode*>(nullptr));
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    // O(1) iterator pointing at a node already linked into this tree.
    iterator iterator_to(reference ref) {
        return iterator(&ref);
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(&ref);
    }

    // Content management. Calling back()/front() on an empty tree dereferences
    // null — callers must check empty() first.
    bool empty() const {
        return this->EmptyImpl();
    }

    reference back() {
        return *this->GetMaxImpl();
    }

    const_reference back() const {
        return *this->GetMaxImpl();
    }

    reference front() {
        return *this->GetMinImpl();
    }

    const_reference front() const {
        return *this->GetMinImpl();
    }

    // Unlinks the node from the tree (the element itself is not destroyed —
    // intrusive containers never own their elements) and returns an iterator
    // to the next element.
    iterator erase(iterator it) {
        auto cur = std::addressof(*it);
        auto next = GetNext(cur);
        this->RemoveImpl(cur);
        return iterator(next);
    }
};
233
234} // namespace impl
235
// Detects whether T declares a nested LightCompareType — a lighter-weight key
// type that can be compared against elements without constructing a full
// value_type (used by find_light/nfind_light).
template <typename T>
concept HasLightCompareType = requires {
    { std::is_same<typename T::LightCompareType, void>::value }
    ->std::convertible_to<bool>;
};

namespace impl {

// Returns a null pointer whose pointee type is T::LightCompareType when T
// declares one, otherwise Default; only the type is consumed (see alias below).
template <typename T, typename Default>
consteval auto* GetLightCompareType() {
    if constexpr (HasLightCompareType<T>) {
        return static_cast<typename T::LightCompareType*>(nullptr);
    } else {
        return static_cast<Default*>(nullptr);
    }
}

} // namespace impl

// T::LightCompareType if T declares one, otherwise Default.
template <typename T, typename Default>
using LightCompareType = std::remove_pointer_t<decltype(impl::GetLightCompareType<T, Default>())>;
257
// Typed intrusive red-black tree of T elements.
//   Traits     — maps between an element (T*) and its embedded node
//                (see the *Traits classes below).
//   Comparator — provides static int Compare(const T&, const T&), and may
//                optionally declare a LightCompareType for key-only lookups.
// Elements are never owned or allocated by the tree; callers manage lifetimes.
template <class T, class Traits, class Comparator>
class IntrusiveRedBlackTree {

public:
    using ImplType = impl::IntrusiveRedBlackTreeImpl;

private:
    ImplType impl{};

public:
    template <bool Const>
    class Iterator;

    using value_type = T;
    using size_type = size_t;
    using difference_type = ptrdiff_t;
    using pointer = T*;
    using const_pointer = const T*;
    using reference = T&;
    using const_reference = const T&;
    using iterator = Iterator<false>;
    using const_iterator = Iterator<true>;

    // Key type for find_light/nfind_light; defaults to value_type when the
    // comparator declares no LightCompareType.
    using light_value_type = LightCompareType<Comparator, value_type>;
    using const_light_pointer = const light_value_type*;
    using const_light_reference = const light_value_type&;

    // Bidirectional iterator that adapts the untyped impl iterator, translating
    // node pointers to element pointers through Traits.
    template <bool Const>
    class Iterator {
    public:
        friend class IntrusiveRedBlackTree<T, Traits, Comparator>;

        using ImplIterator =
            std::conditional_t<Const, ImplType::const_iterator, ImplType::iterator>;

        using iterator_category = std::bidirectional_iterator_tag;
        using value_type = typename IntrusiveRedBlackTree::value_type;
        using difference_type = typename IntrusiveRedBlackTree::difference_type;
        using pointer = std::conditional_t<Const, IntrusiveRedBlackTree::const_pointer,
                                           IntrusiveRedBlackTree::pointer>;
        using reference = std::conditional_t<Const, IntrusiveRedBlackTree::const_reference,
                                             IntrusiveRedBlackTree::reference>;

    private:
        ImplIterator iterator;

    private:
        explicit Iterator(ImplIterator it) : iterator(it) {}

        explicit Iterator(typename std::conditional<Const, ImplType::const_iterator,
                                                    ImplType::iterator>::type::pointer ptr)
            : iterator(ptr) {}

        ImplIterator GetImplIterator() const {
            return this->iterator;
        }

    public:
        bool operator==(const Iterator& rhs) const {
            return this->iterator == rhs.iterator;
        }

        bool operator!=(const Iterator& rhs) const {
            return !(*this == rhs);
        }

        pointer operator->() const {
            return Traits::GetParent(std::addressof(*this->iterator));
        }

        reference operator*() const {
            return *Traits::GetParent(std::addressof(*this->iterator));
        }

        Iterator& operator++() {
            ++this->iterator;
            return *this;
        }

        Iterator& operator--() {
            --this->iterator;
            return *this;
        }

        Iterator operator++(int) {
            const Iterator it{*this};
            ++this->iterator;
            return it;
        }

        Iterator operator--(int) {
            const Iterator it{*this};
            --this->iterator;
            return it;
        }

        operator Iterator<true>() const {
            return Iterator<true>(this->iterator);
        }
    };

private:
    // Comparison callbacks handed to the RB_* helpers; they recover the
    // elements from their nodes before delegating to Comparator.
    static int CompareImpl(const IntrusiveRedBlackTreeNode* lhs,
                           const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*Traits::GetParent(lhs), *Traits::GetParent(rhs));
    }

    static int LightCompareImpl(const void* elm, const IntrusiveRedBlackTreeNode* rhs) {
        return Comparator::Compare(*static_cast<const_light_pointer>(elm), *Traits::GetParent(rhs));
    }

    // Define accessors using RB_* functions. The const_casts are needed because
    // the RB_* helpers take mutable pointers even for read-only lookups.
    IntrusiveRedBlackTreeNode* InsertImpl(IntrusiveRedBlackTreeNode* node) {
        return RB_INSERT(&impl.root, node, CompareImpl);
    }

    IntrusiveRedBlackTreeNode* FindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_FIND(const_cast<ImplType::RootType*>(&impl.root),
                       const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    IntrusiveRedBlackTreeNode* NFindImpl(const IntrusiveRedBlackTreeNode* node) const {
        return RB_NFIND(const_cast<ImplType::RootType*>(&impl.root),
                        const_cast<IntrusiveRedBlackTreeNode*>(node), CompareImpl);
    }

    IntrusiveRedBlackTreeNode* FindLightImpl(const_light_pointer lelm) const {
        return RB_FIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                             static_cast<const void*>(lelm), LightCompareImpl);
    }

    IntrusiveRedBlackTreeNode* NFindLightImpl(const_light_pointer lelm) const {
        return RB_NFIND_LIGHT(const_cast<ImplType::RootType*>(&impl.root),
                              static_cast<const void*>(lelm), LightCompareImpl);
    }

public:
    constexpr IntrusiveRedBlackTree() = default;

    // Iterator accessors.
    iterator begin() {
        return iterator(this->impl.begin());
    }

    const_iterator begin() const {
        return const_iterator(this->impl.begin());
    }

    iterator end() {
        return iterator(this->impl.end());
    }

    const_iterator end() const {
        return const_iterator(this->impl.end());
    }

    const_iterator cbegin() const {
        return this->begin();
    }

    const_iterator cend() const {
        return this->end();
    }

    // O(1) iterator pointing at an element already linked into this tree.
    iterator iterator_to(reference ref) {
        return iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    const_iterator iterator_to(const_reference ref) const {
        return const_iterator(this->impl.iterator_to(*Traits::GetNode(std::addressof(ref))));
    }

    // Content management. back()/front() on an empty tree are undefined;
    // callers must check empty() first.
    bool empty() const {
        return this->impl.empty();
    }

    reference back() {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    const_reference back() const {
        return *Traits::GetParent(std::addressof(this->impl.back()));
    }

    reference front() {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    const_reference front() const {
        return *Traits::GetParent(std::addressof(this->impl.front()));
    }

    // Unlinks the element (does not destroy it) and returns the next position.
    iterator erase(iterator it) {
        return iterator(this->impl.erase(it.GetImplIterator()));
    }

    // Links ref into the tree; ref must outlive its membership in the tree.
    iterator insert(reference ref) {
        ImplType::pointer node = Traits::GetNode(std::addressof(ref));
        this->InsertImpl(node);
        return iterator(node);
    }

    // Exact-match lookup; returns end() when absent.
    iterator find(const_reference ref) const {
        return iterator(this->FindImpl(Traits::GetNode(std::addressof(ref))));
    }

    // First element not less than ref (lower-bound style); end() when absent.
    iterator nfind(const_reference ref) const {
        return iterator(this->NFindImpl(Traits::GetNode(std::addressof(ref))));
    }

    // Key-only variants comparing against light_value_type.
    iterator find_light(const_light_reference ref) const {
        return iterator(this->FindLightImpl(std::addressof(ref)));
    }

    iterator nfind_light(const_light_reference ref) const {
        return iterator(this->NFindLightImpl(std::addressof(ref)));
    }
};
477
// Traits for trees whose node is a member field of the element type
// (IntrusiveRedBlackTreeNode Derived::*Member). The parent/node translation
// is validated at compile time via the static_assert below.
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraits;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraits<Member, Derived> {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> embedded node.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    // Embedded node -> element, via the computed member offset.
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

private:
    // Compile-time round-trip check: node-of-parent followed by parent-of-node
    // must recover the original pointer.
    static constexpr TypedStorage<Derived> DerivedStorage = {};
    static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage));
};
514
// Same as IntrusiveRedBlackTreeMemberTraits, but the round-trip layout check is
// deferred: instead of a class-scope static_assert (which requires Derived to
// be complete at traits instantiation), users call IsValid() in their own
// static_assert once Derived is complete.
template <auto T, class Derived = impl::GetParentType<T>>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert;

template <class Parent, IntrusiveRedBlackTreeNode Parent::*Member, class Derived>
class IntrusiveRedBlackTreeMemberTraitsDeferredAssert<Member, Derived> {
public:
    template <class Comparator>
    using TreeType =
        IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeMemberTraitsDeferredAssert, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

    // Round-trip layout check; intended to be used as static_assert(IsValid()).
    static constexpr bool IsValid() {
        TypedStorage<Derived> DerivedStorage = {};
        return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage);
    }

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> embedded node.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return std::addressof(parent->*Member);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return std::addressof(parent->*Member);
    }

    // Embedded node -> element, via the computed member offset.
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return GetParentPointer<Member, Derived>(node);
    }
};
553
554template <class Derived>
555class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode {
556public:
557 constexpr Derived* GetPrev() {
558 return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
559 }
560 constexpr const Derived* GetPrev() const {
561 return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetPrev(this));
562 }
563
564 constexpr Derived* GetNext() {
565 return static_cast<Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
566 }
567 constexpr const Derived* GetNext() const {
568 return static_cast<const Derived*>(impl::IntrusiveRedBlackTreeImpl::GetNext(this));
569 }
570};
571
// Traits for trees whose elements inherit from IntrusiveRedBlackTreeNode
// (typically via IntrusiveRedBlackTreeBaseNode). Translation between element
// and node is a plain up/down cast — no offset arithmetic required.
template <class Derived>
class IntrusiveRedBlackTreeBaseTraits {
public:
    template <class Comparator>
    using TreeType = IntrusiveRedBlackTree<Derived, IntrusiveRedBlackTreeBaseTraits, Comparator>;
    using TreeTypeImpl = impl::IntrusiveRedBlackTreeImpl;

private:
    template <class, class, class>
    friend class IntrusiveRedBlackTree;

    friend class impl::IntrusiveRedBlackTreeImpl;

    // Element -> node: upcast to the base subobject.
    static constexpr IntrusiveRedBlackTreeNode* GetNode(Derived* parent) {
        return static_cast<IntrusiveRedBlackTreeNode*>(parent);
    }

    static constexpr IntrusiveRedBlackTreeNode const* GetNode(Derived const* parent) {
        return static_cast<const IntrusiveRedBlackTreeNode*>(parent);
    }

    // Node -> element: downcast; valid because every node in such a tree is
    // the base subobject of a Derived.
    static constexpr Derived* GetParent(IntrusiveRedBlackTreeNode* node) {
        return static_cast<Derived*>(node);
    }

    static constexpr Derived const* GetParent(const IntrusiveRedBlackTreeNode* node) {
        return static_cast<const Derived*>(node);
    }
};
601
602} // namespace Common
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 631f64d05..2d4d2e9e7 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -145,10 +145,18 @@ void ColorConsoleBackend::Write(const Entry& entry) {
145 PrintColoredMessage(entry); 145 PrintColoredMessage(entry);
146} 146}
147 147
148// _SH_DENYWR allows read only access to the file for other programs. 148FileBackend::FileBackend(const std::string& filename) : bytes_written(0) {
149// It is #defined to 0 on other platforms 149 if (Common::FS::Exists(filename + ".old.txt")) {
150FileBackend::FileBackend(const std::string& filename) 150 Common::FS::Delete(filename + ".old.txt");
151 : file(filename, "w", _SH_DENYWR), bytes_written(0) {} 151 }
152 if (Common::FS::Exists(filename)) {
153 Common::FS::Rename(filename, filename + ".old.txt");
154 }
155
156 // _SH_DENYWR allows read only access to the file for other programs.
157 // It is #defined to 0 on other platforms
158 file = Common::FS::IOFile(filename, "w", _SH_DENYWR);
159}
152 160
153void FileBackend::Write(const Entry& entry) { 161void FileBackend::Write(const Entry& entry) {
154 // prevent logs from going over the maximum size (in case its spamming and the user doesn't 162 // prevent logs from going over the maximum size (in case its spamming and the user doesn't
diff --git a/src/common/parent_of_member.h b/src/common/parent_of_member.h
new file mode 100644
index 000000000..d9a14529d
--- /dev/null
+++ b/src/common/parent_of_member.h
@@ -0,0 +1,191 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <type_traits>
8
9#include "common/assert.h"
10#include "common/common_types.h"
11
12namespace Common {
namespace detail {
// Raw storage with the size and alignment of some type; lets code reserve
// space for a T without constructing one.
template <typename T, size_t Size, size_t Align>
struct TypedStorageImpl {
    std::aligned_storage_t<Size, Align> storage_;
};
} // namespace detail

// Uninitialized storage suitably sized and aligned to hold a T.
template <typename T>
using TypedStorage = detail::TypedStorageImpl<T, sizeof(T), alignof(T)>;

// Views the storage as a T*. No T is constructed by this call; dereferencing
// the result is only valid once a T has actually been placed in the storage.
template <typename T>
static constexpr T* GetPointer(TypedStorage<T>& ts) {
    return static_cast<T*>(static_cast<void*>(std::addressof(ts.storage_)));
}

template <typename T>
static constexpr const T* GetPointer(const TypedStorage<T>& ts) {
    return static_cast<const T*>(static_cast<const void*>(std::addressof(ts.storage_)));
}
32
33namespace impl {
34
// Machinery for computing member offsets at compile time (see OffsetOfCalculator).
// Builds a recursive chain of unions: the union at depth `Offset` overlays
// `Offset` bytes of padding followed by an array of MemberType, so scanning the
// chain covers every possible byte offset of a MemberType within ParentType up
// to MaxDepth (== sizeof(MemberType), one per possible sub-member alignment).
template <size_t MaxDepth>
struct OffsetOfUnionHolder {
    template <typename ParentType, typename MemberType, size_t Offset>
    union UnionImpl {
        using PaddingMember = char;
        static constexpr size_t GetOffset() {
            return Offset;
        }

// pack(1) so the padding bytes shift the member array by exactly Offset bytes.
#pragma pack(push, 1)
        struct {
            PaddingMember padding[Offset];
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
#pragma pack(pop)
        UnionImpl<ParentType, MemberType, Offset + 1> next_union;
    };

    // Depth 0: no padding (a zero-length array would be ill-formed).
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, 0> {
        static constexpr size_t GetOffset() {
            return 0;
        }

        struct {
            MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1];
        } data;
        UnionImpl<ParentType, MemberType, 1> next_union;
    };

    // Terminator ends the recursive chain.
    template <typename ParentType, typename MemberType>
    union UnionImpl<ParentType, MemberType, MaxDepth> {};
};
68
// Computes the byte offset of a pointer-to-member within ParentType in a
// constexpr context, where offsetof on a member pointer is unavailable.
// Overlays ParentType with MemberType arrays at every candidate byte offset
// (via OffsetOfUnionHolder) and scans for the array slot whose address matches
// the member's address.
template <typename ParentType, typename MemberType>
struct OffsetOfCalculator {
    using UnionHolder =
        typename OffsetOfUnionHolder<sizeof(MemberType)>::template UnionImpl<ParentType, MemberType,
                                                                             0>;
    union Union {
        char c{};
        UnionHolder first_union;
        TypedStorage<ParentType> parent;

        // Initialize the char member so the union is constexpr-constructible.
        constexpr Union() : c() {}
    };
    static constexpr Union U = {};

    // Advance start one element at a time until it reaches (or passes) target;
    // constexpr-legal alternative to raw pointer arithmetic comparisons.
    static constexpr const MemberType* GetNextAddress(const MemberType* start,
                                                      const MemberType* target) {
        while (start < target) {
            start++;
        }
        return start;
    }

    static constexpr std::ptrdiff_t GetDifference(const MemberType* start,
                                                  const MemberType* target) {
        return (target - start) * sizeof(MemberType);
    }

    // Scan the member array at this union's byte offset; if the target member
    // does not land exactly on an element, recurse into the next offset.
    template <typename CurUnion>
    static constexpr std::ptrdiff_t OffsetOfImpl(MemberType ParentType::*member,
                                                 CurUnion& cur_union) {
        constexpr size_t Offset = CurUnion::GetOffset();
        const auto target = std::addressof(GetPointer(U.parent)->*member);
        const auto start = std::addressof(cur_union.data.members[0]);
        const auto next = GetNextAddress(start, target);

        if (next != target) {
            if constexpr (Offset < sizeof(MemberType) - 1) {
                return OffsetOfImpl(member, cur_union.next_union);
            } else {
                // Every candidate offset exhausted — should be impossible for
                // a genuine member of ParentType.
                UNREACHABLE();
            }
        }

        return (next - start) * sizeof(MemberType) + Offset;
    }

    static constexpr std::ptrdiff_t OffsetOf(MemberType ParentType::*member) {
        return OffsetOfImpl(member, U.first_union);
    }
};
119
// Decomposes a pointer-to-member type M P::* into its parent (P) and member (M) types.
template <typename T>
struct GetMemberPointerTraits;

template <typename P, typename M>
struct GetMemberPointerTraits<M P::*> {
    using Parent = P;
    using Member = M;
};

template <auto MemberPtr>
using GetParentType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Parent;

template <auto MemberPtr>
using GetMemberType = typename GetMemberPointerTraits<decltype(MemberPtr)>::Member;

// Byte offset of MemberPtr within RealParentType, computed via OffsetOfCalculator.
// RealParentType may be the declaring class itself or a class derived from it.
template <auto MemberPtr, typename RealParentType = GetParentType<MemberPtr>>
static inline std::ptrdiff_t OffsetOf = [] {
    using DeducedParentType = GetParentType<MemberPtr>;
    using MemberType = GetMemberType<MemberPtr>;
    static_assert(std::is_base_of<DeducedParentType, RealParentType>::value ||
                  std::is_same<RealParentType, DeducedParentType>::value);

    return OffsetOfCalculator<RealParentType, MemberType>::OffsetOf(MemberPtr);
}();
144
145} // namespace impl
146
// Recover the enclosing object from a pointer/reference to one of its members:
// subtract the member's byte offset from the member's address. Overload set
// covers pointer and reference arguments, const and non-const. The member must
// actually live inside a RealParentType, otherwise behavior is undefined.
template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    return *static_cast<RealParentType*>(
        static_cast<void*>(static_cast<uint8_t*>(static_cast<void*>(member)) - Offset));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const* member) {
    std::ptrdiff_t Offset = impl::OffsetOf<MemberPtr, RealParentType>;
    return *static_cast<const RealParentType*>(static_cast<const void*>(
        static_cast<const uint8_t*>(static_cast<const void*>(member)) - Offset));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const* member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType& GetParentReference(impl::GetMemberType<MemberPtr>& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const& GetParentReference(impl::GetMemberType<MemberPtr> const& member) {
    return GetParentReference<MemberPtr, RealParentType>(std::addressof(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType* GetParentPointer(impl::GetMemberType<MemberPtr>& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}

template <auto MemberPtr, typename RealParentType = impl::GetParentType<MemberPtr>>
constexpr RealParentType const* GetParentPointer(impl::GetMemberType<MemberPtr> const& member) {
    return std::addressof(GetParentReference<MemberPtr, RealParentType>(member));
}
190
191} // namespace Common
diff --git a/src/common/timer.cpp b/src/common/timer.cpp
deleted file mode 100644
index d17dc2a50..000000000
--- a/src/common/timer.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
1// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <ctime>
6#include <fmt/format.h>
7#include "common/common_types.h"
8#include "common/string_util.h"
9#include "common/timer.h"
10
11namespace Common {
12
13std::chrono::milliseconds Timer::GetTimeMs() {
14 return std::chrono::duration_cast<std::chrono::milliseconds>(
15 std::chrono::system_clock::now().time_since_epoch());
16}
17
18// --------------------------------------------
19// Initiate, Start, Stop, and Update the time
20// --------------------------------------------
21
22// Set initial values for the class
23Timer::Timer() : m_LastTime(0), m_StartTime(0), m_Running(false) {
24 Update();
25}
26
27// Write the starting time
28void Timer::Start() {
29 m_StartTime = GetTimeMs();
30 m_Running = true;
31}
32
33// Stop the timer
34void Timer::Stop() {
35 // Write the final time
36 m_LastTime = GetTimeMs();
37 m_Running = false;
38}
39
40// Update the last time variable
41void Timer::Update() {
42 m_LastTime = GetTimeMs();
43 // TODO(ector) - QPF
44}
45
46// -------------------------------------
47// Get time difference and elapsed time
48// -------------------------------------
49
50// Get the number of milliseconds since the last Update()
51std::chrono::milliseconds Timer::GetTimeDifference() {
52 return GetTimeMs() - m_LastTime;
53}
54
55// Add the time difference since the last Update() to the starting time.
56// This is used to compensate for a paused game.
57void Timer::AddTimeDifference() {
58 m_StartTime += GetTimeDifference();
59}
60
61// Get the time elapsed since the Start()
62std::chrono::milliseconds Timer::GetTimeElapsed() {
63 // If we have not started yet, return 1 (because then I don't
64 // have to change the FPS calculation in CoreRerecording.cpp .
65 if (m_StartTime.count() == 0)
66 return std::chrono::milliseconds(1);
67
68 // Return the final timer time if the timer is stopped
69 if (!m_Running)
70 return (m_LastTime - m_StartTime);
71
72 return (GetTimeMs() - m_StartTime);
73}
74
75// Get the formatted time elapsed since the Start()
76std::string Timer::GetTimeElapsedFormatted() const {
77 // If we have not started yet, return zero
78 if (m_StartTime.count() == 0)
79 return "00:00:00:000";
80
81 // The number of milliseconds since the start.
82 // Use a different value if the timer is stopped.
83 std::chrono::milliseconds Milliseconds;
84 if (m_Running)
85 Milliseconds = GetTimeMs() - m_StartTime;
86 else
87 Milliseconds = m_LastTime - m_StartTime;
88 // Seconds
89 std::chrono::seconds Seconds = std::chrono::duration_cast<std::chrono::seconds>(Milliseconds);
90 // Minutes
91 std::chrono::minutes Minutes = std::chrono::duration_cast<std::chrono::minutes>(Milliseconds);
92 // Hours
93 std::chrono::hours Hours = std::chrono::duration_cast<std::chrono::hours>(Milliseconds);
94
95 std::string TmpStr = fmt::format("{:02}:{:02}:{:02}:{:03}", Hours.count(), Minutes.count() % 60,
96 Seconds.count() % 60, Milliseconds.count() % 1000);
97 return TmpStr;
98}
99
100// Get the number of seconds since January 1 1970
101std::chrono::seconds Timer::GetTimeSinceJan1970() {
102 return std::chrono::duration_cast<std::chrono::seconds>(GetTimeMs());
103}
104
105std::chrono::seconds Timer::GetLocalTimeSinceJan1970() {
106 time_t sysTime, tzDiff, tzDST;
107 struct tm* gmTime;
108
109 time(&sysTime);
110
111 // Account for DST where needed
112 gmTime = localtime(&sysTime);
113 if (gmTime->tm_isdst == 1)
114 tzDST = 3600;
115 else
116 tzDST = 0;
117
118 // Lazy way to get local time in sec
119 gmTime = gmtime(&sysTime);
120 tzDiff = sysTime - mktime(gmTime);
121
122 return std::chrono::seconds(sysTime + tzDiff + tzDST);
123}
124
125// Return the current time formatted as Minutes:Seconds:Milliseconds
126// in the form 00:00:000.
127std::string Timer::GetTimeFormatted() {
128 time_t sysTime;
129 struct tm* gmTime;
130 char tmp[13];
131
132 time(&sysTime);
133 gmTime = localtime(&sysTime);
134
135 strftime(tmp, 6, "%M:%S", gmTime);
136
137 u64 milliseconds = static_cast<u64>(GetTimeMs().count()) % 1000;
138 return fmt::format("{}:{:03}", tmp, milliseconds);
139}
140
141// Returns a timestamp with decimals for precise time comparisons
142// ----------------
143double Timer::GetDoubleTime() {
144 // Get continuous timestamp
145 auto tmp_seconds = static_cast<u64>(GetTimeSinceJan1970().count());
146 const auto ms = static_cast<double>(static_cast<u64>(GetTimeMs().count()) % 1000);
147
148 // Remove a few years. We only really want enough seconds to make
149 // sure that we are detecting actual actions, perhaps 60 seconds is
150 // enough really, but I leave a year of seconds anyway, in case the
151 // user's clock is incorrect or something like that.
152 tmp_seconds = tmp_seconds - (38 * 365 * 24 * 60 * 60);
153
154 // Make a smaller integer that fits in the double
155 const auto seconds = static_cast<u32>(tmp_seconds);
156 return seconds + ms;
157}
158
159} // Namespace Common
diff --git a/src/common/timer.h b/src/common/timer.h
deleted file mode 100644
index 8894a143d..000000000
--- a/src/common/timer.h
+++ /dev/null
@@ -1,41 +0,0 @@
1// Copyright 2013 Dolphin Emulator Project / 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <chrono>
8#include <string>
9#include "common/common_types.h"
10
11namespace Common {
/// Millisecond-resolution stopwatch plus a collection of static wall-clock
/// helpers (epoch offsets and formatted time strings).
class Timer {
public:
    Timer();

    void Start();
    void Stop();
    void Update();

    // The time difference is always returned in milliseconds, regardless of
    // alternative internal representation
    [[nodiscard]] std::chrono::milliseconds GetTimeDifference();
    void AddTimeDifference();

    [[nodiscard]] static std::chrono::seconds GetTimeSinceJan1970();
    [[nodiscard]] static std::chrono::seconds GetLocalTimeSinceJan1970();
    [[nodiscard]] static double GetDoubleTime();

    [[nodiscard]] static std::string GetTimeFormatted();
    [[nodiscard]] std::string GetTimeElapsedFormatted() const;
    [[nodiscard]] std::chrono::milliseconds GetTimeElapsed();

    [[nodiscard]] static std::chrono::milliseconds GetTimeMs();

private:
    std::chrono::milliseconds m_LastTime;  // timestamp of the last Update()/Stop()
    std::chrono::milliseconds m_StartTime; // timestamp of Start(); 0 = never started
    bool m_Running;                        // true between Start() and Stop()
};
40
41} // Namespace Common
diff --git a/src/common/tree.h b/src/common/tree.h
new file mode 100644
index 000000000..3da49e422
--- /dev/null
+++ b/src/common/tree.h
@@ -0,0 +1,674 @@
1/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
2/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
3/* $FreeBSD$ */
4
5/*-
6 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#pragma once
31
32/*
33 * This file defines data structures for red-black trees.
34 *
35 * A red-black tree is a binary search tree with the node color as an
36 * extra attribute. It fulfills a set of conditions:
37 * - every search path from the root to a leaf consists of the
38 * same number of black nodes,
39 * - each red node (except for the root) has a black parent,
40 * - each leaf node is black.
41 *
42 * Every operation on a red-black tree is bounded as O(lg n).
43 * The maximum height of a red-black tree is 2lg (n+1).
44 */
45
46namespace Common {
/// Root holder for an intrusive red-black tree of T nodes.
template <typename T>
class RBHead {
public:
    [[nodiscard]] T* Root() {
        return rbh_root;
    }

    [[nodiscard]] const T* Root() const {
        return rbh_root;
    }

    void SetRoot(T* root) {
        rbh_root = root;
    }

    /// A tree is empty exactly when it has no root node.
    [[nodiscard]] bool IsEmpty() const {
        return rbh_root == nullptr;
    }

private:
    T* rbh_root = nullptr;
};
69
// Node color used for red-black balancing.
enum class EntryColor {
    Black,
    Red,
};

// Intrusive red-black bookkeeping embedded in each tree node: the child and
// parent links plus the node color. A default-constructed entry is Black
// with all links null.
template <typename T>
class RBEntry {
public:
    [[nodiscard]] T* Left() {
        return rbe_left;
    }
    [[nodiscard]] const T* Left() const {
        return rbe_left;
    }
    void SetLeft(T* left) {
        rbe_left = left;
    }

    [[nodiscard]] T* Right() {
        return rbe_right;
    }
    [[nodiscard]] const T* Right() const {
        return rbe_right;
    }
    void SetRight(T* right) {
        rbe_right = right;
    }

    [[nodiscard]] T* Parent() {
        return rbe_parent;
    }
    [[nodiscard]] const T* Parent() const {
        return rbe_parent;
    }
    void SetParent(T* parent) {
        rbe_parent = parent;
    }

    [[nodiscard]] bool IsBlack() const {
        return rbe_color == EntryColor::Black;
    }
    [[nodiscard]] bool IsRed() const {
        return rbe_color == EntryColor::Red;
    }
    [[nodiscard]] EntryColor Color() const {
        return rbe_color;
    }
    void SetColor(EntryColor color) {
        rbe_color = color;
    }

private:
    T* rbe_left = nullptr;
    T* rbe_right = nullptr;
    T* rbe_parent = nullptr;
    EntryColor rbe_color{};
};
136
137template <typename Node>
138[[nodiscard]] RBEntry<Node>& RB_ENTRY(Node* node) {
139 return node->GetEntry();
140}
141
142template <typename Node>
143[[nodiscard]] const RBEntry<Node>& RB_ENTRY(const Node* node) {
144 return node->GetEntry();
145}
146
147template <typename Node>
148[[nodiscard]] Node* RB_PARENT(Node* node) {
149 return RB_ENTRY(node).Parent();
150}
151
152template <typename Node>
153[[nodiscard]] const Node* RB_PARENT(const Node* node) {
154 return RB_ENTRY(node).Parent();
155}
156
157template <typename Node>
158void RB_SET_PARENT(Node* node, Node* parent) {
159 return RB_ENTRY(node).SetParent(parent);
160}
161
// Child-link accessors, forwarded through the node's intrusive entry.
template <typename Node>
[[nodiscard]] Node* RB_LEFT(Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename Node>
[[nodiscard]] const Node* RB_LEFT(const Node* node) {
    return RB_ENTRY(node).Left();
}

template <typename Node>
void RB_SET_LEFT(Node* node, Node* left) {
    RB_ENTRY(node).SetLeft(left);
}

template <typename Node>
[[nodiscard]] Node* RB_RIGHT(Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename Node>
[[nodiscard]] const Node* RB_RIGHT(const Node* node) {
    return RB_ENTRY(node).Right();
}

template <typename Node>
void RB_SET_RIGHT(Node* node, Node* right) {
    RB_ENTRY(node).SetRight(right);
}
191
192template <typename Node>
193[[nodiscard]] bool RB_IS_BLACK(const Node* node) {
194 return RB_ENTRY(node).IsBlack();
195}
196
197template <typename Node>
198[[nodiscard]] bool RB_IS_RED(const Node* node) {
199 return RB_ENTRY(node).IsRed();
200}
201
202template <typename Node>
203[[nodiscard]] EntryColor RB_COLOR(const Node* node) {
204 return RB_ENTRY(node).Color();
205}
206
207template <typename Node>
208void RB_SET_COLOR(Node* node, EntryColor color) {
209 return RB_ENTRY(node).SetColor(color);
210}
211
212template <typename Node>
213void RB_SET(Node* node, Node* parent) {
214 auto& entry = RB_ENTRY(node);
215 entry.SetParent(parent);
216 entry.SetLeft(nullptr);
217 entry.SetRight(nullptr);
218 entry.SetColor(EntryColor::Red);
219}
220
221template <typename Node>
222void RB_SET_BLACKRED(Node* black, Node* red) {
223 RB_SET_COLOR(black, EntryColor::Black);
224 RB_SET_COLOR(red, EntryColor::Red);
225}
226
227template <typename Node>
228void RB_ROTATE_LEFT(RBHead<Node>* head, Node* elm, Node*& tmp) {
229 tmp = RB_RIGHT(elm);
230 RB_SET_RIGHT(elm, RB_LEFT(tmp));
231 if (RB_RIGHT(elm) != nullptr) {
232 RB_SET_PARENT(RB_LEFT(tmp), elm);
233 }
234
235 RB_SET_PARENT(tmp, RB_PARENT(elm));
236 if (RB_PARENT(tmp) != nullptr) {
237 if (elm == RB_LEFT(RB_PARENT(elm))) {
238 RB_SET_LEFT(RB_PARENT(elm), tmp);
239 } else {
240 RB_SET_RIGHT(RB_PARENT(elm), tmp);
241 }
242 } else {
243 head->SetRoot(tmp);
244 }
245
246 RB_SET_LEFT(tmp, elm);
247 RB_SET_PARENT(elm, tmp);
248}
249
250template <typename Node>
251void RB_ROTATE_RIGHT(RBHead<Node>* head, Node* elm, Node*& tmp) {
252 tmp = RB_LEFT(elm);
253 RB_SET_LEFT(elm, RB_RIGHT(tmp));
254 if (RB_LEFT(elm) != nullptr) {
255 RB_SET_PARENT(RB_RIGHT(tmp), elm);
256 }
257
258 RB_SET_PARENT(tmp, RB_PARENT(elm));
259 if (RB_PARENT(tmp) != nullptr) {
260 if (elm == RB_LEFT(RB_PARENT(elm))) {
261 RB_SET_LEFT(RB_PARENT(elm), tmp);
262 } else {
263 RB_SET_RIGHT(RB_PARENT(elm), tmp);
264 }
265 } else {
266 head->SetRoot(tmp);
267 }
268
269 RB_SET_RIGHT(tmp, elm);
270 RB_SET_PARENT(elm, tmp);
271}
272
273template <typename Node>
274void RB_INSERT_COLOR(RBHead<Node>* head, Node* elm) {
275 Node* parent = nullptr;
276 Node* tmp = nullptr;
277
278 while ((parent = RB_PARENT(elm)) != nullptr && RB_IS_RED(parent)) {
279 Node* gparent = RB_PARENT(parent);
280 if (parent == RB_LEFT(gparent)) {
281 tmp = RB_RIGHT(gparent);
282 if (tmp && RB_IS_RED(tmp)) {
283 RB_SET_COLOR(tmp, EntryColor::Black);
284 RB_SET_BLACKRED(parent, gparent);
285 elm = gparent;
286 continue;
287 }
288
289 if (RB_RIGHT(parent) == elm) {
290 RB_ROTATE_LEFT(head, parent, tmp);
291 tmp = parent;
292 parent = elm;
293 elm = tmp;
294 }
295
296 RB_SET_BLACKRED(parent, gparent);
297 RB_ROTATE_RIGHT(head, gparent, tmp);
298 } else {
299 tmp = RB_LEFT(gparent);
300 if (tmp && RB_IS_RED(tmp)) {
301 RB_SET_COLOR(tmp, EntryColor::Black);
302 RB_SET_BLACKRED(parent, gparent);
303 elm = gparent;
304 continue;
305 }
306
307 if (RB_LEFT(parent) == elm) {
308 RB_ROTATE_RIGHT(head, parent, tmp);
309 tmp = parent;
310 parent = elm;
311 elm = tmp;
312 }
313
314 RB_SET_BLACKRED(parent, gparent);
315 RB_ROTATE_LEFT(head, gparent, tmp);
316 }
317 }
318
319 RB_SET_COLOR(head->Root(), EntryColor::Black);
320}
321
322template <typename Node>
323void RB_REMOVE_COLOR(RBHead<Node>* head, Node* parent, Node* elm) {
324 Node* tmp;
325 while ((elm == nullptr || RB_IS_BLACK(elm)) && elm != head->Root()) {
326 if (RB_LEFT(parent) == elm) {
327 tmp = RB_RIGHT(parent);
328 if (RB_IS_RED(tmp)) {
329 RB_SET_BLACKRED(tmp, parent);
330 RB_ROTATE_LEFT(head, parent, tmp);
331 tmp = RB_RIGHT(parent);
332 }
333
334 if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
335 (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
336 RB_SET_COLOR(tmp, EntryColor::Red);
337 elm = parent;
338 parent = RB_PARENT(elm);
339 } else {
340 if (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp))) {
341 Node* oleft;
342 if ((oleft = RB_LEFT(tmp)) != nullptr) {
343 RB_SET_COLOR(oleft, EntryColor::Black);
344 }
345
346 RB_SET_COLOR(tmp, EntryColor::Red);
347 RB_ROTATE_RIGHT(head, tmp, oleft);
348 tmp = RB_RIGHT(parent);
349 }
350
351 RB_SET_COLOR(tmp, RB_COLOR(parent));
352 RB_SET_COLOR(parent, EntryColor::Black);
353 if (RB_RIGHT(tmp)) {
354 RB_SET_COLOR(RB_RIGHT(tmp), EntryColor::Black);
355 }
356
357 RB_ROTATE_LEFT(head, parent, tmp);
358 elm = head->Root();
359 break;
360 }
361 } else {
362 tmp = RB_LEFT(parent);
363 if (RB_IS_RED(tmp)) {
364 RB_SET_BLACKRED(tmp, parent);
365 RB_ROTATE_RIGHT(head, parent, tmp);
366 tmp = RB_LEFT(parent);
367 }
368
369 if ((RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) &&
370 (RB_RIGHT(tmp) == nullptr || RB_IS_BLACK(RB_RIGHT(tmp)))) {
371 RB_SET_COLOR(tmp, EntryColor::Red);
372 elm = parent;
373 parent = RB_PARENT(elm);
374 } else {
375 if (RB_LEFT(tmp) == nullptr || RB_IS_BLACK(RB_LEFT(tmp))) {
376 Node* oright;
377 if ((oright = RB_RIGHT(tmp)) != nullptr) {
378 RB_SET_COLOR(oright, EntryColor::Black);
379 }
380
381 RB_SET_COLOR(tmp, EntryColor::Red);
382 RB_ROTATE_LEFT(head, tmp, oright);
383 tmp = RB_LEFT(parent);
384 }
385
386 RB_SET_COLOR(tmp, RB_COLOR(parent));
387 RB_SET_COLOR(parent, EntryColor::Black);
388
389 if (RB_LEFT(tmp)) {
390 RB_SET_COLOR(RB_LEFT(tmp), EntryColor::Black);
391 }
392
393 RB_ROTATE_RIGHT(head, parent, tmp);
394 elm = head->Root();
395 break;
396 }
397 }
398 }
399
400 if (elm) {
401 RB_SET_COLOR(elm, EntryColor::Black);
402 }
403}
404
405template <typename Node>
406Node* RB_REMOVE(RBHead<Node>* head, Node* elm) {
407 Node* child = nullptr;
408 Node* parent = nullptr;
409 Node* old = elm;
410 EntryColor color{};
411
412 const auto finalize = [&] {
413 if (color == EntryColor::Black) {
414 RB_REMOVE_COLOR(head, parent, child);
415 }
416
417 return old;
418 };
419
420 if (RB_LEFT(elm) == nullptr) {
421 child = RB_RIGHT(elm);
422 } else if (RB_RIGHT(elm) == nullptr) {
423 child = RB_LEFT(elm);
424 } else {
425 Node* left;
426 elm = RB_RIGHT(elm);
427 while ((left = RB_LEFT(elm)) != nullptr) {
428 elm = left;
429 }
430
431 child = RB_RIGHT(elm);
432 parent = RB_PARENT(elm);
433 color = RB_COLOR(elm);
434
435 if (child) {
436 RB_SET_PARENT(child, parent);
437 }
438 if (parent) {
439 if (RB_LEFT(parent) == elm) {
440 RB_SET_LEFT(parent, child);
441 } else {
442 RB_SET_RIGHT(parent, child);
443 }
444 } else {
445 head->SetRoot(child);
446 }
447
448 if (RB_PARENT(elm) == old) {
449 parent = elm;
450 }
451
452 elm->SetEntry(old->GetEntry());
453
454 if (RB_PARENT(old)) {
455 if (RB_LEFT(RB_PARENT(old)) == old) {
456 RB_SET_LEFT(RB_PARENT(old), elm);
457 } else {
458 RB_SET_RIGHT(RB_PARENT(old), elm);
459 }
460 } else {
461 head->SetRoot(elm);
462 }
463 RB_SET_PARENT(RB_LEFT(old), elm);
464 if (RB_RIGHT(old)) {
465 RB_SET_PARENT(RB_RIGHT(old), elm);
466 }
467 if (parent) {
468 left = parent;
469 }
470
471 return finalize();
472 }
473
474 parent = RB_PARENT(elm);
475 color = RB_COLOR(elm);
476
477 if (child) {
478 RB_SET_PARENT(child, parent);
479 }
480 if (parent) {
481 if (RB_LEFT(parent) == elm) {
482 RB_SET_LEFT(parent, child);
483 } else {
484 RB_SET_RIGHT(parent, child);
485 }
486 } else {
487 head->SetRoot(child);
488 }
489
490 return finalize();
491}
492
493// Inserts a node into the RB tree
494template <typename Node, typename CompareFunction>
495Node* RB_INSERT(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
496 Node* parent = nullptr;
497 Node* tmp = head->Root();
498 int comp = 0;
499
500 while (tmp) {
501 parent = tmp;
502 comp = cmp(elm, parent);
503 if (comp < 0) {
504 tmp = RB_LEFT(tmp);
505 } else if (comp > 0) {
506 tmp = RB_RIGHT(tmp);
507 } else {
508 return tmp;
509 }
510 }
511
512 RB_SET(elm, parent);
513
514 if (parent != nullptr) {
515 if (comp < 0) {
516 RB_SET_LEFT(parent, elm);
517 } else {
518 RB_SET_RIGHT(parent, elm);
519 }
520 } else {
521 head->SetRoot(elm);
522 }
523
524 RB_INSERT_COLOR(head, elm);
525 return nullptr;
526}
527
528// Finds the node with the same key as elm
529template <typename Node, typename CompareFunction>
530Node* RB_FIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
531 Node* tmp = head->Root();
532
533 while (tmp) {
534 const int comp = cmp(elm, tmp);
535 if (comp < 0) {
536 tmp = RB_LEFT(tmp);
537 } else if (comp > 0) {
538 tmp = RB_RIGHT(tmp);
539 } else {
540 return tmp;
541 }
542 }
543
544 return nullptr;
545}
546
547// Finds the first node greater than or equal to the search key
548template <typename Node, typename CompareFunction>
549Node* RB_NFIND(RBHead<Node>* head, Node* elm, CompareFunction cmp) {
550 Node* tmp = head->Root();
551 Node* res = nullptr;
552
553 while (tmp) {
554 const int comp = cmp(elm, tmp);
555 if (comp < 0) {
556 res = tmp;
557 tmp = RB_LEFT(tmp);
558 } else if (comp > 0) {
559 tmp = RB_RIGHT(tmp);
560 } else {
561 return tmp;
562 }
563 }
564
565 return res;
566}
567
568// Finds the node with the same key as lelm
569template <typename Node, typename CompareFunction>
570Node* RB_FIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
571 Node* tmp = head->Root();
572
573 while (tmp) {
574 const int comp = lcmp(lelm, tmp);
575 if (comp < 0) {
576 tmp = RB_LEFT(tmp);
577 } else if (comp > 0) {
578 tmp = RB_RIGHT(tmp);
579 } else {
580 return tmp;
581 }
582 }
583
584 return nullptr;
585}
586
587// Finds the first node greater than or equal to the search key
588template <typename Node, typename CompareFunction>
589Node* RB_NFIND_LIGHT(RBHead<Node>* head, const void* lelm, CompareFunction lcmp) {
590 Node* tmp = head->Root();
591 Node* res = nullptr;
592
593 while (tmp) {
594 const int comp = lcmp(lelm, tmp);
595 if (comp < 0) {
596 res = tmp;
597 tmp = RB_LEFT(tmp);
598 } else if (comp > 0) {
599 tmp = RB_RIGHT(tmp);
600 } else {
601 return tmp;
602 }
603 }
604
605 return res;
606}
607
// In-order successor of elm; nullptr once the maximum has been passed.
template <typename Node>
Node* RB_NEXT(Node* elm) {
    if (RB_RIGHT(elm)) {
        // Successor is the leftmost node of the right subtree.
        elm = RB_RIGHT(elm);
        while (RB_LEFT(elm)) {
            elm = RB_LEFT(elm);
        }
        return elm;
    }

    // Otherwise climb until we arrive at a parent from its left side.
    if (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
        return RB_PARENT(elm);
    }
    while (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
        elm = RB_PARENT(elm);
    }
    return RB_PARENT(elm);
}
627
// In-order predecessor of elm; nullptr once the minimum has been passed.
// Mirror of RB_NEXT.
template <typename Node>
Node* RB_PREV(Node* elm) {
    if (RB_LEFT(elm)) {
        // Predecessor is the rightmost node of the left subtree.
        elm = RB_LEFT(elm);
        while (RB_RIGHT(elm)) {
            elm = RB_RIGHT(elm);
        }
        return elm;
    }

    // Otherwise climb until we arrive at a parent from its right side.
    if (RB_PARENT(elm) && (elm == RB_RIGHT(RB_PARENT(elm)))) {
        return RB_PARENT(elm);
    }
    while (RB_PARENT(elm) && (elm == RB_LEFT(RB_PARENT(elm)))) {
        elm = RB_PARENT(elm);
    }
    return RB_PARENT(elm);
}
647
648template <typename Node>
649Node* RB_MINMAX(RBHead<Node>* head, bool is_min) {
650 Node* tmp = head->Root();
651 Node* parent = nullptr;
652
653 while (tmp) {
654 parent = tmp;
655 if (is_min) {
656 tmp = RB_LEFT(tmp);
657 } else {
658 tmp = RB_RIGHT(tmp);
659 }
660 }
661
662 return parent;
663}
664
665template <typename Node>
666Node* RB_MIN(RBHead<Node>* head) {
667 return RB_MINMAX(head, true);
668}
669
670template <typename Node>
671Node* RB_MAX(RBHead<Node>* head) {
672 return RB_MINMAX(head, false);
673}
674} // namespace Common
diff --git a/src/common/uuid.h b/src/common/uuid.h
index 4ab9a25f0..2e7a18405 100644
--- a/src/common/uuid.h
+++ b/src/common/uuid.h
@@ -14,8 +14,8 @@ constexpr u128 INVALID_UUID{{0, 0}};
14 14
15struct UUID { 15struct UUID {
16 // UUIDs which are 0 are considered invalid! 16 // UUIDs which are 0 are considered invalid!
17 u128 uuid = INVALID_UUID; 17 u128 uuid;
18 constexpr UUID() = default; 18 UUID() = default;
19 constexpr explicit UUID(const u128& id) : uuid{id} {} 19 constexpr explicit UUID(const u128& id) : uuid{id} {}
20 constexpr explicit UUID(const u64 lo, const u64 hi) : uuid{{lo, hi}} {} 20 constexpr explicit UUID(const u64 lo, const u64 hi) : uuid{{lo, hi}} {}
21 21
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index eb8a7782f..a65f6b832 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -2,19 +2,74 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <array>
5#include <chrono> 6#include <chrono>
7#include <limits>
6#include <mutex> 8#include <mutex>
7#include <thread> 9#include <thread>
8 10
9#ifdef _MSC_VER 11#ifdef _MSC_VER
10#include <intrin.h> 12#include <intrin.h>
13
14#pragma intrinsic(__umulh)
15#pragma intrinsic(_udiv128)
11#else 16#else
12#include <x86intrin.h> 17#include <x86intrin.h>
13#endif 18#endif
14 19
20#include "common/atomic_ops.h"
15#include "common/uint128.h" 21#include "common/uint128.h"
16#include "common/x64/native_clock.h" 22#include "common/x64/native_clock.h"
17 23
24namespace {
25
26[[nodiscard]] u64 GetFixedPoint64Factor(u64 numerator, u64 divisor) {
27#ifdef __SIZEOF_INT128__
28 const auto base = static_cast<unsigned __int128>(numerator) << 64ULL;
29 return static_cast<u64>(base / divisor);
30#elif defined(_M_X64) || defined(_M_ARM64)
31 std::array<u64, 2> r = {0, numerator};
32 u64 remainder;
33#if _MSC_VER < 1923
34 return udiv128(r[1], r[0], divisor, &remainder);
35#else
36 return _udiv128(r[1], r[0], divisor, &remainder);
37#endif
38#else
39 // This one is bit more inaccurate.
40 return MultiplyAndDivide64(std::numeric_limits<u64>::max(), numerator, divisor);
41#endif
42}
43
44[[nodiscard]] u64 MultiplyHigh(u64 a, u64 b) {
45#ifdef __SIZEOF_INT128__
46 return (static_cast<unsigned __int128>(a) * static_cast<unsigned __int128>(b)) >> 64;
47#elif defined(_M_X64) || defined(_M_ARM64)
48 return __umulh(a, b); // MSVC
49#else
50 // Generic fallback
51 const u64 a_lo = u32(a);
52 const u64 a_hi = a >> 32;
53 const u64 b_lo = u32(b);
54 const u64 b_hi = b >> 32;
55
56 const u64 a_x_b_hi = a_hi * b_hi;
57 const u64 a_x_b_mid = a_hi * b_lo;
58 const u64 b_x_a_mid = b_hi * a_lo;
59 const u64 a_x_b_lo = a_lo * b_lo;
60
61 const u64 carry_bit = (static_cast<u64>(static_cast<u32>(a_x_b_mid)) +
62 static_cast<u64>(static_cast<u32>(b_x_a_mid)) + (a_x_b_lo >> 32)) >>
63 32;
64
65 const u64 multhi = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;
66
67 return multhi;
68#endif
69}
70
71} // namespace
72
18namespace Common { 73namespace Common {
19 74
20u64 EstimateRDTSCFrequency() { 75u64 EstimateRDTSCFrequency() {
@@ -48,54 +103,71 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
48 : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{ 103 : WallClock(emulated_cpu_frequency_, emulated_clock_frequency_, true), rtsc_frequency{
49 rtsc_frequency_} { 104 rtsc_frequency_} {
50 _mm_mfence(); 105 _mm_mfence();
51 last_measure = __rdtsc(); 106 time_point.inner.last_measure = __rdtsc();
52 accumulated_ticks = 0U; 107 time_point.inner.accumulated_ticks = 0U;
108 ns_rtsc_factor = GetFixedPoint64Factor(1000000000, rtsc_frequency);
109 us_rtsc_factor = GetFixedPoint64Factor(1000000, rtsc_frequency);
110 ms_rtsc_factor = GetFixedPoint64Factor(1000, rtsc_frequency);
111 clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
112 cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
53} 113}
54 114
55u64 NativeClock::GetRTSC() { 115u64 NativeClock::GetRTSC() {
56 std::scoped_lock scope{rtsc_serialize}; 116 TimePoint new_time_point{};
57 _mm_mfence(); 117 TimePoint current_time_point{};
58 const u64 current_measure = __rdtsc(); 118 do {
59 u64 diff = current_measure - last_measure; 119 current_time_point.pack = time_point.pack;
60 diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0) 120 _mm_mfence();
61 if (current_measure > last_measure) { 121 const u64 current_measure = __rdtsc();
62 last_measure = current_measure; 122 u64 diff = current_measure - current_time_point.inner.last_measure;
63 } 123 diff = diff & ~static_cast<u64>(static_cast<s64>(diff) >> 63); // max(diff, 0)
64 accumulated_ticks += diff; 124 new_time_point.inner.last_measure = current_measure > current_time_point.inner.last_measure
125 ? current_measure
126 : current_time_point.inner.last_measure;
127 new_time_point.inner.accumulated_ticks = current_time_point.inner.accumulated_ticks + diff;
128 } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
129 current_time_point.pack));
65 /// The clock cannot be more precise than the guest timer, remove the lower bits 130 /// The clock cannot be more precise than the guest timer, remove the lower bits
66 return accumulated_ticks & inaccuracy_mask; 131 return new_time_point.inner.accumulated_ticks & inaccuracy_mask;
67} 132}
68 133
69void NativeClock::Pause(bool is_paused) { 134void NativeClock::Pause(bool is_paused) {
70 if (!is_paused) { 135 if (!is_paused) {
71 _mm_mfence(); 136 TimePoint current_time_point{};
72 last_measure = __rdtsc(); 137 TimePoint new_time_point{};
138 do {
139 current_time_point.pack = time_point.pack;
140 new_time_point.pack = current_time_point.pack;
141 _mm_mfence();
142 new_time_point.inner.last_measure = __rdtsc();
143 } while (!Common::AtomicCompareAndSwap(time_point.pack.data(), new_time_point.pack,
144 current_time_point.pack));
73 } 145 }
74} 146}
75 147
76std::chrono::nanoseconds NativeClock::GetTimeNS() { 148std::chrono::nanoseconds NativeClock::GetTimeNS() {
77 const u64 rtsc_value = GetRTSC(); 149 const u64 rtsc_value = GetRTSC();
78 return std::chrono::nanoseconds{MultiplyAndDivide64(rtsc_value, 1000000000, rtsc_frequency)}; 150 return std::chrono::nanoseconds{MultiplyHigh(rtsc_value, ns_rtsc_factor)};
79} 151}
80 152
81std::chrono::microseconds NativeClock::GetTimeUS() { 153std::chrono::microseconds NativeClock::GetTimeUS() {
82 const u64 rtsc_value = GetRTSC(); 154 const u64 rtsc_value = GetRTSC();
83 return std::chrono::microseconds{MultiplyAndDivide64(rtsc_value, 1000000, rtsc_frequency)}; 155 return std::chrono::microseconds{MultiplyHigh(rtsc_value, us_rtsc_factor)};
84} 156}
85 157
86std::chrono::milliseconds NativeClock::GetTimeMS() { 158std::chrono::milliseconds NativeClock::GetTimeMS() {
87 const u64 rtsc_value = GetRTSC(); 159 const u64 rtsc_value = GetRTSC();
88 return std::chrono::milliseconds{MultiplyAndDivide64(rtsc_value, 1000, rtsc_frequency)}; 160 return std::chrono::milliseconds{MultiplyHigh(rtsc_value, ms_rtsc_factor)};
89} 161}
90 162
91u64 NativeClock::GetClockCycles() { 163u64 NativeClock::GetClockCycles() {
92 const u64 rtsc_value = GetRTSC(); 164 const u64 rtsc_value = GetRTSC();
93 return MultiplyAndDivide64(rtsc_value, emulated_clock_frequency, rtsc_frequency); 165 return MultiplyHigh(rtsc_value, clock_rtsc_factor);
94} 166}
95 167
96u64 NativeClock::GetCPUCycles() { 168u64 NativeClock::GetCPUCycles() {
97 const u64 rtsc_value = GetRTSC(); 169 const u64 rtsc_value = GetRTSC();
98 return MultiplyAndDivide64(rtsc_value, emulated_cpu_frequency, rtsc_frequency); 170 return MultiplyHigh(rtsc_value, cpu_rtsc_factor);
99} 171}
100 172
101} // namespace X64 173} // namespace X64
diff --git a/src/common/x64/native_clock.h b/src/common/x64/native_clock.h
index 6d1e32ac8..7cbd400d2 100644
--- a/src/common/x64/native_clock.h
+++ b/src/common/x64/native_clock.h
@@ -6,7 +6,6 @@
6 6
7#include <optional> 7#include <optional>
8 8
9#include "common/spin_lock.h"
10#include "common/wall_clock.h" 9#include "common/wall_clock.h"
11 10
12namespace Common { 11namespace Common {
@@ -32,14 +31,28 @@ public:
32private: 31private:
33 u64 GetRTSC(); 32 u64 GetRTSC();
34 33
34 union alignas(16) TimePoint {
35 TimePoint() : pack{} {}
36 u128 pack{};
37 struct Inner {
38 u64 last_measure{};
39 u64 accumulated_ticks{};
40 } inner;
41 };
42
35 /// value used to reduce the native clocks accuracy as some apss rely on 43 /// value used to reduce the native clocks accuracy as some apss rely on
36 /// undefined behavior where the level of accuracy in the clock shouldn't 44 /// undefined behavior where the level of accuracy in the clock shouldn't
37 /// be higher. 45 /// be higher.
38 static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1); 46 static constexpr u64 inaccuracy_mask = ~(UINT64_C(0x400) - 1);
39 47
40 SpinLock rtsc_serialize{}; 48 TimePoint time_point;
41 u64 last_measure{}; 49 // factors
42 u64 accumulated_ticks{}; 50 u64 clock_rtsc_factor{};
51 u64 cpu_rtsc_factor{};
52 u64 ns_rtsc_factor{};
53 u64 us_rtsc_factor{};
54 u64 ms_rtsc_factor{};
55
43 u64 rtsc_frequency; 56 u64 rtsc_frequency;
44}; 57};
45} // namespace X64 58} // namespace X64
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 893df433a..2f6b22747 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -142,8 +142,6 @@ add_library(core STATIC
142 hardware_interrupt_manager.h 142 hardware_interrupt_manager.h
143 hle/ipc.h 143 hle/ipc.h
144 hle/ipc_helpers.h 144 hle/ipc_helpers.h
145 hle/kernel/address_arbiter.cpp
146 hle/kernel/address_arbiter.h
147 hle/kernel/client_port.cpp 145 hle/kernel/client_port.cpp
148 hle/kernel/client_port.h 146 hle/kernel/client_port.h
149 hle/kernel/client_session.cpp 147 hle/kernel/client_session.cpp
@@ -157,13 +155,19 @@ add_library(core STATIC
157 hle/kernel/handle_table.h 155 hle/kernel/handle_table.h
158 hle/kernel/hle_ipc.cpp 156 hle/kernel/hle_ipc.cpp
159 hle/kernel/hle_ipc.h 157 hle/kernel/hle_ipc.h
158 hle/kernel/k_address_arbiter.cpp
159 hle/kernel/k_address_arbiter.h
160 hle/kernel/k_affinity_mask.h 160 hle/kernel/k_affinity_mask.h
161 hle/kernel/k_condition_variable.cpp
162 hle/kernel/k_condition_variable.h
161 hle/kernel/k_priority_queue.h 163 hle/kernel/k_priority_queue.h
162 hle/kernel/k_scheduler.cpp 164 hle/kernel/k_scheduler.cpp
163 hle/kernel/k_scheduler.h 165 hle/kernel/k_scheduler.h
164 hle/kernel/k_scheduler_lock.h 166 hle/kernel/k_scheduler_lock.h
165 hle/kernel/k_scoped_lock.h 167 hle/kernel/k_scoped_lock.h
166 hle/kernel/k_scoped_scheduler_lock_and_sleep.h 168 hle/kernel/k_scoped_scheduler_lock_and_sleep.h
169 hle/kernel/k_synchronization_object.cpp
170 hle/kernel/k_synchronization_object.h
167 hle/kernel/kernel.cpp 171 hle/kernel/kernel.cpp
168 hle/kernel/kernel.h 172 hle/kernel/kernel.h
169 hle/kernel/memory/address_space_info.cpp 173 hle/kernel/memory/address_space_info.cpp
@@ -183,8 +187,6 @@ add_library(core STATIC
183 hle/kernel/memory/slab_heap.h 187 hle/kernel/memory/slab_heap.h
184 hle/kernel/memory/system_control.cpp 188 hle/kernel/memory/system_control.cpp
185 hle/kernel/memory/system_control.h 189 hle/kernel/memory/system_control.h
186 hle/kernel/mutex.cpp
187 hle/kernel/mutex.h
188 hle/kernel/object.cpp 190 hle/kernel/object.cpp
189 hle/kernel/object.h 191 hle/kernel/object.h
190 hle/kernel/physical_core.cpp 192 hle/kernel/physical_core.cpp
@@ -210,12 +212,10 @@ add_library(core STATIC
210 hle/kernel/shared_memory.h 212 hle/kernel/shared_memory.h
211 hle/kernel/svc.cpp 213 hle/kernel/svc.cpp
212 hle/kernel/svc.h 214 hle/kernel/svc.h
215 hle/kernel/svc_common.h
216 hle/kernel/svc_results.h
213 hle/kernel/svc_types.h 217 hle/kernel/svc_types.h
214 hle/kernel/svc_wrap.h 218 hle/kernel/svc_wrap.h
215 hle/kernel/synchronization_object.cpp
216 hle/kernel/synchronization_object.h
217 hle/kernel/synchronization.cpp
218 hle/kernel/synchronization.h
219 hle/kernel/thread.cpp 219 hle/kernel/thread.cpp
220 hle/kernel/thread.h 220 hle/kernel/thread.h
221 hle/kernel/time_manager.cpp 221 hle/kernel/time_manager.cpp
@@ -643,10 +643,9 @@ else()
643 -Werror=conversion 643 -Werror=conversion
644 -Werror=ignored-qualifiers 644 -Werror=ignored-qualifiers
645 -Werror=implicit-fallthrough 645 -Werror=implicit-fallthrough
646 -Werror=reorder
647 -Werror=sign-compare 646 -Werror=sign-compare
648 -Werror=unused-variable
649 647
648 $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
650 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> 649 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
651 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> 650 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
652 651
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 70098c526..9a0151736 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -26,9 +26,10 @@ using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CO
26/// Generic ARMv8 CPU interface 26/// Generic ARMv8 CPU interface
27class ARM_Interface : NonCopyable { 27class ARM_Interface : NonCopyable {
28public: 28public:
29 explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers, bool uses_wall_clock) 29 explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
30 : system{system_}, interrupt_handlers{interrupt_handlers}, uses_wall_clock{ 30 bool uses_wall_clock_)
31 uses_wall_clock} {} 31 : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
32 uses_wall_clock_} {}
32 virtual ~ARM_Interface() = default; 33 virtual ~ARM_Interface() = default;
33 34
34 struct ThreadContext32 { 35 struct ThreadContext32 {
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index e6c8461a5..874b5673a 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -49,6 +49,7 @@ void CoreTiming::ThreadEntry(CoreTiming& instance) {
49 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh); 49 Common::SetCurrentThreadPriority(Common::ThreadPriority::VeryHigh);
50 instance.on_thread_init(); 50 instance.on_thread_init();
51 instance.ThreadLoop(); 51 instance.ThreadLoop();
52 MicroProfileOnThreadExit();
52} 53}
53 54
54void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) { 55void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
diff --git a/src/core/file_sys/content_archive.cpp b/src/core/file_sys/content_archive.cpp
index a6c0337fa..d12218fc2 100644
--- a/src/core/file_sys/content_archive.cpp
+++ b/src/core/file_sys/content_archive.cpp
@@ -43,17 +43,17 @@ static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
43struct IVFCHeader { 43struct IVFCHeader {
44 u32_le magic; 44 u32_le magic;
45 u32_le magic_number; 45 u32_le magic_number;
46 INSERT_UNION_PADDING_BYTES(8); 46 INSERT_PADDING_BYTES_NOINIT(8);
47 std::array<IVFCLevel, 6> levels; 47 std::array<IVFCLevel, 6> levels;
48 INSERT_UNION_PADDING_BYTES(64); 48 INSERT_PADDING_BYTES_NOINIT(64);
49}; 49};
50static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size."); 50static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
51 51
52struct NCASectionHeaderBlock { 52struct NCASectionHeaderBlock {
53 INSERT_UNION_PADDING_BYTES(3); 53 INSERT_PADDING_BYTES_NOINIT(3);
54 NCASectionFilesystemType filesystem_type; 54 NCASectionFilesystemType filesystem_type;
55 NCASectionCryptoType crypto_type; 55 NCASectionCryptoType crypto_type;
56 INSERT_UNION_PADDING_BYTES(3); 56 INSERT_PADDING_BYTES_NOINIT(3);
57}; 57};
58static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size."); 58static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
59 59
@@ -61,7 +61,7 @@ struct NCASectionRaw {
61 NCASectionHeaderBlock header; 61 NCASectionHeaderBlock header;
62 std::array<u8, 0x138> block_data; 62 std::array<u8, 0x138> block_data;
63 std::array<u8, 0x8> section_ctr; 63 std::array<u8, 0x8> section_ctr;
64 INSERT_UNION_PADDING_BYTES(0xB8); 64 INSERT_PADDING_BYTES_NOINIT(0xB8);
65}; 65};
66static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size."); 66static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
67 67
@@ -69,19 +69,19 @@ struct PFS0Superblock {
69 NCASectionHeaderBlock header_block; 69 NCASectionHeaderBlock header_block;
70 std::array<u8, 0x20> hash; 70 std::array<u8, 0x20> hash;
71 u32_le size; 71 u32_le size;
72 INSERT_UNION_PADDING_BYTES(4); 72 INSERT_PADDING_BYTES_NOINIT(4);
73 u64_le hash_table_offset; 73 u64_le hash_table_offset;
74 u64_le hash_table_size; 74 u64_le hash_table_size;
75 u64_le pfs0_header_offset; 75 u64_le pfs0_header_offset;
76 u64_le pfs0_size; 76 u64_le pfs0_size;
77 INSERT_UNION_PADDING_BYTES(0x1B0); 77 INSERT_PADDING_BYTES_NOINIT(0x1B0);
78}; 78};
79static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size."); 79static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
80 80
81struct RomFSSuperblock { 81struct RomFSSuperblock {
82 NCASectionHeaderBlock header_block; 82 NCASectionHeaderBlock header_block;
83 IVFCHeader ivfc; 83 IVFCHeader ivfc;
84 INSERT_UNION_PADDING_BYTES(0x118); 84 INSERT_PADDING_BYTES_NOINIT(0x118);
85}; 85};
86static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size."); 86static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
87 87
@@ -89,19 +89,19 @@ struct BKTRHeader {
89 u64_le offset; 89 u64_le offset;
90 u64_le size; 90 u64_le size;
91 u32_le magic; 91 u32_le magic;
92 INSERT_UNION_PADDING_BYTES(0x4); 92 INSERT_PADDING_BYTES_NOINIT(0x4);
93 u32_le number_entries; 93 u32_le number_entries;
94 INSERT_UNION_PADDING_BYTES(0x4); 94 INSERT_PADDING_BYTES_NOINIT(0x4);
95}; 95};
96static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size."); 96static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
97 97
98struct BKTRSuperblock { 98struct BKTRSuperblock {
99 NCASectionHeaderBlock header_block; 99 NCASectionHeaderBlock header_block;
100 IVFCHeader ivfc; 100 IVFCHeader ivfc;
101 INSERT_UNION_PADDING_BYTES(0x18); 101 INSERT_PADDING_BYTES_NOINIT(0x18);
102 BKTRHeader relocation; 102 BKTRHeader relocation;
103 BKTRHeader subsection; 103 BKTRHeader subsection;
104 INSERT_UNION_PADDING_BYTES(0xC0); 104 INSERT_PADDING_BYTES_NOINIT(0xC0);
105}; 105};
106static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size."); 106static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");
107 107
diff --git a/src/core/file_sys/savedata_factory.h b/src/core/file_sys/savedata_factory.h
index 17f774baa..86c9f5350 100644
--- a/src/core/file_sys/savedata_factory.h
+++ b/src/core/file_sys/savedata_factory.h
@@ -58,7 +58,7 @@ struct SaveDataAttribute {
58 SaveDataType type; 58 SaveDataType type;
59 SaveDataRank rank; 59 SaveDataRank rank;
60 u16 index; 60 u16 index;
61 INSERT_PADDING_BYTES(4); 61 INSERT_PADDING_BYTES_NOINIT(4);
62 u64 zero_1; 62 u64 zero_1;
63 u64 zero_2; 63 u64 zero_2;
64 u64 zero_3; 64 u64 zero_3;
@@ -72,7 +72,7 @@ struct SaveDataExtraData {
72 u64 owner_id; 72 u64 owner_id;
73 s64 timestamp; 73 s64 timestamp;
74 SaveDataFlags flags; 74 SaveDataFlags flags;
75 INSERT_PADDING_BYTES(4); 75 INSERT_PADDING_BYTES_NOINIT(4);
76 s64 available_size; 76 s64 available_size;
77 s64 journal_size; 77 s64 journal_size;
78 s64 commit_id; 78 s64 commit_id;
diff --git a/src/core/frontend/emu_window.cpp b/src/core/frontend/emu_window.cpp
index 8c1193894..ee7a58b1c 100644
--- a/src/core/frontend/emu_window.cpp
+++ b/src/core/frontend/emu_window.cpp
@@ -21,21 +21,18 @@ public:
21 21
22 std::mutex mutex; 22 std::mutex mutex;
23 23
24 bool touch_pressed = false; ///< True if touchpad area is currently pressed, otherwise false 24 Input::TouchStatus status;
25
26 float touch_x = 0.0f; ///< Touchpad X-position
27 float touch_y = 0.0f; ///< Touchpad Y-position
28 25
29private: 26private:
30 class Device : public Input::TouchDevice { 27 class Device : public Input::TouchDevice {
31 public: 28 public:
32 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {} 29 explicit Device(std::weak_ptr<TouchState>&& touch_state) : touch_state(touch_state) {}
33 std::tuple<float, float, bool> GetStatus() const override { 30 Input::TouchStatus GetStatus() const override {
34 if (auto state = touch_state.lock()) { 31 if (auto state = touch_state.lock()) {
35 std::lock_guard guard{state->mutex}; 32 std::lock_guard guard{state->mutex};
36 return std::make_tuple(state->touch_x, state->touch_y, state->touch_pressed); 33 return state->status;
37 } 34 }
38 return std::make_tuple(0.0f, 0.0f, false); 35 return {};
39 } 36 }
40 37
41 private: 38 private:
@@ -79,36 +76,44 @@ std::tuple<unsigned, unsigned> EmuWindow::ClipToTouchScreen(unsigned new_x, unsi
79 return std::make_tuple(new_x, new_y); 76 return std::make_tuple(new_x, new_y);
80} 77}
81 78
82void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y) { 79void EmuWindow::TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
83 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) 80 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) {
84 return; 81 return;
82 }
83 if (id >= touch_state->status.size()) {
84 return;
85 }
85 86
86 std::lock_guard guard{touch_state->mutex}; 87 std::lock_guard guard{touch_state->mutex};
87 touch_state->touch_x = 88 const float x =
88 static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) / 89 static_cast<float>(framebuffer_x - framebuffer_layout.screen.left) /
89 static_cast<float>(framebuffer_layout.screen.right - framebuffer_layout.screen.left); 90 static_cast<float>(framebuffer_layout.screen.right - framebuffer_layout.screen.left);
90 touch_state->touch_y = 91 const float y =
91 static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) / 92 static_cast<float>(framebuffer_y - framebuffer_layout.screen.top) /
92 static_cast<float>(framebuffer_layout.screen.bottom - framebuffer_layout.screen.top); 93 static_cast<float>(framebuffer_layout.screen.bottom - framebuffer_layout.screen.top);
93 94
94 touch_state->touch_pressed = true; 95 touch_state->status[id] = std::make_tuple(x, y, true);
95} 96}
96 97
97void EmuWindow::TouchReleased() { 98void EmuWindow::TouchReleased(std::size_t id) {
99 if (id >= touch_state->status.size()) {
100 return;
101 }
98 std::lock_guard guard{touch_state->mutex}; 102 std::lock_guard guard{touch_state->mutex};
99 touch_state->touch_pressed = false; 103 touch_state->status[id] = std::make_tuple(0.0f, 0.0f, false);
100 touch_state->touch_x = 0;
101 touch_state->touch_y = 0;
102} 104}
103 105
104void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y) { 106void EmuWindow::TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id) {
105 if (!touch_state->touch_pressed) 107 if (id >= touch_state->status.size()) {
108 return;
109 }
110 if (!std::get<2>(touch_state->status[id]))
106 return; 111 return;
107 112
108 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y)) 113 if (!IsWithinTouchscreen(framebuffer_layout, framebuffer_x, framebuffer_y))
109 std::tie(framebuffer_x, framebuffer_y) = ClipToTouchScreen(framebuffer_x, framebuffer_y); 114 std::tie(framebuffer_x, framebuffer_y) = ClipToTouchScreen(framebuffer_x, framebuffer_y);
110 115
111 TouchPressed(framebuffer_x, framebuffer_y); 116 TouchPressed(framebuffer_x, framebuffer_y, id);
112} 117}
113 118
114void EmuWindow::UpdateCurrentFramebufferLayout(unsigned width, unsigned height) { 119void EmuWindow::UpdateCurrentFramebufferLayout(unsigned width, unsigned height) {
diff --git a/src/core/frontend/emu_window.h b/src/core/frontend/emu_window.h
index 276d2b906..2436c6580 100644
--- a/src/core/frontend/emu_window.h
+++ b/src/core/frontend/emu_window.h
@@ -117,18 +117,23 @@ public:
117 * Signal that a touch pressed event has occurred (e.g. mouse click pressed) 117 * Signal that a touch pressed event has occurred (e.g. mouse click pressed)
118 * @param framebuffer_x Framebuffer x-coordinate that was pressed 118 * @param framebuffer_x Framebuffer x-coordinate that was pressed
119 * @param framebuffer_y Framebuffer y-coordinate that was pressed 119 * @param framebuffer_y Framebuffer y-coordinate that was pressed
120 * @param id Touch event ID
120 */ 121 */
121 void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y); 122 void TouchPressed(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);
122 123
123 /// Signal that a touch released event has occurred (e.g. mouse click released) 124 /**
124 void TouchReleased(); 125 * Signal that a touch released event has occurred (e.g. mouse click released)
126 * @param id Touch event ID
127 */
128 void TouchReleased(std::size_t id);
125 129
126 /** 130 /**
127 * Signal that a touch movement event has occurred (e.g. mouse was moved over the emu window) 131 * Signal that a touch movement event has occurred (e.g. mouse was moved over the emu window)
128 * @param framebuffer_x Framebuffer x-coordinate 132 * @param framebuffer_x Framebuffer x-coordinate
129 * @param framebuffer_y Framebuffer y-coordinate 133 * @param framebuffer_y Framebuffer y-coordinate
134 * @param id Touch event ID
130 */ 135 */
131 void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y); 136 void TouchMoved(unsigned framebuffer_x, unsigned framebuffer_y, std::size_t id);
132 137
133 /** 138 /**
134 * Returns currently active configuration. 139 * Returns currently active configuration.
diff --git a/src/core/frontend/input.h b/src/core/frontend/input.h
index de51a754e..f014dfea3 100644
--- a/src/core/frontend/input.h
+++ b/src/core/frontend/input.h
@@ -163,10 +163,11 @@ using MotionStatus = std::tuple<Common::Vec3<float>, Common::Vec3<float>, Common
163using MotionDevice = InputDevice<MotionStatus>; 163using MotionDevice = InputDevice<MotionStatus>;
164 164
165/** 165/**
166 * A touch status is an object that returns a tuple of two floats and a bool. The floats are 166 * A touch status is an object that returns an array of 16 tuple elements of two floats and a bool.
167 * x and y coordinates in the range 0.0 - 1.0, and the bool indicates whether it is pressed. 167 * The floats are x and y coordinates in the range 0.0 - 1.0, and the bool indicates whether it is
168 * pressed.
168 */ 169 */
169using TouchStatus = std::tuple<float, float, bool>; 170using TouchStatus = std::array<std::tuple<float, float, bool>, 16>;
170 171
171/** 172/**
172 * A touch device is an input device that returns a touch status object 173 * A touch device is an input device that returns a touch status object
diff --git a/src/core/frontend/input_interpreter.cpp b/src/core/frontend/input_interpreter.cpp
index 66ae506cd..ec5fe660e 100644
--- a/src/core/frontend/input_interpreter.cpp
+++ b/src/core/frontend/input_interpreter.cpp
@@ -25,6 +25,10 @@ void InputInterpreter::PollInput() {
25 button_states[current_index] = button_state; 25 button_states[current_index] = button_state;
26} 26}
27 27
28bool InputInterpreter::IsButtonPressed(HIDButton button) const {
29 return (button_states[current_index] & (1U << static_cast<u8>(button))) != 0;
30}
31
28bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const { 32bool InputInterpreter::IsButtonPressedOnce(HIDButton button) const {
29 const bool current_press = 33 const bool current_press =
30 (button_states[current_index] & (1U << static_cast<u8>(button))) != 0; 34 (button_states[current_index] & (1U << static_cast<u8>(button))) != 0;
diff --git a/src/core/frontend/input_interpreter.h b/src/core/frontend/input_interpreter.h
index fea9aebe6..73fc47ffb 100644
--- a/src/core/frontend/input_interpreter.h
+++ b/src/core/frontend/input_interpreter.h
@@ -67,6 +67,27 @@ public:
67 void PollInput(); 67 void PollInput();
68 68
69 /** 69 /**
70 * Checks whether the button is pressed.
71 *
72 * @param button The button to check.
73 *
74 * @returns True when the button is pressed.
75 */
76 [[nodiscard]] bool IsButtonPressed(HIDButton button) const;
77
78 /**
79 * Checks whether any of the buttons in the parameter list is pressed.
80 *
81 * @tparam HIDButton The buttons to check.
82 *
83 * @returns True when at least one of the buttons is pressed.
84 */
85 template <HIDButton... T>
86 [[nodiscard]] bool IsAnyButtonPressed() {
87 return (IsButtonPressed(T) || ...);
88 }
89
90 /**
70 * The specified button is considered to be pressed once 91 * The specified button is considered to be pressed once
71 * if it is currently pressed and not pressed previously. 92 * if it is currently pressed and not pressed previously.
72 * 93 *
@@ -79,12 +100,12 @@ public:
79 /** 100 /**
80 * Checks whether any of the buttons in the parameter list is pressed once. 101 * Checks whether any of the buttons in the parameter list is pressed once.
81 * 102 *
82 * @tparam HIDButton The buttons to check. 103 * @tparam T The buttons to check.
83 * 104 *
84 * @returns True when at least one of the buttons is pressed once. 105 * @returns True when at least one of the buttons is pressed once.
85 */ 106 */
86 template <HIDButton... T> 107 template <HIDButton... T>
87 [[nodiscard]] bool IsAnyButtonPressedOnce() { 108 [[nodiscard]] bool IsAnyButtonPressedOnce() const {
88 return (IsButtonPressedOnce(T) || ...); 109 return (IsButtonPressedOnce(T) || ...);
89 } 110 }
90 111
@@ -100,12 +121,12 @@ public:
100 /** 121 /**
101 * Checks whether any of the buttons in the parameter list is held down. 122 * Checks whether any of the buttons in the parameter list is held down.
102 * 123 *
103 * @tparam HIDButton The buttons to check. 124 * @tparam T The buttons to check.
104 * 125 *
105 * @returns True when at least one of the buttons is held down. 126 * @returns True when at least one of the buttons is held down.
106 */ 127 */
107 template <HIDButton... T> 128 template <HIDButton... T>
108 [[nodiscard]] bool IsAnyButtonHeld() { 129 [[nodiscard]] bool IsAnyButtonHeld() const {
109 return (IsButtonHeld(T) || ...); 130 return (IsButtonHeld(T) || ...);
110 } 131 }
111 132
diff --git a/src/core/hle/ipc.h b/src/core/hle/ipc.h
index 7ce313190..55b1716e4 100644
--- a/src/core/hle/ipc.h
+++ b/src/core/hle/ipc.h
@@ -146,7 +146,7 @@ static_assert(sizeof(BufferDescriptorC) == 8, "BufferDescriptorC size is incorre
146 146
147struct DataPayloadHeader { 147struct DataPayloadHeader {
148 u32_le magic; 148 u32_le magic;
149 INSERT_PADDING_WORDS(1); 149 INSERT_PADDING_WORDS_NOINIT(1);
150}; 150};
151static_assert(sizeof(DataPayloadHeader) == 8, "DataPayloadHeader size is incorrect"); 151static_assert(sizeof(DataPayloadHeader) == 8, "DataPayloadHeader size is incorrect");
152 152
@@ -160,7 +160,7 @@ struct DomainMessageHeader {
160 // Used when responding to an IPC request, Server -> Client. 160 // Used when responding to an IPC request, Server -> Client.
161 struct { 161 struct {
162 u32_le num_objects; 162 u32_le num_objects;
163 INSERT_UNION_PADDING_WORDS(3); 163 INSERT_PADDING_WORDS_NOINIT(3);
164 }; 164 };
165 165
166 // Used when performing an IPC request, Client -> Server. 166 // Used when performing an IPC request, Client -> Server.
@@ -171,10 +171,10 @@ struct DomainMessageHeader {
171 BitField<16, 16, u32> size; 171 BitField<16, 16, u32> size;
172 }; 172 };
173 u32_le object_id; 173 u32_le object_id;
174 INSERT_UNION_PADDING_WORDS(2); 174 INSERT_PADDING_WORDS_NOINIT(2);
175 }; 175 };
176 176
177 std::array<u32, 4> raw{}; 177 std::array<u32, 4> raw;
178 }; 178 };
179}; 179};
180static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect"); 180static_assert(sizeof(DomainMessageHeader) == 16, "DomainMessageHeader size is incorrect");
diff --git a/src/core/hle/kernel/address_arbiter.cpp b/src/core/hle/kernel/address_arbiter.cpp
deleted file mode 100644
index 20ffa7d47..000000000
--- a/src/core/hle/kernel/address_arbiter.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <vector>
7
8#include "common/assert.h"
9#include "common/common_types.h"
10#include "core/arm/exclusive_monitor.h"
11#include "core/core.h"
12#include "core/hle/kernel/address_arbiter.h"
13#include "core/hle/kernel/errors.h"
14#include "core/hle/kernel/handle_table.h"
15#include "core/hle/kernel/k_scheduler.h"
16#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
17#include "core/hle/kernel/kernel.h"
18#include "core/hle/kernel/thread.h"
19#include "core/hle/kernel/time_manager.h"
20#include "core/hle/result.h"
21#include "core/memory.h"
22
23namespace Kernel {
24
25// Wake up num_to_wake (or all) threads in a vector.
26void AddressArbiter::WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads,
27 s32 num_to_wake) {
28 // Only process up to 'target' threads, unless 'target' is <= 0, in which case process
29 // them all.
30 std::size_t last = waiting_threads.size();
31 if (num_to_wake > 0) {
32 last = std::min(last, static_cast<std::size_t>(num_to_wake));
33 }
34
35 // Signal the waiting threads.
36 for (std::size_t i = 0; i < last; i++) {
37 waiting_threads[i]->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
38 RemoveThread(waiting_threads[i]);
39 waiting_threads[i]->WaitForArbitration(false);
40 waiting_threads[i]->ResumeFromWait();
41 }
42}
43
44AddressArbiter::AddressArbiter(Core::System& system) : system{system} {}
45AddressArbiter::~AddressArbiter() = default;
46
47ResultCode AddressArbiter::SignalToAddress(VAddr address, SignalType type, s32 value,
48 s32 num_to_wake) {
49 switch (type) {
50 case SignalType::Signal:
51 return SignalToAddressOnly(address, num_to_wake);
52 case SignalType::IncrementAndSignalIfEqual:
53 return IncrementAndSignalToAddressIfEqual(address, value, num_to_wake);
54 case SignalType::ModifyByWaitingCountAndSignalIfEqual:
55 return ModifyByWaitingCountAndSignalToAddressIfEqual(address, value, num_to_wake);
56 default:
57 return ERR_INVALID_ENUM_VALUE;
58 }
59}
60
61ResultCode AddressArbiter::SignalToAddressOnly(VAddr address, s32 num_to_wake) {
62 KScopedSchedulerLock lock(system.Kernel());
63 const std::vector<std::shared_ptr<Thread>> waiting_threads =
64 GetThreadsWaitingOnAddress(address);
65 WakeThreads(waiting_threads, num_to_wake);
66 return RESULT_SUCCESS;
67}
68
69ResultCode AddressArbiter::IncrementAndSignalToAddressIfEqual(VAddr address, s32 value,
70 s32 num_to_wake) {
71 KScopedSchedulerLock lock(system.Kernel());
72 auto& memory = system.Memory();
73
74 // Ensure that we can write to the address.
75 if (!memory.IsValidVirtualAddress(address)) {
76 return ERR_INVALID_ADDRESS_STATE;
77 }
78
79 const std::size_t current_core = system.CurrentCoreIndex();
80 auto& monitor = system.Monitor();
81 u32 current_value;
82 do {
83 current_value = monitor.ExclusiveRead32(current_core, address);
84
85 if (current_value != static_cast<u32>(value)) {
86 return ERR_INVALID_STATE;
87 }
88 current_value++;
89 } while (!monitor.ExclusiveWrite32(current_core, address, current_value));
90
91 return SignalToAddressOnly(address, num_to_wake);
92}
93
94ResultCode AddressArbiter::ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
95 s32 num_to_wake) {
96 KScopedSchedulerLock lock(system.Kernel());
97 auto& memory = system.Memory();
98
99 // Ensure that we can write to the address.
100 if (!memory.IsValidVirtualAddress(address)) {
101 return ERR_INVALID_ADDRESS_STATE;
102 }
103
104 // Get threads waiting on the address.
105 const std::vector<std::shared_ptr<Thread>> waiting_threads =
106 GetThreadsWaitingOnAddress(address);
107
108 const std::size_t current_core = system.CurrentCoreIndex();
109 auto& monitor = system.Monitor();
110 s32 updated_value;
111 do {
112 updated_value = monitor.ExclusiveRead32(current_core, address);
113
114 if (updated_value != value) {
115 return ERR_INVALID_STATE;
116 }
117 // Determine the modified value depending on the waiting count.
118 if (num_to_wake <= 0) {
119 if (waiting_threads.empty()) {
120 updated_value = value + 1;
121 } else {
122 updated_value = value - 1;
123 }
124 } else {
125 if (waiting_threads.empty()) {
126 updated_value = value + 1;
127 } else if (waiting_threads.size() <= static_cast<u32>(num_to_wake)) {
128 updated_value = value - 1;
129 } else {
130 updated_value = value;
131 }
132 }
133 } while (!monitor.ExclusiveWrite32(current_core, address, updated_value));
134
135 WakeThreads(waiting_threads, num_to_wake);
136 return RESULT_SUCCESS;
137}
138
139ResultCode AddressArbiter::WaitForAddress(VAddr address, ArbitrationType type, s32 value,
140 s64 timeout_ns) {
141 switch (type) {
142 case ArbitrationType::WaitIfLessThan:
143 return WaitForAddressIfLessThan(address, value, timeout_ns, false);
144 case ArbitrationType::DecrementAndWaitIfLessThan:
145 return WaitForAddressIfLessThan(address, value, timeout_ns, true);
146 case ArbitrationType::WaitIfEqual:
147 return WaitForAddressIfEqual(address, value, timeout_ns);
148 default:
149 return ERR_INVALID_ENUM_VALUE;
150 }
151}
152
153ResultCode AddressArbiter::WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
154 bool should_decrement) {
155 auto& memory = system.Memory();
156 auto& kernel = system.Kernel();
157 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
158
159 Handle event_handle = InvalidHandle;
160 {
161 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
162
163 if (current_thread->IsPendingTermination()) {
164 lock.CancelSleep();
165 return ERR_THREAD_TERMINATING;
166 }
167
168 // Ensure that we can read the address.
169 if (!memory.IsValidVirtualAddress(address)) {
170 lock.CancelSleep();
171 return ERR_INVALID_ADDRESS_STATE;
172 }
173
174 s32 current_value = static_cast<s32>(memory.Read32(address));
175 if (current_value >= value) {
176 lock.CancelSleep();
177 return ERR_INVALID_STATE;
178 }
179
180 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
181
182 s32 decrement_value;
183
184 const std::size_t current_core = system.CurrentCoreIndex();
185 auto& monitor = system.Monitor();
186 do {
187 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
188 if (should_decrement) {
189 decrement_value = current_value - 1;
190 } else {
191 decrement_value = current_value;
192 }
193 } while (
194 !monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value)));
195
196 // Short-circuit without rescheduling, if timeout is zero.
197 if (timeout == 0) {
198 lock.CancelSleep();
199 return RESULT_TIMEOUT;
200 }
201
202 current_thread->SetArbiterWaitAddress(address);
203 InsertThread(SharedFrom(current_thread));
204 current_thread->SetStatus(ThreadStatus::WaitArb);
205 current_thread->WaitForArbitration(true);
206 }
207
208 if (event_handle != InvalidHandle) {
209 auto& time_manager = kernel.TimeManager();
210 time_manager.UnscheduleTimeEvent(event_handle);
211 }
212
213 {
214 KScopedSchedulerLock lock(kernel);
215 if (current_thread->IsWaitingForArbitration()) {
216 RemoveThread(SharedFrom(current_thread));
217 current_thread->WaitForArbitration(false);
218 }
219 }
220
221 return current_thread->GetSignalingResult();
222}
223
224ResultCode AddressArbiter::WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout) {
225 auto& memory = system.Memory();
226 auto& kernel = system.Kernel();
227 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
228
229 Handle event_handle = InvalidHandle;
230 {
231 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, timeout);
232
233 if (current_thread->IsPendingTermination()) {
234 lock.CancelSleep();
235 return ERR_THREAD_TERMINATING;
236 }
237
238 // Ensure that we can read the address.
239 if (!memory.IsValidVirtualAddress(address)) {
240 lock.CancelSleep();
241 return ERR_INVALID_ADDRESS_STATE;
242 }
243
244 s32 current_value = static_cast<s32>(memory.Read32(address));
245 if (current_value != value) {
246 lock.CancelSleep();
247 return ERR_INVALID_STATE;
248 }
249
250 // Short-circuit without rescheduling, if timeout is zero.
251 if (timeout == 0) {
252 lock.CancelSleep();
253 return RESULT_TIMEOUT;
254 }
255
256 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
257 current_thread->SetArbiterWaitAddress(address);
258 InsertThread(SharedFrom(current_thread));
259 current_thread->SetStatus(ThreadStatus::WaitArb);
260 current_thread->WaitForArbitration(true);
261 }
262
263 if (event_handle != InvalidHandle) {
264 auto& time_manager = kernel.TimeManager();
265 time_manager.UnscheduleTimeEvent(event_handle);
266 }
267
268 {
269 KScopedSchedulerLock lock(kernel);
270 if (current_thread->IsWaitingForArbitration()) {
271 RemoveThread(SharedFrom(current_thread));
272 current_thread->WaitForArbitration(false);
273 }
274 }
275
276 return current_thread->GetSignalingResult();
277}
278
279void AddressArbiter::InsertThread(std::shared_ptr<Thread> thread) {
280 const VAddr arb_addr = thread->GetArbiterWaitAddress();
281 std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
282
283 const auto iter =
284 std::find_if(thread_list.cbegin(), thread_list.cend(), [&thread](const auto& entry) {
285 return entry->GetPriority() >= thread->GetPriority();
286 });
287
288 if (iter == thread_list.cend()) {
289 thread_list.push_back(std::move(thread));
290 } else {
291 thread_list.insert(iter, std::move(thread));
292 }
293}
294
295void AddressArbiter::RemoveThread(std::shared_ptr<Thread> thread) {
296 const VAddr arb_addr = thread->GetArbiterWaitAddress();
297 std::list<std::shared_ptr<Thread>>& thread_list = arb_threads[arb_addr];
298
299 const auto iter = std::find_if(thread_list.cbegin(), thread_list.cend(),
300 [&thread](const auto& entry) { return thread == entry; });
301
302 if (iter != thread_list.cend()) {
303 thread_list.erase(iter);
304 }
305}
306
307std::vector<std::shared_ptr<Thread>> AddressArbiter::GetThreadsWaitingOnAddress(
308 VAddr address) const {
309 const auto iter = arb_threads.find(address);
310 if (iter == arb_threads.cend()) {
311 return {};
312 }
313
314 const std::list<std::shared_ptr<Thread>>& thread_list = iter->second;
315 return {thread_list.cbegin(), thread_list.cend()};
316}
317} // namespace Kernel
diff --git a/src/core/hle/kernel/address_arbiter.h b/src/core/hle/kernel/address_arbiter.h
deleted file mode 100644
index b91edc67d..000000000
--- a/src/core/hle/kernel/address_arbiter.h
+++ /dev/null
@@ -1,91 +0,0 @@
1// Copyright 2018 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <list>
8#include <memory>
9#include <unordered_map>
10#include <vector>
11
12#include "common/common_types.h"
13
14union ResultCode;
15
16namespace Core {
17class System;
18}
19
20namespace Kernel {
21
22class Thread;
23
24class AddressArbiter {
25public:
26 enum class ArbitrationType {
27 WaitIfLessThan = 0,
28 DecrementAndWaitIfLessThan = 1,
29 WaitIfEqual = 2,
30 };
31
32 enum class SignalType {
33 Signal = 0,
34 IncrementAndSignalIfEqual = 1,
35 ModifyByWaitingCountAndSignalIfEqual = 2,
36 };
37
38 explicit AddressArbiter(Core::System& system);
39 ~AddressArbiter();
40
41 AddressArbiter(const AddressArbiter&) = delete;
42 AddressArbiter& operator=(const AddressArbiter&) = delete;
43
44 AddressArbiter(AddressArbiter&&) = default;
45 AddressArbiter& operator=(AddressArbiter&&) = delete;
46
47 /// Signals an address being waited on with a particular signaling type.
48 ResultCode SignalToAddress(VAddr address, SignalType type, s32 value, s32 num_to_wake);
49
50 /// Waits on an address with a particular arbitration type.
51 ResultCode WaitForAddress(VAddr address, ArbitrationType type, s32 value, s64 timeout_ns);
52
53private:
54 /// Signals an address being waited on.
55 ResultCode SignalToAddressOnly(VAddr address, s32 num_to_wake);
56
57 /// Signals an address being waited on and increments its value if equal to the value argument.
58 ResultCode IncrementAndSignalToAddressIfEqual(VAddr address, s32 value, s32 num_to_wake);
59
60 /// Signals an address being waited on and modifies its value based on waiting thread count if
61 /// equal to the value argument.
62 ResultCode ModifyByWaitingCountAndSignalToAddressIfEqual(VAddr address, s32 value,
63 s32 num_to_wake);
64
65 /// Waits on an address if the value passed is less than the argument value,
66 /// optionally decrementing.
67 ResultCode WaitForAddressIfLessThan(VAddr address, s32 value, s64 timeout,
68 bool should_decrement);
69
70 /// Waits on an address if the value passed is equal to the argument value.
71 ResultCode WaitForAddressIfEqual(VAddr address, s32 value, s64 timeout);
72
73 /// Wake up num_to_wake (or all) threads in a vector.
74 void WakeThreads(const std::vector<std::shared_ptr<Thread>>& waiting_threads, s32 num_to_wake);
75
76 /// Insert a thread into the address arbiter container
77 void InsertThread(std::shared_ptr<Thread> thread);
78
79 /// Removes a thread from the address arbiter container
80 void RemoveThread(std::shared_ptr<Thread> thread);
81
 82 /// Gets the threads waiting on an address.
83 std::vector<std::shared_ptr<Thread>> GetThreadsWaitingOnAddress(VAddr address) const;
84
 85 /// List of threads waiting for an address arbiter
86 std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> arb_threads;
87
88 Core::System& system;
89};
90
91} // namespace Kernel
diff --git a/src/core/hle/kernel/client_port.cpp b/src/core/hle/kernel/client_port.cpp
index 8aff2227a..f8f005f15 100644
--- a/src/core/hle/kernel/client_port.cpp
+++ b/src/core/hle/kernel/client_port.cpp
@@ -33,9 +33,6 @@ ResultVal<std::shared_ptr<ClientSession>> ClientPort::Connect() {
33 server_port->AppendPendingSession(std::move(server)); 33 server_port->AppendPendingSession(std::move(server));
34 } 34 }
35 35
36 // Wake the threads waiting on the ServerPort
37 server_port->Signal();
38
39 return MakeResult(std::move(client)); 36 return MakeResult(std::move(client));
40} 37}
41 38
diff --git a/src/core/hle/kernel/client_session.cpp b/src/core/hle/kernel/client_session.cpp
index be9eba519..e8e52900d 100644
--- a/src/core/hle/kernel/client_session.cpp
+++ b/src/core/hle/kernel/client_session.cpp
@@ -12,7 +12,7 @@
12 12
13namespace Kernel { 13namespace Kernel {
14 14
15ClientSession::ClientSession(KernelCore& kernel) : SynchronizationObject{kernel} {} 15ClientSession::ClientSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
16 16
17ClientSession::~ClientSession() { 17ClientSession::~ClientSession() {
18 // This destructor will be called automatically when the last ClientSession handle is closed by 18 // This destructor will be called automatically when the last ClientSession handle is closed by
@@ -22,15 +22,6 @@ ClientSession::~ClientSession() {
22 } 22 }
23} 23}
24 24
25bool ClientSession::ShouldWait(const Thread* thread) const {
26 UNIMPLEMENTED();
27 return {};
28}
29
30void ClientSession::Acquire(Thread* thread) {
31 UNIMPLEMENTED();
32}
33
34bool ClientSession::IsSignaled() const { 25bool ClientSession::IsSignaled() const {
35 UNIMPLEMENTED(); 26 UNIMPLEMENTED();
36 return true; 27 return true;
diff --git a/src/core/hle/kernel/client_session.h b/src/core/hle/kernel/client_session.h
index e5e0690c2..d5c9ebee8 100644
--- a/src/core/hle/kernel/client_session.h
+++ b/src/core/hle/kernel/client_session.h
@@ -7,7 +7,7 @@
7#include <memory> 7#include <memory>
8#include <string> 8#include <string>
9 9
10#include "core/hle/kernel/synchronization_object.h" 10#include "core/hle/kernel/k_synchronization_object.h"
11#include "core/hle/result.h" 11#include "core/hle/result.h"
12 12
13union ResultCode; 13union ResultCode;
@@ -26,7 +26,7 @@ class KernelCore;
26class Session; 26class Session;
27class Thread; 27class Thread;
28 28
29class ClientSession final : public SynchronizationObject { 29class ClientSession final : public KSynchronizationObject {
30public: 30public:
31 explicit ClientSession(KernelCore& kernel); 31 explicit ClientSession(KernelCore& kernel);
32 ~ClientSession() override; 32 ~ClientSession() override;
@@ -49,10 +49,6 @@ public:
49 ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory, 49 ResultCode SendSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
50 Core::Timing::CoreTiming& core_timing); 50 Core::Timing::CoreTiming& core_timing);
51 51
52 bool ShouldWait(const Thread* thread) const override;
53
54 void Acquire(Thread* thread) override;
55
56 bool IsSignaled() const override; 52 bool IsSignaled() const override;
57 53
58private: 54private:
diff --git a/src/core/hle/kernel/errors.h b/src/core/hle/kernel/errors.h
index d4e5d88cf..7d32a39f0 100644
--- a/src/core/hle/kernel/errors.h
+++ b/src/core/hle/kernel/errors.h
@@ -13,12 +13,14 @@ namespace Kernel {
13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7}; 13constexpr ResultCode ERR_MAX_CONNECTIONS_REACHED{ErrorModule::Kernel, 7};
14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14}; 14constexpr ResultCode ERR_INVALID_CAPABILITY_DESCRIPTOR{ErrorModule::Kernel, 14};
15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59}; 15constexpr ResultCode ERR_THREAD_TERMINATING{ErrorModule::Kernel, 59};
16constexpr ResultCode ERR_TERMINATION_REQUESTED{ErrorModule::Kernel, 59};
16constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101}; 17constexpr ResultCode ERR_INVALID_SIZE{ErrorModule::Kernel, 101};
17constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102}; 18constexpr ResultCode ERR_INVALID_ADDRESS{ErrorModule::Kernel, 102};
18constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103}; 19constexpr ResultCode ERR_OUT_OF_RESOURCES{ErrorModule::Kernel, 103};
19constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104}; 20constexpr ResultCode ERR_OUT_OF_MEMORY{ErrorModule::Kernel, 104};
20constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105}; 21constexpr ResultCode ERR_HANDLE_TABLE_FULL{ErrorModule::Kernel, 105};
21constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106}; 22constexpr ResultCode ERR_INVALID_ADDRESS_STATE{ErrorModule::Kernel, 106};
23constexpr ResultCode ERR_INVALID_CURRENT_MEMORY{ErrorModule::Kernel, 106};
22constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108}; 24constexpr ResultCode ERR_INVALID_MEMORY_PERMISSIONS{ErrorModule::Kernel, 108};
23constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110}; 25constexpr ResultCode ERR_INVALID_MEMORY_RANGE{ErrorModule::Kernel, 110};
24constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113}; 26constexpr ResultCode ERR_INVALID_PROCESSOR_ID{ErrorModule::Kernel, 113};
@@ -28,6 +30,7 @@ constexpr ResultCode ERR_INVALID_POINTER{ErrorModule::Kernel, 115};
28constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116}; 30constexpr ResultCode ERR_INVALID_COMBINATION{ErrorModule::Kernel, 116};
29constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117}; 31constexpr ResultCode RESULT_TIMEOUT{ErrorModule::Kernel, 117};
30constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118}; 32constexpr ResultCode ERR_SYNCHRONIZATION_CANCELED{ErrorModule::Kernel, 118};
33constexpr ResultCode ERR_CANCELLED{ErrorModule::Kernel, 118};
31constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119}; 34constexpr ResultCode ERR_OUT_OF_RANGE{ErrorModule::Kernel, 119};
32constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120}; 35constexpr ResultCode ERR_INVALID_ENUM_VALUE{ErrorModule::Kernel, 120};
33constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121}; 36constexpr ResultCode ERR_NOT_FOUND{ErrorModule::Kernel, 121};
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
new file mode 100644
index 000000000..d9e702f13
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -0,0 +1,367 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/arm/exclusive_monitor.h"
6#include "core/core.h"
7#include "core/hle/kernel/k_address_arbiter.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/kernel/time_manager.h"
14#include "core/memory.h"
15
16namespace Kernel {
17
18KAddressArbiter::KAddressArbiter(Core::System& system_)
19 : system{system_}, kernel{system.Kernel()} {}
20KAddressArbiter::~KAddressArbiter() = default;
21
22namespace {
23
24bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
25 *out = system.Memory().Read32(address);
26 return true;
27}
28
29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
30 auto& monitor = system.Monitor();
31 const auto current_core = system.CurrentCoreIndex();
32
33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
34 // TODO(bunnei): We should call CanAccessAtomic(..) here.
35
36 // Load the value from the address.
37 const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
38
39 // Compare it to the desired one.
40 if (current_value < value) {
41 // If less than, we want to try to decrement.
42 const s32 decrement_value = current_value - 1;
43
44 // Decrement and try to store.
45 if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(decrement_value))) {
46 // If we failed to store, try again.
47 DecrementIfLessThan(system, out, address, value);
48 }
49 } else {
50 // Otherwise, clear our exclusive hold and finish
51 monitor.ClearExclusive();
52 }
53
54 // We're done.
55 *out = current_value;
56 return true;
57}
58
59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
60 auto& monitor = system.Monitor();
61 const auto current_core = system.CurrentCoreIndex();
62
63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
64 // TODO(bunnei): We should call CanAccessAtomic(..) here.
65
66 // Load the value from the address.
67 const s32 current_value = static_cast<s32>(monitor.ExclusiveRead32(current_core, address));
68
69 // Compare it to the desired one.
70 if (current_value == value) {
71 // If equal, we want to try to write the new value.
72
73 // Try to store.
74 if (!monitor.ExclusiveWrite32(current_core, address, static_cast<u32>(new_value))) {
75 // If we failed to store, try again.
76 UpdateIfEqual(system, out, address, value, new_value);
77 }
78 } else {
79 // Otherwise, clear our exclusive hold and finish.
80 monitor.ClearExclusive();
81 }
82
83 // We're done.
84 *out = current_value;
85 return true;
86}
87
88} // namespace
89
90ResultCode KAddressArbiter::Signal(VAddr addr, s32 count) {
91 // Perform signaling.
92 s32 num_waiters{};
93 {
94 KScopedSchedulerLock sl(kernel);
95
96 auto it = thread_tree.nfind_light({addr, -1});
97 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
98 (it->GetAddressArbiterKey() == addr)) {
99 Thread* target_thread = std::addressof(*it);
100 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
101
102 ASSERT(target_thread->IsWaitingForAddressArbiter());
103 target_thread->Wakeup();
104
105 it = thread_tree.erase(it);
106 target_thread->ClearAddressArbiter();
107 ++num_waiters;
108 }
109 }
110 return RESULT_SUCCESS;
111}
112
113ResultCode KAddressArbiter::SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count) {
114 // Perform signaling.
115 s32 num_waiters{};
116 {
117 KScopedSchedulerLock sl(kernel);
118
119 // Check the userspace value.
120 s32 user_value{};
121 R_UNLESS(UpdateIfEqual(system, std::addressof(user_value), addr, value, value + 1),
122 Svc::ResultInvalidCurrentMemory);
123 R_UNLESS(user_value == value, Svc::ResultInvalidState);
124
125 auto it = thread_tree.nfind_light({addr, -1});
126 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
127 (it->GetAddressArbiterKey() == addr)) {
128 Thread* target_thread = std::addressof(*it);
129 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
130
131 ASSERT(target_thread->IsWaitingForAddressArbiter());
132 target_thread->Wakeup();
133
134 it = thread_tree.erase(it);
135 target_thread->ClearAddressArbiter();
136 ++num_waiters;
137 }
138 }
139 return RESULT_SUCCESS;
140}
141
142ResultCode KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count) {
143 // Perform signaling.
144 s32 num_waiters{};
145 {
146 KScopedSchedulerLock sl(kernel);
147
148 auto it = thread_tree.nfind_light({addr, -1});
149 // Determine the updated value.
150 s32 new_value{};
151 if (/*GetTargetFirmware() >= TargetFirmware_7_0_0*/ true) {
152 if (count <= 0) {
153 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
154 new_value = value - 2;
155 } else {
156 new_value = value + 1;
157 }
158 } else {
159 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
160 auto tmp_it = it;
161 s32 tmp_num_waiters{};
162 while ((++tmp_it != thread_tree.end()) &&
163 (tmp_it->GetAddressArbiterKey() == addr)) {
164 if ((tmp_num_waiters++) >= count) {
165 break;
166 }
167 }
168
169 if (tmp_num_waiters < count) {
170 new_value = value - 1;
171 } else {
172 new_value = value;
173 }
174 } else {
175 new_value = value + 1;
176 }
177 }
178 } else {
179 if (count <= 0) {
180 if ((it != thread_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
181 new_value = value - 1;
182 } else {
183 new_value = value + 1;
184 }
185 } else {
186 auto tmp_it = it;
187 s32 tmp_num_waiters{};
188 while ((tmp_it != thread_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) &&
189 (tmp_num_waiters < count + 1)) {
190 ++tmp_num_waiters;
191 ++tmp_it;
192 }
193
194 if (tmp_num_waiters == 0) {
195 new_value = value + 1;
196 } else if (tmp_num_waiters <= count) {
197 new_value = value - 1;
198 } else {
199 new_value = value;
200 }
201 }
202 }
203
204 // Check the userspace value.
205 s32 user_value{};
206 bool succeeded{};
207 if (value != new_value) {
208 succeeded = UpdateIfEqual(system, std::addressof(user_value), addr, value, new_value);
209 } else {
210 succeeded = ReadFromUser(system, std::addressof(user_value), addr);
211 }
212
213 R_UNLESS(succeeded, Svc::ResultInvalidCurrentMemory);
214 R_UNLESS(user_value == value, Svc::ResultInvalidState);
215
216 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
217 (it->GetAddressArbiterKey() == addr)) {
218 Thread* target_thread = std::addressof(*it);
219 target_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
220
221 ASSERT(target_thread->IsWaitingForAddressArbiter());
222 target_thread->Wakeup();
223
224 it = thread_tree.erase(it);
225 target_thread->ClearAddressArbiter();
226 ++num_waiters;
227 }
228 }
229 return RESULT_SUCCESS;
230}
231
232ResultCode KAddressArbiter::WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout) {
233 // Prepare to wait.
234 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
235 Handle timer = InvalidHandle;
236
237 {
238 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
239
240 // Check that the thread isn't terminating.
241 if (cur_thread->IsTerminationRequested()) {
242 slp.CancelSleep();
243 return Svc::ResultTerminationRequested;
244 }
245
246 // Set the synced object.
247 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
248
249 // Read the value from userspace.
250 s32 user_value{};
251 bool succeeded{};
252 if (decrement) {
253 succeeded = DecrementIfLessThan(system, std::addressof(user_value), addr, value);
254 } else {
255 succeeded = ReadFromUser(system, std::addressof(user_value), addr);
256 }
257
258 if (!succeeded) {
259 slp.CancelSleep();
260 return Svc::ResultInvalidCurrentMemory;
261 }
262
263 // Check that the value is less than the specified one.
264 if (user_value >= value) {
265 slp.CancelSleep();
266 return Svc::ResultInvalidState;
267 }
268
269 // Check that the timeout is non-zero.
270 if (timeout == 0) {
271 slp.CancelSleep();
272 return Svc::ResultTimedOut;
273 }
274
275 // Set the arbiter.
276 cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
277 thread_tree.insert(*cur_thread);
278 cur_thread->SetState(ThreadState::Waiting);
279 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
280 }
281
282 // Cancel the timer wait.
283 if (timer != InvalidHandle) {
284 auto& time_manager = kernel.TimeManager();
285 time_manager.UnscheduleTimeEvent(timer);
286 }
287
288 // Remove from the address arbiter.
289 {
290 KScopedSchedulerLock sl(kernel);
291
292 if (cur_thread->IsWaitingForAddressArbiter()) {
293 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
294 cur_thread->ClearAddressArbiter();
295 }
296 }
297
298 // Get the result.
299 KSynchronizationObject* dummy{};
300 return cur_thread->GetWaitResult(std::addressof(dummy));
301}
302
303ResultCode KAddressArbiter::WaitIfEqual(VAddr addr, s32 value, s64 timeout) {
304 // Prepare to wait.
305 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
306 Handle timer = InvalidHandle;
307
308 {
309 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
310
311 // Check that the thread isn't terminating.
312 if (cur_thread->IsTerminationRequested()) {
313 slp.CancelSleep();
314 return Svc::ResultTerminationRequested;
315 }
316
317 // Set the synced object.
318 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
319
320 // Read the value from userspace.
321 s32 user_value{};
322 if (!ReadFromUser(system, std::addressof(user_value), addr)) {
323 slp.CancelSleep();
324 return Svc::ResultInvalidCurrentMemory;
325 }
326
327 // Check that the value is equal.
328 if (value != user_value) {
329 slp.CancelSleep();
330 return Svc::ResultInvalidState;
331 }
332
333 // Check that the timeout is non-zero.
334 if (timeout == 0) {
335 slp.CancelSleep();
336 return Svc::ResultTimedOut;
337 }
338
339 // Set the arbiter.
340 cur_thread->SetAddressArbiter(std::addressof(thread_tree), addr);
341 thread_tree.insert(*cur_thread);
342 cur_thread->SetState(ThreadState::Waiting);
343 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Arbitration);
344 }
345
346 // Cancel the timer wait.
347 if (timer != InvalidHandle) {
348 auto& time_manager = kernel.TimeManager();
349 time_manager.UnscheduleTimeEvent(timer);
350 }
351
352 // Remove from the address arbiter.
353 {
354 KScopedSchedulerLock sl(kernel);
355
356 if (cur_thread->IsWaitingForAddressArbiter()) {
357 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
358 cur_thread->ClearAddressArbiter();
359 }
360 }
361
362 // Get the result.
363 KSynchronizationObject* dummy{};
364 return cur_thread->GetWaitResult(std::addressof(dummy));
365}
366
367} // namespace Kernel
diff --git a/src/core/hle/kernel/k_address_arbiter.h b/src/core/hle/kernel/k_address_arbiter.h
new file mode 100644
index 000000000..8d379b524
--- /dev/null
+++ b/src/core/hle/kernel/k_address_arbiter.h
@@ -0,0 +1,70 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/assert.h"
8#include "common/common_types.h"
9#include "core/hle/kernel/k_condition_variable.h"
10#include "core/hle/kernel/svc_types.h"
11
12union ResultCode;
13
14namespace Core {
15class System;
16}
17
18namespace Kernel {
19
20class KernelCore;
21
22class KAddressArbiter {
23public:
24 using ThreadTree = KConditionVariable::ThreadTree;
25
26 explicit KAddressArbiter(Core::System& system_);
27 ~KAddressArbiter();
28
29 [[nodiscard]] ResultCode SignalToAddress(VAddr addr, Svc::SignalType type, s32 value,
30 s32 count) {
31 switch (type) {
32 case Svc::SignalType::Signal:
33 return Signal(addr, count);
34 case Svc::SignalType::SignalAndIncrementIfEqual:
35 return SignalAndIncrementIfEqual(addr, value, count);
36 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
37 return SignalAndModifyByWaitingCountIfEqual(addr, value, count);
38 }
39 UNREACHABLE();
40 return RESULT_UNKNOWN;
41 }
42
43 [[nodiscard]] ResultCode WaitForAddress(VAddr addr, Svc::ArbitrationType type, s32 value,
44 s64 timeout) {
45 switch (type) {
46 case Svc::ArbitrationType::WaitIfLessThan:
47 return WaitIfLessThan(addr, value, false, timeout);
48 case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
49 return WaitIfLessThan(addr, value, true, timeout);
50 case Svc::ArbitrationType::WaitIfEqual:
51 return WaitIfEqual(addr, value, timeout);
52 }
53 UNREACHABLE();
54 return RESULT_UNKNOWN;
55 }
56
57private:
58 [[nodiscard]] ResultCode Signal(VAddr addr, s32 count);
59 [[nodiscard]] ResultCode SignalAndIncrementIfEqual(VAddr addr, s32 value, s32 count);
60 [[nodiscard]] ResultCode SignalAndModifyByWaitingCountIfEqual(VAddr addr, s32 value, s32 count);
61 [[nodiscard]] ResultCode WaitIfLessThan(VAddr addr, s32 value, bool decrement, s64 timeout);
62 [[nodiscard]] ResultCode WaitIfEqual(VAddr addr, s32 value, s64 timeout);
63
64 ThreadTree thread_tree;
65
66 Core::System& system;
67 KernelCore& kernel;
68};
69
70} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
new file mode 100644
index 000000000..49a068310
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -0,0 +1,349 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <vector>
6
7#include "core/arm/exclusive_monitor.h"
8#include "core/core.h"
9#include "core/hle/kernel/k_condition_variable.h"
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
12#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/kernel.h"
14#include "core/hle/kernel/process.h"
15#include "core/hle/kernel/svc_common.h"
16#include "core/hle/kernel/svc_results.h"
17#include "core/hle/kernel/thread.h"
18#include "core/memory.h"
19
20namespace Kernel {
21
22namespace {
23
24bool ReadFromUser(Core::System& system, u32* out, VAddr address) {
25 *out = system.Memory().Read32(address);
26 return true;
27}
28
29bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
30 system.Memory().Write32(address, *p);
31 return true;
32}
33
34bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
35 u32 new_orr_mask) {
36 auto& monitor = system.Monitor();
37 const auto current_core = system.CurrentCoreIndex();
38
39 // Load the value from the address.
40 const auto expected = monitor.ExclusiveRead32(current_core, address);
41
42 // Orr in the new mask.
43 u32 value = expected | new_orr_mask;
44
45 // If the value is zero, use the if_zero value, otherwise use the newly orr'd value.
46 if (!expected) {
47 value = if_zero;
48 }
49
50 // Try to store.
51 if (!monitor.ExclusiveWrite32(current_core, address, value)) {
52 // If we failed to store, try again.
53 return UpdateLockAtomic(system, out, address, if_zero, new_orr_mask);
54 }
55
56 // We're done.
57 *out = expected;
58 return true;
59}
60
61} // namespace
62
63KConditionVariable::KConditionVariable(Core::System& system_)
64 : system{system_}, kernel{system.Kernel()} {}
65
66KConditionVariable::~KConditionVariable() = default;
67
68ResultCode KConditionVariable::SignalToAddress(VAddr addr) {
69 Thread* owner_thread = kernel.CurrentScheduler()->GetCurrentThread();
70
71 // Signal the address.
72 {
73 KScopedSchedulerLock sl(kernel);
74
75 // Remove waiter thread.
76 s32 num_waiters{};
77 Thread* next_owner_thread =
78 owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
79
80 // Determine the next tag.
81 u32 next_value{};
82 if (next_owner_thread) {
83 next_value = next_owner_thread->GetAddressKeyValue();
84 if (num_waiters > 1) {
85 next_value |= Svc::HandleWaitMask;
86 }
87
88 next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
89 next_owner_thread->Wakeup();
90 }
91
92 // Write the value to userspace.
93 if (!WriteToUser(system, addr, std::addressof(next_value))) {
94 if (next_owner_thread) {
95 next_owner_thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
96 }
97
98 return Svc::ResultInvalidCurrentMemory;
99 }
100 }
101
102 return RESULT_SUCCESS;
103}
104
105ResultCode KConditionVariable::WaitForAddress(Handle handle, VAddr addr, u32 value) {
106 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
107
108 // Wait for the address.
109 {
110 std::shared_ptr<Thread> owner_thread;
111 ASSERT(!owner_thread);
112 {
113 KScopedSchedulerLock sl(kernel);
114 cur_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
115
116 // Check if the thread should terminate.
117 R_UNLESS(!cur_thread->IsTerminationRequested(), Svc::ResultTerminationRequested);
118
119 {
120 // Read the tag from userspace.
121 u32 test_tag{};
122 R_UNLESS(ReadFromUser(system, std::addressof(test_tag), addr),
123 Svc::ResultInvalidCurrentMemory);
124
125 // If the tag isn't the handle (with wait mask), we're done.
126 R_UNLESS(test_tag == (handle | Svc::HandleWaitMask), RESULT_SUCCESS);
127
128 // Get the lock owner thread.
129 owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(handle);
130 R_UNLESS(owner_thread, Svc::ResultInvalidHandle);
131
132 // Update the lock.
133 cur_thread->SetAddressKey(addr, value);
134 owner_thread->AddWaiter(cur_thread);
135 cur_thread->SetState(ThreadState::Waiting);
136 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
137 cur_thread->SetMutexWaitAddressForDebugging(addr);
138 }
139 }
140 ASSERT(owner_thread);
141 }
142
143 // Remove the thread as a waiter from the lock owner.
144 {
145 KScopedSchedulerLock sl(kernel);
146 Thread* owner_thread = cur_thread->GetLockOwner();
147 if (owner_thread != nullptr) {
148 owner_thread->RemoveWaiter(cur_thread);
149 }
150 }
151
152 // Get the wait result.
153 KSynchronizationObject* dummy{};
154 return cur_thread->GetWaitResult(std::addressof(dummy));
155}
156
157Thread* KConditionVariable::SignalImpl(Thread* thread) {
158 // Check pre-conditions.
159 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
160
161 // Update the tag.
162 VAddr address = thread->GetAddressKey();
163 u32 own_tag = thread->GetAddressKeyValue();
164
165 u32 prev_tag{};
166 bool can_access{};
167 {
168 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
169 // TODO(bunnei): We should call CanAccessAtomic(..) here.
170 can_access = true;
171 if (can_access) {
172 UpdateLockAtomic(system, std::addressof(prev_tag), address, own_tag,
173 Svc::HandleWaitMask);
174 }
175 }
176
177 Thread* thread_to_close = nullptr;
178 if (can_access) {
179 if (prev_tag == InvalidHandle) {
180 // If nobody held the lock previously, we're all good.
181 thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
182 thread->Wakeup();
183 } else {
184 // Get the previous owner.
185 auto owner_thread = kernel.CurrentProcess()->GetHandleTable().Get<Thread>(
186 prev_tag & ~Svc::HandleWaitMask);
187
188 if (owner_thread) {
189 // Add the thread as a waiter on the owner.
190 owner_thread->AddWaiter(thread);
191 thread_to_close = owner_thread.get();
192 } else {
193 // The lock was tagged with a thread that doesn't exist.
194 thread->SetSyncedObject(nullptr, Svc::ResultInvalidState);
195 thread->Wakeup();
196 }
197 }
198 } else {
199 // If the address wasn't accessible, note so.
200 thread->SetSyncedObject(nullptr, Svc::ResultInvalidCurrentMemory);
201 thread->Wakeup();
202 }
203
204 return thread_to_close;
205}
206
207void KConditionVariable::Signal(u64 cv_key, s32 count) {
208 // Prepare for signaling.
209 constexpr int MaxThreads = 16;
210
211 // TODO(bunnei): This should just be Thread once we implement KAutoObject instead of using
212 // std::shared_ptr.
213 std::vector<std::shared_ptr<Thread>> thread_list;
214 std::array<Thread*, MaxThreads> thread_array;
215 s32 num_to_close{};
216
217 // Perform signaling.
218 s32 num_waiters{};
219 {
220 KScopedSchedulerLock sl(kernel);
221
222 auto it = thread_tree.nfind_light({cv_key, -1});
223 while ((it != thread_tree.end()) && (count <= 0 || num_waiters < count) &&
224 (it->GetConditionVariableKey() == cv_key)) {
225 Thread* target_thread = std::addressof(*it);
226
227 if (Thread* thread = SignalImpl(target_thread); thread != nullptr) {
228 if (num_to_close < MaxThreads) {
229 thread_array[num_to_close++] = thread;
230 } else {
231 thread_list.push_back(SharedFrom(thread));
232 }
233 }
234
235 it = thread_tree.erase(it);
236 target_thread->ClearConditionVariable();
237 ++num_waiters;
238 }
239
240 // If we have no waiters, clear the has waiter flag.
241 if (it == thread_tree.end() || it->GetConditionVariableKey() != cv_key) {
242 const u32 has_waiter_flag{};
243 WriteToUser(system, cv_key, std::addressof(has_waiter_flag));
244 }
245 }
246
247 // Close threads in the array.
248 for (auto i = 0; i < num_to_close; ++i) {
249 thread_array[i]->Close();
250 }
251
252 // Close threads in the list.
253 for (auto it = thread_list.begin(); it != thread_list.end(); it = thread_list.erase(it)) {
254 (*it)->Close();
255 }
256}
257
258ResultCode KConditionVariable::Wait(VAddr addr, u64 key, u32 value, s64 timeout) {
259 // Prepare to wait.
260 Thread* cur_thread = kernel.CurrentScheduler()->GetCurrentThread();
261 Handle timer = InvalidHandle;
262
263 {
264 KScopedSchedulerLockAndSleep slp(kernel, timer, cur_thread, timeout);
265
266 // Set the synced object.
267 cur_thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
268
269 // Check that the thread isn't terminating.
270 if (cur_thread->IsTerminationRequested()) {
271 slp.CancelSleep();
272 return Svc::ResultTerminationRequested;
273 }
274
275 // Update the value and process for the next owner.
276 {
277 // Remove waiter thread.
278 s32 num_waiters{};
279 Thread* next_owner_thread =
280 cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr);
281
282 // Update for the next owner thread.
283 u32 next_value{};
284 if (next_owner_thread != nullptr) {
285 // Get the next tag value.
286 next_value = next_owner_thread->GetAddressKeyValue();
287 if (num_waiters > 1) {
288 next_value |= Svc::HandleWaitMask;
289 }
290
291 // Wake up the next owner.
292 next_owner_thread->SetSyncedObject(nullptr, RESULT_SUCCESS);
293 next_owner_thread->Wakeup();
294 }
295
296 // Write to the cv key.
297 {
298 const u32 has_waiter_flag = 1;
299 WriteToUser(system, key, std::addressof(has_waiter_flag));
300 // TODO(bunnei): We should call DataMemoryBarrier(..) here.
301 }
302
303 // Write the value to userspace.
304 if (!WriteToUser(system, addr, std::addressof(next_value))) {
305 slp.CancelSleep();
306 return Svc::ResultInvalidCurrentMemory;
307 }
308 }
309
310 // Update condition variable tracking.
311 {
312 cur_thread->SetConditionVariable(std::addressof(thread_tree), addr, key, value);
313 thread_tree.insert(*cur_thread);
314 }
315
316 // If the timeout is non-zero, set the thread as waiting.
317 if (timeout != 0) {
318 cur_thread->SetState(ThreadState::Waiting);
319 cur_thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::ConditionVar);
320 cur_thread->SetMutexWaitAddressForDebugging(addr);
321 }
322 }
323
324 // Cancel the timer wait.
325 if (timer != InvalidHandle) {
326 auto& time_manager = kernel.TimeManager();
327 time_manager.UnscheduleTimeEvent(timer);
328 }
329
330 // Remove from the condition variable.
331 {
332 KScopedSchedulerLock sl(kernel);
333
334 if (Thread* owner = cur_thread->GetLockOwner(); owner != nullptr) {
335 owner->RemoveWaiter(cur_thread);
336 }
337
338 if (cur_thread->IsWaitingForConditionVariable()) {
339 thread_tree.erase(thread_tree.iterator_to(*cur_thread));
340 cur_thread->ClearConditionVariable();
341 }
342 }
343
344 // Get the result.
345 KSynchronizationObject* dummy{};
346 return cur_thread->GetWaitResult(std::addressof(dummy));
347}
348
349} // namespace Kernel
diff --git a/src/core/hle/kernel/k_condition_variable.h b/src/core/hle/kernel/k_condition_variable.h
new file mode 100644
index 000000000..98ed5b323
--- /dev/null
+++ b/src/core/hle/kernel/k_condition_variable.h
@@ -0,0 +1,59 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/assert.h"
8#include "common/common_types.h"
9
10#include "core/hle/kernel/k_scheduler.h"
11#include "core/hle/kernel/kernel.h"
12#include "core/hle/kernel/thread.h"
13#include "core/hle/result.h"
14
15namespace Core {
16class System;
17}
18
19namespace Kernel {
20
/// Kernel condition-variable / mutex-arbitration primitive. Waiting threads
/// are tracked in an intrusive red-black tree declared on Thread, keyed by
/// the userspace condition-variable key.
class KConditionVariable {
public:
    // Intrusive tree type for threads currently waiting on a condition
    // variable; the node/comparator live on Thread itself.
    using ThreadTree = typename Thread::ConditionVariableThreadTreeType;

    explicit KConditionVariable(Core::System& system_);
    ~KConditionVariable();

    // Arbitration
    // NOTE(review): per the Arbitration grouping these operate on the mutex
    // word at addr (signal releases it to a waiter; wait blocks on the owner
    // named by handle) — implementation not visible here, confirm in the .cpp.
    [[nodiscard]] ResultCode SignalToAddress(VAddr addr);
    [[nodiscard]] ResultCode WaitForAddress(Handle handle, VAddr addr, u32 value);

    // Condition variable
    // Signal wakes waiters registered under cv_key, up to count of them.
    void Signal(u64 cv_key, s32 count);
    // Wait atomically releases the mutex at addr and sleeps on key until
    // signaled or the (presumably nanosecond) timeout elapses.
    [[nodiscard]] ResultCode Wait(VAddr addr, u64 key, u32 value, s64 timeout);

private:
    // Wakes a single waiter; helper for Signal.
    [[nodiscard]] Thread* SignalImpl(Thread* thread);

    // All threads currently waiting on any key of this condition variable.
    ThreadTree thread_tree;

    Core::System& system;
    KernelCore& kernel;
};
44
// Detaches thread from its condition-variable wait tree before a priority
// change, so AfterUpdatePriority can re-insert it at its new ordered position.
// Caller must hold the global scheduler lock (asserted).
inline void BeforeUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                 Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->erase(tree->iterator_to(*thread));
}
51
// Re-inserts thread into its condition-variable wait tree after a priority
// change, pairing with BeforeUpdatePriority so the tree ordering stays valid.
// Caller must hold the global scheduler lock (asserted).
inline void AfterUpdatePriority(const KernelCore& kernel, KConditionVariable::ThreadTree* tree,
                                Thread* thread) {
    ASSERT(kernel.GlobalSchedulerContext().IsLocked());

    tree->insert(*thread);
}
58
59} // namespace Kernel
diff --git a/src/core/hle/kernel/k_priority_queue.h b/src/core/hle/kernel/k_priority_queue.h
index 99fb8fe93..0dc929040 100644
--- a/src/core/hle/kernel/k_priority_queue.h
+++ b/src/core/hle/kernel/k_priority_queue.h
@@ -8,11 +8,11 @@
8#pragma once 8#pragma once
9 9
10#include <array> 10#include <array>
11#include <bit>
11#include <concepts> 12#include <concepts>
12 13
13#include "common/assert.h" 14#include "common/assert.h"
14#include "common/bit_set.h" 15#include "common/bit_set.h"
15#include "common/bit_util.h"
16#include "common/common_types.h" 16#include "common/common_types.h"
17#include "common/concepts.h" 17#include "common/concepts.h"
18 18
@@ -268,7 +268,7 @@ private:
268 } 268 }
269 269
270 constexpr s32 GetNextCore(u64& affinity) { 270 constexpr s32 GetNextCore(u64& affinity) {
271 const s32 core = Common::CountTrailingZeroes64(affinity); 271 const s32 core = std::countr_zero(affinity);
272 ClearAffinityBit(affinity, core); 272 ClearAffinityBit(affinity, core);
273 return core; 273 return core;
274 } 274 }
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index c5fd82a6b..12b5619fb 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -5,6 +5,8 @@
5// This file references various implementation details from Atmosphere, an open-source firmware for 5// This file references various implementation details from Atmosphere, an open-source firmware for
6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX. 6// the Nintendo Switch. Copyright 2018-2020 Atmosphere-NX.
7 7
8#include <bit>
9
8#include "common/assert.h" 10#include "common/assert.h"
9#include "common/bit_util.h" 11#include "common/bit_util.h"
10#include "common/fiber.h" 12#include "common/fiber.h"
@@ -31,12 +33,12 @@ static void IncrementScheduledCount(Kernel::Thread* thread) {
31 33
32void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule, 34void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedule,
33 Core::EmuThreadHandle global_thread) { 35 Core::EmuThreadHandle global_thread) {
34 u32 current_core = global_thread.host_handle; 36 const u32 current_core = global_thread.host_handle;
35 bool must_context_switch = global_thread.guest_handle != InvalidHandle && 37 bool must_context_switch = global_thread.guest_handle != InvalidHandle &&
36 (current_core < Core::Hardware::NUM_CPU_CORES); 38 (current_core < Core::Hardware::NUM_CPU_CORES);
37 39
38 while (cores_pending_reschedule != 0) { 40 while (cores_pending_reschedule != 0) {
39 u32 core = Common::CountTrailingZeroes64(cores_pending_reschedule); 41 const auto core = static_cast<u32>(std::countr_zero(cores_pending_reschedule));
40 ASSERT(core < Core::Hardware::NUM_CPU_CORES); 42 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
41 if (!must_context_switch || core != current_core) { 43 if (!must_context_switch || core != current_core) {
42 auto& phys_core = kernel.PhysicalCore(core); 44 auto& phys_core = kernel.PhysicalCore(core);
@@ -109,7 +111,7 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
109 111
110 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn. 112 // Idle cores are bad. We're going to try to migrate threads to each idle core in turn.
111 while (idle_cores != 0) { 113 while (idle_cores != 0) {
112 u32 core_id = Common::CountTrailingZeroes64(idle_cores); 114 const auto core_id = static_cast<u32>(std::countr_zero(idle_cores));
113 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) { 115 if (Thread* suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
114 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES]; 116 s32 migration_candidates[Core::Hardware::NUM_CPU_CORES];
115 size_t num_candidates = 0; 117 size_t num_candidates = 0;
@@ -180,22 +182,22 @@ u64 KScheduler::UpdateHighestPriorityThreadsImpl(KernelCore& kernel) {
180 return cores_needing_scheduling; 182 return cores_needing_scheduling;
181} 183}
182 184
183void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state) { 185void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state) {
184 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 186 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
185 187
186 // Check if the state has changed, because if it hasn't there's nothing to do. 188 // Check if the state has changed, because if it hasn't there's nothing to do.
187 const auto cur_state = thread->scheduling_state; 189 const auto cur_state = thread->GetRawState();
188 if (cur_state == old_state) { 190 if (cur_state == old_state) {
189 return; 191 return;
190 } 192 }
191 193
192 // Update the priority queues. 194 // Update the priority queues.
193 if (old_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 195 if (old_state == ThreadState::Runnable) {
194 // If we were previously runnable, then we're not runnable now, and we should remove. 196 // If we were previously runnable, then we're not runnable now, and we should remove.
195 GetPriorityQueue(kernel).Remove(thread); 197 GetPriorityQueue(kernel).Remove(thread);
196 IncrementScheduledCount(thread); 198 IncrementScheduledCount(thread);
197 SetSchedulerUpdateNeeded(kernel); 199 SetSchedulerUpdateNeeded(kernel);
198 } else if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 200 } else if (cur_state == ThreadState::Runnable) {
199 // If we're now runnable, then we weren't previously, and we should add. 201 // If we're now runnable, then we weren't previously, and we should add.
200 GetPriorityQueue(kernel).PushBack(thread); 202 GetPriorityQueue(kernel).PushBack(thread);
201 IncrementScheduledCount(thread); 203 IncrementScheduledCount(thread);
@@ -203,13 +205,11 @@ void KScheduler::OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 ol
203 } 205 }
204} 206}
205 207
206void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, 208void KScheduler::OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority) {
207 u32 old_priority) {
208
209 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 209 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
210 210
211 // If the thread is runnable, we want to change its priority in the queue. 211 // If the thread is runnable, we want to change its priority in the queue.
212 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 212 if (thread->GetRawState() == ThreadState::Runnable) {
213 GetPriorityQueue(kernel).ChangePriority( 213 GetPriorityQueue(kernel).ChangePriority(
214 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread); 214 old_priority, thread == kernel.CurrentScheduler()->GetCurrentThread(), thread);
215 IncrementScheduledCount(thread); 215 IncrementScheduledCount(thread);
@@ -222,7 +222,7 @@ void KScheduler::OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
222 ASSERT(kernel.GlobalSchedulerContext().IsLocked()); 222 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
223 223
224 // If the thread is runnable, we want to change its affinity in the queue. 224 // If the thread is runnable, we want to change its affinity in the queue.
225 if (thread->scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 225 if (thread->GetRawState() == ThreadState::Runnable) {
226 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread); 226 GetPriorityQueue(kernel).ChangeAffinityMask(old_core, old_affinity, thread);
227 IncrementScheduledCount(thread); 227 IncrementScheduledCount(thread);
228 SetSchedulerUpdateNeeded(kernel); 228 SetSchedulerUpdateNeeded(kernel);
@@ -292,7 +292,7 @@ void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
292 292
293 // If the best thread we can choose has a priority the same or worse than ours, try to 293 // If the best thread we can choose has a priority the same or worse than ours, try to
294 // migrate a higher priority thread. 294 // migrate a higher priority thread.
295 if (best_thread != nullptr && best_thread->GetPriority() >= static_cast<u32>(priority)) { 295 if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
296 Thread* suggested = priority_queue.GetSuggestedFront(core_id); 296 Thread* suggested = priority_queue.GetSuggestedFront(core_id);
297 while (suggested != nullptr) { 297 while (suggested != nullptr) {
298 // If the suggestion's priority is the same as ours, don't bother. 298 // If the suggestion's priority is the same as ours, don't bother.
@@ -395,8 +395,8 @@ void KScheduler::YieldWithoutCoreMigration() {
395 { 395 {
396 KScopedSchedulerLock lock(kernel); 396 KScopedSchedulerLock lock(kernel);
397 397
398 const auto cur_state = cur_thread.scheduling_state; 398 const auto cur_state = cur_thread.GetRawState();
399 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 399 if (cur_state == ThreadState::Runnable) {
400 // Put the current thread at the back of the queue. 400 // Put the current thread at the back of the queue.
401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); 401 Thread* next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
402 IncrementScheduledCount(std::addressof(cur_thread)); 402 IncrementScheduledCount(std::addressof(cur_thread));
@@ -436,8 +436,8 @@ void KScheduler::YieldWithCoreMigration() {
436 { 436 {
437 KScopedSchedulerLock lock(kernel); 437 KScopedSchedulerLock lock(kernel);
438 438
439 const auto cur_state = cur_thread.scheduling_state; 439 const auto cur_state = cur_thread.GetRawState();
440 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 440 if (cur_state == ThreadState::Runnable) {
441 // Get the current active core. 441 // Get the current active core.
442 const s32 core_id = cur_thread.GetActiveCore(); 442 const s32 core_id = cur_thread.GetActiveCore();
443 443
@@ -526,8 +526,8 @@ void KScheduler::YieldToAnyThread() {
526 { 526 {
527 KScopedSchedulerLock lock(kernel); 527 KScopedSchedulerLock lock(kernel);
528 528
529 const auto cur_state = cur_thread.scheduling_state; 529 const auto cur_state = cur_thread.GetRawState();
530 if (cur_state == static_cast<u32>(ThreadSchedStatus::Runnable)) { 530 if (cur_state == ThreadState::Runnable) {
531 // Get the current active core. 531 // Get the current active core.
532 const s32 core_id = cur_thread.GetActiveCore(); 532 const s32 core_id = cur_thread.GetActiveCore();
533 533
@@ -645,8 +645,7 @@ void KScheduler::Unload(Thread* thread) {
645 645
646void KScheduler::Reload(Thread* thread) { 646void KScheduler::Reload(Thread* thread) {
647 if (thread) { 647 if (thread) {
648 ASSERT_MSG(thread->GetSchedulingStatus() == ThreadSchedStatus::Runnable, 648 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
649 "Thread must be runnable.");
650 649
651 // Cancel any outstanding wakeup events for this thread 650 // Cancel any outstanding wakeup events for this thread
652 thread->SetIsRunning(true); 651 thread->SetIsRunning(true);
@@ -725,7 +724,7 @@ void KScheduler::SwitchToCurrent() {
725 do { 724 do {
726 if (current_thread != nullptr && !current_thread->IsHLEThread()) { 725 if (current_thread != nullptr && !current_thread->IsHLEThread()) {
727 current_thread->context_guard.lock(); 726 current_thread->context_guard.lock();
728 if (!current_thread->IsRunnable()) { 727 if (current_thread->GetRawState() != ThreadState::Runnable) {
729 current_thread->context_guard.unlock(); 728 current_thread->context_guard.unlock();
730 break; 729 break;
731 } 730 }
@@ -772,7 +771,7 @@ void KScheduler::Initialize() {
772 771
773 { 772 {
774 KScopedSchedulerLock lock{system.Kernel()}; 773 KScopedSchedulerLock lock{system.Kernel()};
775 idle_thread->SetStatus(ThreadStatus::Ready); 774 idle_thread->SetState(ThreadState::Runnable);
776 } 775 }
777} 776}
778 777
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index e84abc84c..783665123 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -100,11 +100,10 @@ public:
100 void YieldToAnyThread(); 100 void YieldToAnyThread();
101 101
102 /// Notify the scheduler a thread's status has changed. 102 /// Notify the scheduler a thread's status has changed.
103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, u32 old_state); 103 static void OnThreadStateChanged(KernelCore& kernel, Thread* thread, ThreadState old_state);
104 104
105 /// Notify the scheduler a thread's priority has changed. 105 /// Notify the scheduler a thread's priority has changed.
106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, Thread* current_thread, 106 static void OnThreadPriorityChanged(KernelCore& kernel, Thread* thread, s32 old_priority);
107 u32 old_priority);
108 107
109 /// Notify the scheduler a thread's core and/or affinity mask has changed. 108 /// Notify the scheduler a thread's core and/or affinity mask has changed.
110 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread, 109 static void OnThreadAffinityMaskChanged(KernelCore& kernel, Thread* thread,
diff --git a/src/core/hle/kernel/k_scheduler_lock.h b/src/core/hle/kernel/k_scheduler_lock.h
index 2f1c1f691..9b40bd22c 100644
--- a/src/core/hle/kernel/k_scheduler_lock.h
+++ b/src/core/hle/kernel/k_scheduler_lock.h
@@ -19,7 +19,7 @@ class KernelCore;
19template <typename SchedulerType> 19template <typename SchedulerType>
20class KAbstractSchedulerLock { 20class KAbstractSchedulerLock {
21public: 21public:
22 explicit KAbstractSchedulerLock(KernelCore& kernel) : kernel{kernel} {} 22 explicit KAbstractSchedulerLock(KernelCore& kernel_) : kernel{kernel_} {}
23 23
24 bool IsLockedByCurrentThread() const { 24 bool IsLockedByCurrentThread() const {
25 return this->owner_thread == kernel.GetCurrentEmuThreadID(); 25 return this->owner_thread == kernel.GetCurrentEmuThreadID();
diff --git a/src/core/hle/kernel/k_synchronization_object.cpp b/src/core/hle/kernel/k_synchronization_object.cpp
new file mode 100644
index 000000000..1c508cb55
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.cpp
@@ -0,0 +1,172 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/common_types.h"
7#include "core/hle/kernel/k_scheduler.h"
8#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
9#include "core/hle/kernel/k_synchronization_object.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/svc_results.h"
12#include "core/hle/kernel/thread.h"
13
14namespace Kernel {
15
ResultCode KSynchronizationObject::Wait(KernelCore& kernel, s32* out_index,
                                        KSynchronizationObject** objects, const s32 num_objects,
                                        s64 timeout) {
    // Allocate one waiter-list node per object. (The reference kernel
    // stack-allocates these; std::vector heap-allocates, but the nodes'
    // lifetime — the duration of this call — is the same.)
    std::vector<ThreadListNode> thread_nodes(num_objects);

    // Prepare for wait.
    Thread* thread = kernel.CurrentScheduler()->GetCurrentThread();
    Handle timer = InvalidHandle;

    {
        // Setup the scheduling lock and sleep.
        KScopedSchedulerLockAndSleep slp(kernel, timer, thread, timeout);

        // Check if any of the objects are already signaled; if so we can
        // return immediately without ever linking onto the waiter lists.
        for (auto i = 0; i < num_objects; ++i) {
            ASSERT(objects[i] != nullptr);

            if (objects[i]->IsSignaled()) {
                *out_index = i;
                slp.CancelSleep();
                return RESULT_SUCCESS;
            }
        }

        // Check if the timeout is zero (poll-only wait).
        if (timeout == 0) {
            slp.CancelSleep();
            return Svc::ResultTimedOut;
        }

        // Check if the thread should terminate.
        if (thread->IsTerminationRequested()) {
            slp.CancelSleep();
            return Svc::ResultTerminationRequested;
        }

        // Check if waiting was canceled (e.g. via CancelSynchronization).
        if (thread->IsWaitCancelled()) {
            slp.CancelSleep();
            thread->ClearWaitCancelled();
            return Svc::ResultCancelled;
        }

        // Append this thread's node to the tail of every object's waiter list.
        for (auto i = 0; i < num_objects; ++i) {
            thread_nodes[i].thread = thread;
            thread_nodes[i].next = nullptr;

            if (objects[i]->thread_list_tail == nullptr) {
                objects[i]->thread_list_head = std::addressof(thread_nodes[i]);
            } else {
                objects[i]->thread_list_tail->next = std::addressof(thread_nodes[i]);
            }

            objects[i]->thread_list_tail = std::addressof(thread_nodes[i]);
        }

        // For debugging only
        thread->SetWaitObjectsForDebugging({objects, static_cast<std::size_t>(num_objects)});

        // Mark the thread as waiting. The synced result defaults to TimedOut;
        // a signaling object overwrites it through SetSyncedObject.
        thread->SetCancellable();
        thread->SetSyncedObject(nullptr, Svc::ResultTimedOut);
        thread->SetState(ThreadState::Waiting);
        thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Synchronization);
    }

    // The lock/sleep is done, so we should be able to get our result.

    // Thread is no longer cancellable.
    thread->ClearCancellable();

    // For debugging only
    thread->SetWaitObjectsForDebugging({});

    // Cancel the timer as needed.
    if (timer != InvalidHandle) {
        auto& time_manager = kernel.TimeManager();
        time_manager.UnscheduleTimeEvent(timer);
    }

    // Get the wait result.
    ResultCode wait_result{RESULT_SUCCESS};
    s32 sync_index = -1;
    {
        KScopedSchedulerLock lock(kernel);
        KSynchronizationObject* synced_obj;
        wait_result = thread->GetWaitResult(std::addressof(synced_obj));

        for (auto i = 0; i < num_objects; ++i) {
            // Unlink our node from object i's singly-linked waiter list.
            // The address of thread_list_head is reinterpreted as a node
            // pointer (ThreadListNode's first member is `next`), so writing
            // prev->next below updates the head and interior links uniformly.
            ThreadListNode* prev_ptr =
                reinterpret_cast<ThreadListNode*>(std::addressof(objects[i]->thread_list_head));
            ThreadListNode* prev_val = nullptr;
            ThreadListNode *prev, *tail_prev;

            // Walk until prev_ptr is our node; prev then points at its
            // predecessor and tail_prev at the node before that.
            do {
                prev = prev_ptr;
                prev_ptr = prev_ptr->next;
                tail_prev = prev_val;
                prev_val = prev_ptr;
            } while (prev_ptr != std::addressof(thread_nodes[i]));

            // If our node was the tail, the tail becomes its predecessor.
            if (objects[i]->thread_list_tail == std::addressof(thread_nodes[i])) {
                objects[i]->thread_list_tail = tail_prev;
            }

            prev->next = thread_nodes[i].next;

            // Record which object actually woke us, if any.
            if (objects[i] == synced_obj) {
                sync_index = i;
            }
        }
    }

    // Set output. sync_index stays -1 when no object signaled us
    // (timeout/cancel paths).
    *out_index = sync_index;
    return wait_result;
}
136
137KSynchronizationObject::KSynchronizationObject(KernelCore& kernel) : Object{kernel} {}
138
139KSynchronizationObject ::~KSynchronizationObject() = default;
140
141void KSynchronizationObject::NotifyAvailable(ResultCode result) {
142 KScopedSchedulerLock lock(kernel);
143
144 // If we're not signaled, we've nothing to notify.
145 if (!this->IsSignaled()) {
146 return;
147 }
148
149 // Iterate over each thread.
150 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
151 Thread* thread = cur_node->thread;
152 if (thread->GetState() == ThreadState::Waiting) {
153 thread->SetSyncedObject(this, result);
154 thread->SetState(ThreadState::Runnable);
155 }
156 }
157}
158
159std::vector<Thread*> KSynchronizationObject::GetWaitingThreadsForDebugging() const {
160 std::vector<Thread*> threads;
161
162 // If debugging, dump the list of waiters.
163 {
164 KScopedSchedulerLock lock(kernel);
165 for (auto* cur_node = thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
166 threads.emplace_back(cur_node->thread);
167 }
168 }
169
170 return threads;
171}
172} // namespace Kernel
diff --git a/src/core/hle/kernel/k_synchronization_object.h b/src/core/hle/kernel/k_synchronization_object.h
new file mode 100644
index 000000000..14d80ebf1
--- /dev/null
+++ b/src/core/hle/kernel/k_synchronization_object.h
@@ -0,0 +1,58 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <vector>
8
9#include "core/hle/kernel/object.h"
10#include "core/hle/result.h"
11
12namespace Kernel {
13
14class KernelCore;
15class Synchronization;
16class Thread;
17
/// Class that represents a Kernel object that a thread can be waiting on
class KSynchronizationObject : public Object {
public:
    // Node in this object's singly-linked list of waiting threads. `next`
    // must remain the first member: Wait() reinterprets the address of the
    // list head pointer as a node to unlink uniformly.
    struct ThreadListNode {
        ThreadListNode* next{};
        Thread* thread{};
    };

    // Blocks the current thread until one of `objects` is signaled, the
    // timeout elapses, or the wait is cancelled. On return, *out_index holds
    // the index of the signaling object, or -1 if none signaled
    // (timeout/cancel paths).
    [[nodiscard]] static ResultCode Wait(KernelCore& kernel, s32* out_index,
                                         KSynchronizationObject** objects, const s32 num_objects,
                                         s64 timeout);

    // Derived classes report whether the object is currently signaled.
    [[nodiscard]] virtual bool IsSignaled() const = 0;

    // Debugger-only snapshot of the threads waiting on this object.
    [[nodiscard]] std::vector<Thread*> GetWaitingThreadsForDebugging() const;

protected:
    explicit KSynchronizationObject(KernelCore& kernel);
    virtual ~KSynchronizationObject();

    // Wakes all waiting threads with the given result (if signaled).
    void NotifyAvailable(ResultCode result);
    void NotifyAvailable() {
        return this->NotifyAvailable(RESULT_SUCCESS);
    }

private:
    // Intrusive waiter list; nodes are owned by Wait()'s stack frame
    // (a local std::vector) and linked/unlinked under the scheduler lock.
    ThreadListNode* thread_list_head{};
    ThreadListNode* thread_list_tail{};
};
47
// Specialization of DynamicObjectCast for KSynchronizationObjects
template <>
inline std::shared_ptr<KSynchronizationObject> DynamicObjectCast<KSynchronizationObject>(
    std::shared_ptr<Object> object) {
    // Assumes IsWaitable() is true only for KSynchronizationObject-derived
    // objects, so a static cast suffices — TODO confirm against Object.
    if (object != nullptr && object->IsWaitable()) {
        return std::static_pointer_cast<KSynchronizationObject>(object);
    }
    return nullptr;
}
57
58} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index e8ece8164..c0ff287a6 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -38,7 +38,6 @@
38#include "core/hle/kernel/resource_limit.h" 38#include "core/hle/kernel/resource_limit.h"
39#include "core/hle/kernel/service_thread.h" 39#include "core/hle/kernel/service_thread.h"
40#include "core/hle/kernel/shared_memory.h" 40#include "core/hle/kernel/shared_memory.h"
41#include "core/hle/kernel/synchronization.h"
42#include "core/hle/kernel/thread.h" 41#include "core/hle/kernel/thread.h"
43#include "core/hle/kernel/time_manager.h" 42#include "core/hle/kernel/time_manager.h"
44#include "core/hle/lock.h" 43#include "core/hle/lock.h"
@@ -51,8 +50,7 @@ namespace Kernel {
51 50
52struct KernelCore::Impl { 51struct KernelCore::Impl {
53 explicit Impl(Core::System& system, KernelCore& kernel) 52 explicit Impl(Core::System& system, KernelCore& kernel)
54 : synchronization{system}, time_manager{system}, global_handle_table{kernel}, system{ 53 : time_manager{system}, global_handle_table{kernel}, system{system} {}
55 system} {}
56 54
57 void SetMulticore(bool is_multicore) { 55 void SetMulticore(bool is_multicore) {
58 this->is_multicore = is_multicore; 56 this->is_multicore = is_multicore;
@@ -307,7 +305,6 @@ struct KernelCore::Impl {
307 std::vector<std::shared_ptr<Process>> process_list; 305 std::vector<std::shared_ptr<Process>> process_list;
308 Process* current_process = nullptr; 306 Process* current_process = nullptr;
309 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context; 307 std::unique_ptr<Kernel::GlobalSchedulerContext> global_scheduler_context;
310 Kernel::Synchronization synchronization;
311 Kernel::TimeManager time_manager; 308 Kernel::TimeManager time_manager;
312 309
313 std::shared_ptr<ResourceLimit> system_resource_limit; 310 std::shared_ptr<ResourceLimit> system_resource_limit;
@@ -461,14 +458,6 @@ const std::array<Core::CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>& Kern
461 return impl->interrupts; 458 return impl->interrupts;
462} 459}
463 460
464Kernel::Synchronization& KernelCore::Synchronization() {
465 return impl->synchronization;
466}
467
468const Kernel::Synchronization& KernelCore::Synchronization() const {
469 return impl->synchronization;
470}
471
472Kernel::TimeManager& KernelCore::TimeManager() { 461Kernel::TimeManager& KernelCore::TimeManager() {
473 return impl->time_manager; 462 return impl->time_manager;
474} 463}
@@ -613,9 +602,11 @@ void KernelCore::Suspend(bool in_suspention) {
613 const bool should_suspend = exception_exited || in_suspention; 602 const bool should_suspend = exception_exited || in_suspention;
614 { 603 {
615 KScopedSchedulerLock lock(*this); 604 KScopedSchedulerLock lock(*this);
616 ThreadStatus status = should_suspend ? ThreadStatus::Ready : ThreadStatus::WaitSleep; 605 const auto state = should_suspend ? ThreadState::Runnable : ThreadState::Waiting;
617 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) { 606 for (std::size_t i = 0; i < Core::Hardware::NUM_CPU_CORES; i++) {
618 impl->suspend_threads[i]->SetStatus(status); 607 impl->suspend_threads[i]->SetState(state);
608 impl->suspend_threads[i]->SetWaitReasonForDebugging(
609 ThreadWaitReasonForDebugging::Suspended);
619 } 610 }
620 } 611 }
621} 612}
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index e3169f5a7..933d9a7d6 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -33,7 +33,6 @@ template <typename T>
33class SlabHeap; 33class SlabHeap;
34} // namespace Memory 34} // namespace Memory
35 35
36class AddressArbiter;
37class ClientPort; 36class ClientPort;
38class GlobalSchedulerContext; 37class GlobalSchedulerContext;
39class HandleTable; 38class HandleTable;
@@ -129,12 +128,6 @@ public:
129 /// Gets the an instance of the current physical CPU core. 128 /// Gets the an instance of the current physical CPU core.
130 const Kernel::PhysicalCore& CurrentPhysicalCore() const; 129 const Kernel::PhysicalCore& CurrentPhysicalCore() const;
131 130
132 /// Gets the an instance of the Synchronization Interface.
133 Kernel::Synchronization& Synchronization();
134
135 /// Gets the an instance of the Synchronization Interface.
136 const Kernel::Synchronization& Synchronization() const;
137
138 /// Gets the an instance of the TimeManager Interface. 131 /// Gets the an instance of the TimeManager Interface.
139 Kernel::TimeManager& TimeManager(); 132 Kernel::TimeManager& TimeManager();
140 133
diff --git a/src/core/hle/kernel/memory/memory_layout.h b/src/core/hle/kernel/memory/memory_layout.h
index 9b3d6267a..c7c0b2f49 100644
--- a/src/core/hle/kernel/memory/memory_layout.h
+++ b/src/core/hle/kernel/memory/memory_layout.h
@@ -5,9 +5,28 @@
5#pragma once 5#pragma once
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "core/device_memory.h"
8 9
9namespace Kernel::Memory { 10namespace Kernel::Memory {
10 11
// Kernel address-space layout constants. The kernel virtual address space is
// the top 2^39 bytes of the address space (base computed by wrapping
// subtraction from 0), minus one ASLR alignment unit at the end.
constexpr std::size_t KernelAslrAlignment = 2 * 1024 * 1024;
constexpr std::size_t KernelVirtualAddressSpaceWidth = 1ULL << 39;
constexpr std::size_t KernelPhysicalAddressSpaceWidth = 1ULL << 48;
constexpr std::size_t KernelVirtualAddressSpaceBase = 0ULL - KernelVirtualAddressSpaceWidth;
constexpr std::size_t KernelVirtualAddressSpaceEnd =
    KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr std::size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1;
constexpr std::size_t KernelVirtualAddressSpaceSize =
    KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;

// True if key lies within the kernel virtual address space
// (inclusive of the last valid address).
constexpr bool IsKernelAddressKey(VAddr key) {
    return KernelVirtualAddressSpaceBase <= key && key <= KernelVirtualAddressSpaceLast;
}

// True if address lies within the kernel virtual address space
// (exclusive upper bound; equivalent to IsKernelAddressKey).
constexpr bool IsKernelAddress(VAddr address) {
    return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}
29
11class MemoryRegion final { 30class MemoryRegion final {
12 friend class MemoryLayout; 31 friend class MemoryLayout;
13 32
diff --git a/src/core/hle/kernel/memory/page_heap.h b/src/core/hle/kernel/memory/page_heap.h
index 22b0de860..131093284 100644
--- a/src/core/hle/kernel/memory/page_heap.h
+++ b/src/core/hle/kernel/memory/page_heap.h
@@ -8,11 +8,11 @@
8#pragma once 8#pragma once
9 9
10#include <array> 10#include <array>
11#include <bit>
11#include <vector> 12#include <vector>
12 13
13#include "common/alignment.h" 14#include "common/alignment.h"
14#include "common/assert.h" 15#include "common/assert.h"
15#include "common/bit_util.h"
16#include "common/common_funcs.h" 16#include "common/common_funcs.h"
17#include "common/common_types.h" 17#include "common/common_types.h"
18#include "core/hle/kernel/memory/memory_types.h" 18#include "core/hle/kernel/memory/memory_types.h"
@@ -105,7 +105,7 @@ private:
105 ASSERT(depth == 0); 105 ASSERT(depth == 0);
106 return -1; 106 return -1;
107 } 107 }
108 offset = offset * 64 + Common::CountTrailingZeroes64(v); 108 offset = offset * 64 + static_cast<u32>(std::countr_zero(v));
109 ++depth; 109 ++depth;
110 } while (depth < static_cast<s32>(used_depths)); 110 } while (depth < static_cast<s32>(used_depths));
111 111
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
deleted file mode 100644
index 4f8075e0e..000000000
--- a/src/core/hle/kernel/mutex.cpp
+++ /dev/null
@@ -1,170 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <utility>
7#include <vector>
8
9#include "common/assert.h"
10#include "common/logging/log.h"
11#include "core/core.h"
12#include "core/hle/kernel/errors.h"
13#include "core/hle/kernel/handle_table.h"
14#include "core/hle/kernel/k_scheduler.h"
15#include "core/hle/kernel/kernel.h"
16#include "core/hle/kernel/mutex.h"
17#include "core/hle/kernel/object.h"
18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/thread.h"
20#include "core/hle/result.h"
21#include "core/memory.h"
22
23namespace Kernel {
24
25/// Returns the number of threads that are waiting for a mutex, and the highest priority one among
26/// those.
27static std::pair<std::shared_ptr<Thread>, u32> GetHighestPriorityMutexWaitingThread(
28 const std::shared_ptr<Thread>& current_thread, VAddr mutex_addr) {
29
30 std::shared_ptr<Thread> highest_priority_thread;
31 u32 num_waiters = 0;
32
33 for (const auto& thread : current_thread->GetMutexWaitingThreads()) {
34 if (thread->GetMutexWaitAddress() != mutex_addr)
35 continue;
36
37 ++num_waiters;
38 if (highest_priority_thread == nullptr ||
39 thread->GetPriority() < highest_priority_thread->GetPriority()) {
40 highest_priority_thread = thread;
41 }
42 }
43
44 return {highest_priority_thread, num_waiters};
45}
46
47/// Update the mutex owner field of all threads waiting on the mutex to point to the new owner.
48static void TransferMutexOwnership(VAddr mutex_addr, std::shared_ptr<Thread> current_thread,
49 std::shared_ptr<Thread> new_owner) {
50 current_thread->RemoveMutexWaiter(new_owner);
51 const auto threads = current_thread->GetMutexWaitingThreads();
52 for (const auto& thread : threads) {
53 if (thread->GetMutexWaitAddress() != mutex_addr)
54 continue;
55
56 ASSERT(thread->GetLockOwner() == current_thread.get());
57 current_thread->RemoveMutexWaiter(thread);
58 if (new_owner != thread)
59 new_owner->AddMutexWaiter(thread);
60 }
61}
62
63Mutex::Mutex(Core::System& system) : system{system} {}
64Mutex::~Mutex() = default;
65
66ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
67 Handle requesting_thread_handle) {
68 // The mutex address must be 4-byte aligned
69 if ((address % sizeof(u32)) != 0) {
70 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
71 return ERR_INVALID_ADDRESS;
72 }
73
74 auto& kernel = system.Kernel();
75 std::shared_ptr<Thread> current_thread =
76 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
77 {
78 KScopedSchedulerLock lock(kernel);
79 // The mutex address must be 4-byte aligned
80 if ((address % sizeof(u32)) != 0) {
81 return ERR_INVALID_ADDRESS;
82 }
83
84 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
85 std::shared_ptr<Thread> holding_thread = handle_table.Get<Thread>(holding_thread_handle);
86 std::shared_ptr<Thread> requesting_thread =
87 handle_table.Get<Thread>(requesting_thread_handle);
88
89 // TODO(Subv): It is currently unknown if it is possible to lock a mutex in behalf of
90 // another thread.
91 ASSERT(requesting_thread == current_thread);
92
93 current_thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
94
95 const u32 addr_value = system.Memory().Read32(address);
96
97 // If the mutex isn't being held, just return success.
98 if (addr_value != (holding_thread_handle | Mutex::MutexHasWaitersFlag)) {
99 return RESULT_SUCCESS;
100 }
101
102 if (holding_thread == nullptr) {
103 return ERR_INVALID_HANDLE;
104 }
105
106 // Wait until the mutex is released
107 current_thread->SetMutexWaitAddress(address);
108 current_thread->SetWaitHandle(requesting_thread_handle);
109
110 current_thread->SetStatus(ThreadStatus::WaitMutex);
111
112 // Update the lock holder thread's priority to prevent priority inversion.
113 holding_thread->AddMutexWaiter(current_thread);
114 }
115
116 {
117 KScopedSchedulerLock lock(kernel);
118 auto* owner = current_thread->GetLockOwner();
119 if (owner != nullptr) {
120 owner->RemoveMutexWaiter(current_thread);
121 }
122 }
123 return current_thread->GetSignalingResult();
124}
125
126std::pair<ResultCode, std::shared_ptr<Thread>> Mutex::Unlock(std::shared_ptr<Thread> owner,
127 VAddr address) {
128 // The mutex address must be 4-byte aligned
129 if ((address % sizeof(u32)) != 0) {
130 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
131 return {ERR_INVALID_ADDRESS, nullptr};
132 }
133
134 auto [new_owner, num_waiters] = GetHighestPriorityMutexWaitingThread(owner, address);
135 if (new_owner == nullptr) {
136 system.Memory().Write32(address, 0);
137 return {RESULT_SUCCESS, nullptr};
138 }
139 // Transfer the ownership of the mutex from the previous owner to the new one.
140 TransferMutexOwnership(address, owner, new_owner);
141 u32 mutex_value = new_owner->GetWaitHandle();
142 if (num_waiters >= 2) {
143 // Notify the guest that there are still some threads waiting for the mutex
144 mutex_value |= Mutex::MutexHasWaitersFlag;
145 }
146 new_owner->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
147 new_owner->SetLockOwner(nullptr);
148 new_owner->ResumeFromWait();
149
150 system.Memory().Write32(address, mutex_value);
151 return {RESULT_SUCCESS, new_owner};
152}
153
154ResultCode Mutex::Release(VAddr address) {
155 auto& kernel = system.Kernel();
156 KScopedSchedulerLock lock(kernel);
157
158 std::shared_ptr<Thread> current_thread =
159 SharedFrom(kernel.CurrentScheduler()->GetCurrentThread());
160
161 auto [result, new_owner] = Unlock(current_thread, address);
162
163 if (result != RESULT_SUCCESS && new_owner != nullptr) {
164 new_owner->SetSynchronizationResults(nullptr, result);
165 }
166
167 return result;
168}
169
170} // namespace Kernel
diff --git a/src/core/hle/kernel/mutex.h b/src/core/hle/kernel/mutex.h
deleted file mode 100644
index 3b81dc3df..000000000
--- a/src/core/hle/kernel/mutex.h
+++ /dev/null
@@ -1,42 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9union ResultCode;
10
11namespace Core {
12class System;
13}
14
15namespace Kernel {
16
17class Mutex final {
18public:
19 explicit Mutex(Core::System& system);
20 ~Mutex();
21
22 /// Flag that indicates that a mutex still has threads waiting for it.
23 static constexpr u32 MutexHasWaitersFlag = 0x40000000;
24 /// Mask of the bits in a mutex address value that contain the mutex owner.
25 static constexpr u32 MutexOwnerMask = 0xBFFFFFFF;
26
27 /// Attempts to acquire a mutex at the specified address.
28 ResultCode TryAcquire(VAddr address, Handle holding_thread_handle,
29 Handle requesting_thread_handle);
30
31 /// Unlocks a mutex for owner at address
32 std::pair<ResultCode, std::shared_ptr<Thread>> Unlock(std::shared_ptr<Thread> owner,
33 VAddr address);
34
35 /// Releases the mutex at the specified address.
36 ResultCode Release(VAddr address);
37
38private:
39 Core::System& system;
40};
41
42} // namespace Kernel
diff --git a/src/core/hle/kernel/object.h b/src/core/hle/kernel/object.h
index e3391e2af..27124ef67 100644
--- a/src/core/hle/kernel/object.h
+++ b/src/core/hle/kernel/object.h
@@ -50,6 +50,11 @@ public:
50 } 50 }
51 virtual HandleType GetHandleType() const = 0; 51 virtual HandleType GetHandleType() const = 0;
52 52
53 void Close() {
54 // TODO(bunnei): This is a placeholder to decrement the reference count, which we will use
55 // when we implement KAutoObject instead of using shared_ptr.
56 }
57
53 /** 58 /**
54 * Check if a thread can wait on the object 59 * Check if a thread can wait on the object
55 * @return True if a thread can wait on the object, otherwise false 60 * @return True if a thread can wait on the object, otherwise false
diff --git a/src/core/hle/kernel/process.cpp b/src/core/hle/kernel/process.cpp
index b905b486a..37b77fa6e 100644
--- a/src/core/hle/kernel/process.cpp
+++ b/src/core/hle/kernel/process.cpp
@@ -55,7 +55,7 @@ void SetupMainThread(Core::System& system, Process& owner_process, u32 priority,
55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 55 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
56 { 56 {
57 KScopedSchedulerLock lock{kernel}; 57 KScopedSchedulerLock lock{kernel};
58 thread->SetStatus(ThreadStatus::Ready); 58 thread->SetState(ThreadState::Runnable);
59 } 59 }
60} 60}
61} // Anonymous namespace 61} // Anonymous namespace
@@ -162,48 +162,6 @@ u64 Process::GetTotalPhysicalMemoryUsedWithoutSystemResource() const {
162 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage(); 162 return GetTotalPhysicalMemoryUsed() - GetSystemResourceUsage();
163} 163}
164 164
165void Process::InsertConditionVariableThread(std::shared_ptr<Thread> thread) {
166 VAddr cond_var_addr = thread->GetCondVarWaitAddress();
167 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
168 auto it = thread_list.begin();
169 while (it != thread_list.end()) {
170 const std::shared_ptr<Thread> current_thread = *it;
171 if (current_thread->GetPriority() > thread->GetPriority()) {
172 thread_list.insert(it, thread);
173 return;
174 }
175 ++it;
176 }
177 thread_list.push_back(thread);
178}
179
180void Process::RemoveConditionVariableThread(std::shared_ptr<Thread> thread) {
181 VAddr cond_var_addr = thread->GetCondVarWaitAddress();
182 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
183 auto it = thread_list.begin();
184 while (it != thread_list.end()) {
185 const std::shared_ptr<Thread> current_thread = *it;
186 if (current_thread.get() == thread.get()) {
187 thread_list.erase(it);
188 return;
189 }
190 ++it;
191 }
192}
193
194std::vector<std::shared_ptr<Thread>> Process::GetConditionVariableThreads(
195 const VAddr cond_var_addr) {
196 std::vector<std::shared_ptr<Thread>> result{};
197 std::list<std::shared_ptr<Thread>>& thread_list = cond_var_threads[cond_var_addr];
198 auto it = thread_list.begin();
199 while (it != thread_list.end()) {
200 std::shared_ptr<Thread> current_thread = *it;
201 result.push_back(current_thread);
202 ++it;
203 }
204 return result;
205}
206
207void Process::RegisterThread(const Thread* thread) { 165void Process::RegisterThread(const Thread* thread) {
208 thread_list.push_back(thread); 166 thread_list.push_back(thread);
209} 167}
@@ -318,7 +276,7 @@ void Process::PrepareForTermination() {
318 continue; 276 continue;
319 277
320 // TODO(Subv): When are the other running/ready threads terminated? 278 // TODO(Subv): When are the other running/ready threads terminated?
321 ASSERT_MSG(thread->GetStatus() == ThreadStatus::WaitSynch, 279 ASSERT_MSG(thread->GetState() == ThreadState::Waiting,
322 "Exiting processes with non-waiting threads is currently unimplemented"); 280 "Exiting processes with non-waiting threads is currently unimplemented");
323 281
324 thread->Stop(); 282 thread->Stop();
@@ -406,21 +364,18 @@ void Process::LoadModule(CodeSet code_set, VAddr base_addr) {
406 ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite); 364 ReprotectSegment(code_set.DataSegment(), Memory::MemoryPermission::ReadAndWrite);
407} 365}
408 366
367bool Process::IsSignaled() const {
368 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
369 return is_signaled;
370}
371
409Process::Process(Core::System& system) 372Process::Process(Core::System& system)
410 : SynchronizationObject{system.Kernel()}, page_table{std::make_unique<Memory::PageTable>( 373 : KSynchronizationObject{system.Kernel()},
411 system)}, 374 page_table{std::make_unique<Memory::PageTable>(system)}, handle_table{system.Kernel()},
412 handle_table{system.Kernel()}, address_arbiter{system}, mutex{system}, system{system} {} 375 address_arbiter{system}, condition_var{system}, system{system} {}
413 376
414Process::~Process() = default; 377Process::~Process() = default;
415 378
416void Process::Acquire(Thread* thread) {
417 ASSERT_MSG(!ShouldWait(thread), "Object unavailable!");
418}
419
420bool Process::ShouldWait(const Thread* thread) const {
421 return !is_signaled;
422}
423
424void Process::ChangeStatus(ProcessStatus new_status) { 379void Process::ChangeStatus(ProcessStatus new_status) {
425 if (status == new_status) { 380 if (status == new_status) {
426 return; 381 return;
@@ -428,7 +383,7 @@ void Process::ChangeStatus(ProcessStatus new_status) {
428 383
429 status = new_status; 384 status = new_status;
430 is_signaled = true; 385 is_signaled = true;
431 Signal(); 386 NotifyAvailable();
432} 387}
433 388
434ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) { 389ResultCode Process::AllocateMainThreadStack(std::size_t stack_size) {
diff --git a/src/core/hle/kernel/process.h b/src/core/hle/kernel/process.h
index e412e58aa..564e1f27d 100644
--- a/src/core/hle/kernel/process.h
+++ b/src/core/hle/kernel/process.h
@@ -11,11 +11,11 @@
11#include <unordered_map> 11#include <unordered_map>
12#include <vector> 12#include <vector>
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "core/hle/kernel/address_arbiter.h"
15#include "core/hle/kernel/handle_table.h" 14#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/mutex.h" 15#include "core/hle/kernel/k_address_arbiter.h"
16#include "core/hle/kernel/k_condition_variable.h"
17#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/kernel/process_capability.h" 18#include "core/hle/kernel/process_capability.h"
18#include "core/hle/kernel/synchronization_object.h"
19#include "core/hle/result.h" 19#include "core/hle/result.h"
20 20
21namespace Core { 21namespace Core {
@@ -63,7 +63,7 @@ enum class ProcessStatus {
63 DebugBreak, 63 DebugBreak,
64}; 64};
65 65
66class Process final : public SynchronizationObject { 66class Process final : public KSynchronizationObject {
67public: 67public:
68 explicit Process(Core::System& system); 68 explicit Process(Core::System& system);
69 ~Process() override; 69 ~Process() override;
@@ -123,24 +123,30 @@ public:
123 return handle_table; 123 return handle_table;
124 } 124 }
125 125
126 /// Gets a reference to the process' address arbiter. 126 ResultCode SignalToAddress(VAddr address) {
127 AddressArbiter& GetAddressArbiter() { 127 return condition_var.SignalToAddress(address);
128 return address_arbiter;
129 } 128 }
130 129
131 /// Gets a const reference to the process' address arbiter. 130 ResultCode WaitForAddress(Handle handle, VAddr address, u32 tag) {
132 const AddressArbiter& GetAddressArbiter() const { 131 return condition_var.WaitForAddress(handle, address, tag);
133 return address_arbiter;
134 } 132 }
135 133
136 /// Gets a reference to the process' mutex lock. 134 void SignalConditionVariable(u64 cv_key, int32_t count) {
137 Mutex& GetMutex() { 135 return condition_var.Signal(cv_key, count);
138 return mutex;
139 } 136 }
140 137
141 /// Gets a const reference to the process' mutex lock 138 ResultCode WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
142 const Mutex& GetMutex() const { 139 return condition_var.Wait(address, cv_key, tag, ns);
143 return mutex; 140 }
141
142 ResultCode SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value,
143 s32 count) {
144 return address_arbiter.SignalToAddress(address, signal_type, value, count);
145 }
146
147 ResultCode WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
148 s64 timeout) {
149 return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
144 } 150 }
145 151
146 /// Gets the address to the process' dedicated TLS region. 152 /// Gets the address to the process' dedicated TLS region.
@@ -250,15 +256,6 @@ public:
250 return thread_list; 256 return thread_list;
251 } 257 }
252 258
253 /// Insert a thread into the condition variable wait container
254 void InsertConditionVariableThread(std::shared_ptr<Thread> thread);
255
256 /// Remove a thread from the condition variable wait container
257 void RemoveConditionVariableThread(std::shared_ptr<Thread> thread);
258
259 /// Obtain all condition variable threads waiting for some address
260 std::vector<std::shared_ptr<Thread>> GetConditionVariableThreads(VAddr cond_var_addr);
261
262 /// Registers a thread as being created under this process, 259 /// Registers a thread as being created under this process,
263 /// adding it to this process' thread list. 260 /// adding it to this process' thread list.
264 void RegisterThread(const Thread* thread); 261 void RegisterThread(const Thread* thread);
@@ -304,6 +301,8 @@ public:
304 301
305 void LoadModule(CodeSet code_set, VAddr base_addr); 302 void LoadModule(CodeSet code_set, VAddr base_addr);
306 303
304 bool IsSignaled() const override;
305
307 /////////////////////////////////////////////////////////////////////////////////////////////// 306 ///////////////////////////////////////////////////////////////////////////////////////////////
308 // Thread-local storage management 307 // Thread-local storage management
309 308
@@ -314,12 +313,6 @@ public:
314 void FreeTLSRegion(VAddr tls_address); 313 void FreeTLSRegion(VAddr tls_address);
315 314
316private: 315private:
317 /// Checks if the specified thread should wait until this process is available.
318 bool ShouldWait(const Thread* thread) const override;
319
320 /// Acquires/locks this process for the specified thread if it's available.
321 void Acquire(Thread* thread) override;
322
323 /// Changes the process status. If the status is different 316 /// Changes the process status. If the status is different
324 /// from the current process status, then this will trigger 317 /// from the current process status, then this will trigger
325 /// a process signal. 318 /// a process signal.
@@ -373,12 +366,12 @@ private:
373 HandleTable handle_table; 366 HandleTable handle_table;
374 367
375 /// Per-process address arbiter. 368 /// Per-process address arbiter.
376 AddressArbiter address_arbiter; 369 KAddressArbiter address_arbiter;
377 370
378 /// The per-process mutex lock instance used for handling various 371 /// The per-process mutex lock instance used for handling various
379 /// forms of services, such as lock arbitration, and condition 372 /// forms of services, such as lock arbitration, and condition
380 /// variable related facilities. 373 /// variable related facilities.
381 Mutex mutex; 374 KConditionVariable condition_var;
382 375
383 /// Address indicating the location of the process' dedicated TLS region. 376 /// Address indicating the location of the process' dedicated TLS region.
384 VAddr tls_region_address = 0; 377 VAddr tls_region_address = 0;
@@ -389,9 +382,6 @@ private:
389 /// List of threads that are running with this process as their owner. 382 /// List of threads that are running with this process as their owner.
390 std::list<const Thread*> thread_list; 383 std::list<const Thread*> thread_list;
391 384
392 /// List of threads waiting for a condition variable
393 std::unordered_map<VAddr, std::list<std::shared_ptr<Thread>>> cond_var_threads;
394
395 /// Address of the top of the main thread's stack 385 /// Address of the top of the main thread's stack
396 VAddr main_thread_stack_top{}; 386 VAddr main_thread_stack_top{};
397 387
@@ -410,6 +400,8 @@ private:
410 /// Schedule count of this process 400 /// Schedule count of this process
411 s64 schedule_count{}; 401 s64 schedule_count{};
412 402
403 bool is_signaled{};
404
413 /// System context 405 /// System context
414 Core::System& system; 406 Core::System& system;
415}; 407};
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 0f128c586..0566311b6 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <bit>
6
5#include "common/bit_util.h" 7#include "common/bit_util.h"
6#include "common/logging/log.h" 8#include "common/logging/log.h"
7#include "core/hle/kernel/errors.h" 9#include "core/hle/kernel/errors.h"
@@ -60,7 +62,7 @@ constexpr CapabilityType GetCapabilityType(u32 value) {
60 62
61u32 GetFlagBitOffset(CapabilityType type) { 63u32 GetFlagBitOffset(CapabilityType type) {
62 const auto value = static_cast<u32>(type); 64 const auto value = static_cast<u32>(type);
63 return static_cast<u32>(Common::BitSize<u32>() - Common::CountLeadingZeroes32(value)); 65 return static_cast<u32>(Common::BitSize<u32>() - static_cast<u32>(std::countl_zero(value)));
64} 66}
65 67
66} // Anonymous namespace 68} // Anonymous namespace
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index cea262ce0..99ed0857e 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -14,24 +14,22 @@
14 14
15namespace Kernel { 15namespace Kernel {
16 16
17ReadableEvent::ReadableEvent(KernelCore& kernel) : SynchronizationObject{kernel} {} 17ReadableEvent::ReadableEvent(KernelCore& kernel) : KSynchronizationObject{kernel} {}
18ReadableEvent::~ReadableEvent() = default; 18ReadableEvent::~ReadableEvent() = default;
19 19
20bool ReadableEvent::ShouldWait(const Thread* thread) const {
21 return !is_signaled;
22}
23
24void ReadableEvent::Acquire(Thread* thread) {
25 ASSERT_MSG(IsSignaled(), "object unavailable!");
26}
27
28void ReadableEvent::Signal() { 20void ReadableEvent::Signal() {
29 if (is_signaled) { 21 if (is_signaled) {
30 return; 22 return;
31 } 23 }
32 24
33 is_signaled = true; 25 is_signaled = true;
34 SynchronizationObject::Signal(); 26 NotifyAvailable();
27}
28
29bool ReadableEvent::IsSignaled() const {
30 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
31
32 return is_signaled;
35} 33}
36 34
37void ReadableEvent::Clear() { 35void ReadableEvent::Clear() {
diff --git a/src/core/hle/kernel/readable_event.h b/src/core/hle/kernel/readable_event.h
index 3264dd066..34e477274 100644
--- a/src/core/hle/kernel/readable_event.h
+++ b/src/core/hle/kernel/readable_event.h
@@ -4,8 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "core/hle/kernel/k_synchronization_object.h"
7#include "core/hle/kernel/object.h" 8#include "core/hle/kernel/object.h"
8#include "core/hle/kernel/synchronization_object.h"
9 9
10union ResultCode; 10union ResultCode;
11 11
@@ -14,7 +14,7 @@ namespace Kernel {
14class KernelCore; 14class KernelCore;
15class WritableEvent; 15class WritableEvent;
16 16
17class ReadableEvent final : public SynchronizationObject { 17class ReadableEvent final : public KSynchronizationObject {
18 friend class WritableEvent; 18 friend class WritableEvent;
19 19
20public: 20public:
@@ -32,9 +32,6 @@ public:
32 return HANDLE_TYPE; 32 return HANDLE_TYPE;
33 } 33 }
34 34
35 bool ShouldWait(const Thread* thread) const override;
36 void Acquire(Thread* thread) override;
37
38 /// Unconditionally clears the readable event's state. 35 /// Unconditionally clears the readable event's state.
39 void Clear(); 36 void Clear();
40 37
@@ -46,11 +43,14 @@ public:
46 /// then ERR_INVALID_STATE will be returned. 43 /// then ERR_INVALID_STATE will be returned.
47 ResultCode Reset(); 44 ResultCode Reset();
48 45
49 void Signal() override; 46 void Signal();
47
48 bool IsSignaled() const override;
50 49
51private: 50private:
52 explicit ReadableEvent(KernelCore& kernel); 51 explicit ReadableEvent(KernelCore& kernel);
53 52
53 bool is_signaled{};
54 std::string name; ///< Name of event (optional) 54 std::string name; ///< Name of event (optional)
55}; 55};
56 56
diff --git a/src/core/hle/kernel/server_port.cpp b/src/core/hle/kernel/server_port.cpp
index a549ae9d7..82857f93b 100644
--- a/src/core/hle/kernel/server_port.cpp
+++ b/src/core/hle/kernel/server_port.cpp
@@ -13,7 +13,7 @@
13 13
14namespace Kernel { 14namespace Kernel {
15 15
16ServerPort::ServerPort(KernelCore& kernel) : SynchronizationObject{kernel} {} 16ServerPort::ServerPort(KernelCore& kernel) : KSynchronizationObject{kernel} {}
17ServerPort::~ServerPort() = default; 17ServerPort::~ServerPort() = default;
18 18
19ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() { 19ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
@@ -28,15 +28,9 @@ ResultVal<std::shared_ptr<ServerSession>> ServerPort::Accept() {
28 28
29void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) { 29void ServerPort::AppendPendingSession(std::shared_ptr<ServerSession> pending_session) {
30 pending_sessions.push_back(std::move(pending_session)); 30 pending_sessions.push_back(std::move(pending_session));
31} 31 if (pending_sessions.size() == 1) {
32 32 NotifyAvailable();
33bool ServerPort::ShouldWait(const Thread* thread) const { 33 }
34 // If there are no pending sessions, we wait until a new one is added.
35 return pending_sessions.empty();
36}
37
38void ServerPort::Acquire(Thread* thread) {
39 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
40} 34}
41 35
42bool ServerPort::IsSignaled() const { 36bool ServerPort::IsSignaled() const {
diff --git a/src/core/hle/kernel/server_port.h b/src/core/hle/kernel/server_port.h
index 41b191b86..6470df993 100644
--- a/src/core/hle/kernel/server_port.h
+++ b/src/core/hle/kernel/server_port.h
@@ -9,8 +9,8 @@
9#include <utility> 9#include <utility>
10#include <vector> 10#include <vector>
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "core/hle/kernel/k_synchronization_object.h"
12#include "core/hle/kernel/object.h" 13#include "core/hle/kernel/object.h"
13#include "core/hle/kernel/synchronization_object.h"
14#include "core/hle/result.h" 14#include "core/hle/result.h"
15 15
16namespace Kernel { 16namespace Kernel {
@@ -20,7 +20,7 @@ class KernelCore;
20class ServerSession; 20class ServerSession;
21class SessionRequestHandler; 21class SessionRequestHandler;
22 22
23class ServerPort final : public SynchronizationObject { 23class ServerPort final : public KSynchronizationObject {
24public: 24public:
25 explicit ServerPort(KernelCore& kernel); 25 explicit ServerPort(KernelCore& kernel);
26 ~ServerPort() override; 26 ~ServerPort() override;
@@ -79,9 +79,6 @@ public:
79 /// waiting to be accepted by this port. 79 /// waiting to be accepted by this port.
80 void AppendPendingSession(std::shared_ptr<ServerSession> pending_session); 80 void AppendPendingSession(std::shared_ptr<ServerSession> pending_session);
81 81
82 bool ShouldWait(const Thread* thread) const override;
83 void Acquire(Thread* thread) override;
84
85 bool IsSignaled() const override; 82 bool IsSignaled() const override;
86 83
87private: 84private:
diff --git a/src/core/hle/kernel/server_session.cpp b/src/core/hle/kernel/server_session.cpp
index b40fe3916..4f2bb7822 100644
--- a/src/core/hle/kernel/server_session.cpp
+++ b/src/core/hle/kernel/server_session.cpp
@@ -24,7 +24,7 @@
24 24
25namespace Kernel { 25namespace Kernel {
26 26
27ServerSession::ServerSession(KernelCore& kernel) : SynchronizationObject{kernel} {} 27ServerSession::ServerSession(KernelCore& kernel) : KSynchronizationObject{kernel} {}
28 28
29ServerSession::~ServerSession() { 29ServerSession::~ServerSession() {
30 kernel.ReleaseServiceThread(service_thread); 30 kernel.ReleaseServiceThread(service_thread);
@@ -42,16 +42,6 @@ ResultVal<std::shared_ptr<ServerSession>> ServerSession::Create(KernelCore& kern
42 return MakeResult(std::move(session)); 42 return MakeResult(std::move(session));
43} 43}
44 44
45bool ServerSession::ShouldWait(const Thread* thread) const {
46 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
47 if (!parent->Client()) {
48 return false;
49 }
50
51 // Wait if we have no pending requests, or if we're currently handling a request.
52 return pending_requesting_threads.empty() || currently_handling != nullptr;
53}
54
55bool ServerSession::IsSignaled() const { 45bool ServerSession::IsSignaled() const {
56 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive. 46 // Closed sessions should never wait, an error will be returned from svcReplyAndReceive.
57 if (!parent->Client()) { 47 if (!parent->Client()) {
@@ -62,15 +52,6 @@ bool ServerSession::IsSignaled() const {
62 return !pending_requesting_threads.empty() && currently_handling == nullptr; 52 return !pending_requesting_threads.empty() && currently_handling == nullptr;
63} 53}
64 54
65void ServerSession::Acquire(Thread* thread) {
66 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
67 // We are now handling a request, pop it from the stack.
68 // TODO(Subv): What happens if the client endpoint is closed before any requests are made?
69 ASSERT(!pending_requesting_threads.empty());
70 currently_handling = pending_requesting_threads.back();
71 pending_requesting_threads.pop_back();
72}
73
74void ServerSession::ClientDisconnected() { 55void ServerSession::ClientDisconnected() {
75 // We keep a shared pointer to the hle handler to keep it alive throughout 56 // We keep a shared pointer to the hle handler to keep it alive throughout
76 // the call to ClientDisconnected, as ClientDisconnected invalidates the 57 // the call to ClientDisconnected, as ClientDisconnected invalidates the
@@ -172,7 +153,7 @@ ResultCode ServerSession::CompleteSyncRequest(HLERequestContext& context) {
172 { 153 {
173 KScopedSchedulerLock lock(kernel); 154 KScopedSchedulerLock lock(kernel);
174 if (!context.IsThreadWaiting()) { 155 if (!context.IsThreadWaiting()) {
175 context.GetThread().ResumeFromWait(); 156 context.GetThread().Wakeup();
176 context.GetThread().SetSynchronizationResults(nullptr, result); 157 context.GetThread().SetSynchronizationResults(nullptr, result);
177 } 158 }
178 } 159 }
diff --git a/src/core/hle/kernel/server_session.h b/src/core/hle/kernel/server_session.h
index e8d1d99ea..9155cf7f5 100644
--- a/src/core/hle/kernel/server_session.h
+++ b/src/core/hle/kernel/server_session.h
@@ -10,8 +10,8 @@
10#include <vector> 10#include <vector>
11 11
12#include "common/threadsafe_queue.h" 12#include "common/threadsafe_queue.h"
13#include "core/hle/kernel/k_synchronization_object.h"
13#include "core/hle/kernel/service_thread.h" 14#include "core/hle/kernel/service_thread.h"
14#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/result.h" 15#include "core/hle/result.h"
16 16
17namespace Core::Memory { 17namespace Core::Memory {
@@ -43,7 +43,7 @@ class Thread;
43 * After the server replies to the request, the response is marshalled back to the caller's 43 * After the server replies to the request, the response is marshalled back to the caller's
44 * TLS buffer and control is transferred back to it. 44 * TLS buffer and control is transferred back to it.
45 */ 45 */
46class ServerSession final : public SynchronizationObject { 46class ServerSession final : public KSynchronizationObject {
47 friend class ServiceThread; 47 friend class ServiceThread;
48 48
49public: 49public:
@@ -77,8 +77,6 @@ public:
77 return parent.get(); 77 return parent.get();
78 } 78 }
79 79
80 bool IsSignaled() const override;
81
82 /** 80 /**
83 * Sets the HLE handler for the session. This handler will be called to service IPC requests 81 * Sets the HLE handler for the session. This handler will be called to service IPC requests
84 * instead of the regular IPC machinery. (The regular IPC machinery is currently not 82 * instead of the regular IPC machinery. (The regular IPC machinery is currently not
@@ -100,10 +98,6 @@ public:
100 ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory, 98 ResultCode HandleSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory,
101 Core::Timing::CoreTiming& core_timing); 99 Core::Timing::CoreTiming& core_timing);
102 100
103 bool ShouldWait(const Thread* thread) const override;
104
105 void Acquire(Thread* thread) override;
106
107 /// Called when a client disconnection occurs. 101 /// Called when a client disconnection occurs.
108 void ClientDisconnected(); 102 void ClientDisconnected();
109 103
@@ -130,6 +124,8 @@ public:
130 convert_to_domain = true; 124 convert_to_domain = true;
131 } 125 }
132 126
127 bool IsSignaled() const override;
128
133private: 129private:
134 /// Queues a sync request from the emulated application. 130 /// Queues a sync request from the emulated application.
135 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory); 131 ResultCode QueueSyncRequest(std::shared_ptr<Thread> thread, Core::Memory::Memory& memory);
diff --git a/src/core/hle/kernel/session.cpp b/src/core/hle/kernel/session.cpp
index e4dd53e24..75304b961 100644
--- a/src/core/hle/kernel/session.cpp
+++ b/src/core/hle/kernel/session.cpp
@@ -9,7 +9,7 @@
9 9
10namespace Kernel { 10namespace Kernel {
11 11
12Session::Session(KernelCore& kernel) : SynchronizationObject{kernel} {} 12Session::Session(KernelCore& kernel) : KSynchronizationObject{kernel} {}
13Session::~Session() = default; 13Session::~Session() = default;
14 14
15Session::SessionPair Session::Create(KernelCore& kernel, std::string name) { 15Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
@@ -24,18 +24,9 @@ Session::SessionPair Session::Create(KernelCore& kernel, std::string name) {
24 return std::make_pair(std::move(client_session), std::move(server_session)); 24 return std::make_pair(std::move(client_session), std::move(server_session));
25} 25}
26 26
27bool Session::ShouldWait(const Thread* thread) const {
28 UNIMPLEMENTED();
29 return {};
30}
31
32bool Session::IsSignaled() const { 27bool Session::IsSignaled() const {
33 UNIMPLEMENTED(); 28 UNIMPLEMENTED();
34 return true; 29 return true;
35} 30}
36 31
37void Session::Acquire(Thread* thread) {
38 UNIMPLEMENTED();
39}
40
41} // namespace Kernel 32} // namespace Kernel
diff --git a/src/core/hle/kernel/session.h b/src/core/hle/kernel/session.h
index 7cd9c0d77..f6dd2c1d2 100644
--- a/src/core/hle/kernel/session.h
+++ b/src/core/hle/kernel/session.h
@@ -8,7 +8,7 @@
8#include <string> 8#include <string>
9#include <utility> 9#include <utility>
10 10
11#include "core/hle/kernel/synchronization_object.h" 11#include "core/hle/kernel/k_synchronization_object.h"
12 12
13namespace Kernel { 13namespace Kernel {
14 14
@@ -19,7 +19,7 @@ class ServerSession;
19 * Parent structure to link the client and server endpoints of a session with their associated 19 * Parent structure to link the client and server endpoints of a session with their associated
20 * client port. 20 * client port.
21 */ 21 */
22class Session final : public SynchronizationObject { 22class Session final : public KSynchronizationObject {
23public: 23public:
24 explicit Session(KernelCore& kernel); 24 explicit Session(KernelCore& kernel);
25 ~Session() override; 25 ~Session() override;
@@ -37,12 +37,8 @@ public:
37 return HANDLE_TYPE; 37 return HANDLE_TYPE;
38 } 38 }
39 39
40 bool ShouldWait(const Thread* thread) const override;
41
42 bool IsSignaled() const override; 40 bool IsSignaled() const override;
43 41
44 void Acquire(Thread* thread) override;
45
46 std::shared_ptr<ClientSession> Client() { 42 std::shared_ptr<ClientSession> Client() {
47 if (auto result{client.lock()}) { 43 if (auto result{client.lock()}) {
48 return result; 44 return result;
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index de3ed25da..cc8b661af 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -10,6 +10,7 @@
10 10
11#include "common/alignment.h" 11#include "common/alignment.h"
12#include "common/assert.h" 12#include "common/assert.h"
13#include "common/common_funcs.h"
13#include "common/fiber.h" 14#include "common/fiber.h"
14#include "common/logging/log.h" 15#include "common/logging/log.h"
15#include "common/microprofile.h" 16#include "common/microprofile.h"
@@ -19,26 +20,28 @@
19#include "core/core_timing.h" 20#include "core/core_timing.h"
20#include "core/core_timing_util.h" 21#include "core/core_timing_util.h"
21#include "core/cpu_manager.h" 22#include "core/cpu_manager.h"
22#include "core/hle/kernel/address_arbiter.h"
23#include "core/hle/kernel/client_port.h" 23#include "core/hle/kernel/client_port.h"
24#include "core/hle/kernel/client_session.h" 24#include "core/hle/kernel/client_session.h"
25#include "core/hle/kernel/errors.h" 25#include "core/hle/kernel/errors.h"
26#include "core/hle/kernel/handle_table.h" 26#include "core/hle/kernel/handle_table.h"
27#include "core/hle/kernel/k_address_arbiter.h"
28#include "core/hle/kernel/k_condition_variable.h"
27#include "core/hle/kernel/k_scheduler.h" 29#include "core/hle/kernel/k_scheduler.h"
28#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 30#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
31#include "core/hle/kernel/k_synchronization_object.h"
29#include "core/hle/kernel/kernel.h" 32#include "core/hle/kernel/kernel.h"
30#include "core/hle/kernel/memory/memory_block.h" 33#include "core/hle/kernel/memory/memory_block.h"
34#include "core/hle/kernel/memory/memory_layout.h"
31#include "core/hle/kernel/memory/page_table.h" 35#include "core/hle/kernel/memory/page_table.h"
32#include "core/hle/kernel/mutex.h"
33#include "core/hle/kernel/physical_core.h" 36#include "core/hle/kernel/physical_core.h"
34#include "core/hle/kernel/process.h" 37#include "core/hle/kernel/process.h"
35#include "core/hle/kernel/readable_event.h" 38#include "core/hle/kernel/readable_event.h"
36#include "core/hle/kernel/resource_limit.h" 39#include "core/hle/kernel/resource_limit.h"
37#include "core/hle/kernel/shared_memory.h" 40#include "core/hle/kernel/shared_memory.h"
38#include "core/hle/kernel/svc.h" 41#include "core/hle/kernel/svc.h"
42#include "core/hle/kernel/svc_results.h"
39#include "core/hle/kernel/svc_types.h" 43#include "core/hle/kernel/svc_types.h"
40#include "core/hle/kernel/svc_wrap.h" 44#include "core/hle/kernel/svc_wrap.h"
41#include "core/hle/kernel/synchronization.h"
42#include "core/hle/kernel/thread.h" 45#include "core/hle/kernel/thread.h"
43#include "core/hle/kernel/time_manager.h" 46#include "core/hle/kernel/time_manager.h"
44#include "core/hle/kernel/transfer_memory.h" 47#include "core/hle/kernel/transfer_memory.h"
@@ -343,27 +346,11 @@ static ResultCode SendSyncRequest(Core::System& system, Handle handle) {
343 auto thread = kernel.CurrentScheduler()->GetCurrentThread(); 346 auto thread = kernel.CurrentScheduler()->GetCurrentThread();
344 { 347 {
345 KScopedSchedulerLock lock(kernel); 348 KScopedSchedulerLock lock(kernel);
346 thread->InvalidateHLECallback(); 349 thread->SetState(ThreadState::Waiting);
347 thread->SetStatus(ThreadStatus::WaitIPC); 350 thread->SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::IPC);
348 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming()); 351 session->SendSyncRequest(SharedFrom(thread), system.Memory(), system.CoreTiming());
349 } 352 }
350 353
351 if (thread->HasHLECallback()) {
352 Handle event_handle = thread->GetHLETimeEvent();
353 if (event_handle != InvalidHandle) {
354 auto& time_manager = kernel.TimeManager();
355 time_manager.UnscheduleTimeEvent(event_handle);
356 }
357
358 {
359 KScopedSchedulerLock lock(kernel);
360 auto* sync_object = thread->GetHLESyncObject();
361 sync_object->RemoveWaitingThread(SharedFrom(thread));
362 }
363
364 thread->InvokeHLECallback(SharedFrom(thread));
365 }
366
367 return thread->GetSignalingResult(); 354 return thread->GetSignalingResult();
368} 355}
369 356
@@ -436,7 +423,7 @@ static ResultCode GetProcessId32(Core::System& system, u32* process_id_low, u32*
436} 423}
437 424
438/// Wait for the given handles to synchronize, timeout after the specified nanoseconds 425/// Wait for the given handles to synchronize, timeout after the specified nanoseconds
439static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr handles_address, 426static ResultCode WaitSynchronization(Core::System& system, s32* index, VAddr handles_address,
440 u64 handle_count, s64 nano_seconds) { 427 u64 handle_count, s64 nano_seconds) {
441 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}", 428 LOG_TRACE(Kernel_SVC, "called handles_address=0x{:X}, handle_count={}, nano_seconds={}",
442 handles_address, handle_count, nano_seconds); 429 handles_address, handle_count, nano_seconds);
@@ -458,28 +445,26 @@ static ResultCode WaitSynchronization(Core::System& system, Handle* index, VAddr
458 } 445 }
459 446
460 auto& kernel = system.Kernel(); 447 auto& kernel = system.Kernel();
461 Thread::ThreadSynchronizationObjects objects(handle_count); 448 std::vector<KSynchronizationObject*> objects(handle_count);
462 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable(); 449 const auto& handle_table = kernel.CurrentProcess()->GetHandleTable();
463 450
464 for (u64 i = 0; i < handle_count; ++i) { 451 for (u64 i = 0; i < handle_count; ++i) {
465 const Handle handle = memory.Read32(handles_address + i * sizeof(Handle)); 452 const Handle handle = memory.Read32(handles_address + i * sizeof(Handle));
466 const auto object = handle_table.Get<SynchronizationObject>(handle); 453 const auto object = handle_table.Get<KSynchronizationObject>(handle);
467 454
468 if (object == nullptr) { 455 if (object == nullptr) {
469 LOG_ERROR(Kernel_SVC, "Object is a nullptr"); 456 LOG_ERROR(Kernel_SVC, "Object is a nullptr");
470 return ERR_INVALID_HANDLE; 457 return ERR_INVALID_HANDLE;
471 } 458 }
472 459
473 objects[i] = object; 460 objects[i] = object.get();
474 } 461 }
475 auto& synchronization = kernel.Synchronization(); 462 return KSynchronizationObject::Wait(kernel, index, objects.data(),
476 const auto [result, handle_result] = synchronization.WaitFor(objects, nano_seconds); 463 static_cast<s32>(objects.size()), nano_seconds);
477 *index = handle_result;
478 return result;
479} 464}
480 465
481static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address, 466static ResultCode WaitSynchronization32(Core::System& system, u32 timeout_low, u32 handles_address,
482 s32 handle_count, u32 timeout_high, Handle* index) { 467 s32 handle_count, u32 timeout_high, s32* index) {
483 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)}; 468 const s64 nano_seconds{(static_cast<s64>(timeout_high) << 32) | static_cast<s64>(timeout_low)};
484 return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds); 469 return WaitSynchronization(system, index, handles_address, handle_count, nano_seconds);
485} 470}
@@ -504,56 +489,37 @@ static ResultCode CancelSynchronization32(Core::System& system, Handle thread_ha
504 return CancelSynchronization(system, thread_handle); 489 return CancelSynchronization(system, thread_handle);
505} 490}
506 491
507/// Attempts to locks a mutex, creating it if it does not already exist 492/// Attempts to locks a mutex
508static ResultCode ArbitrateLock(Core::System& system, Handle holding_thread_handle, 493static ResultCode ArbitrateLock(Core::System& system, Handle thread_handle, VAddr address,
509 VAddr mutex_addr, Handle requesting_thread_handle) { 494 u32 tag) {
510 LOG_TRACE(Kernel_SVC, 495 LOG_TRACE(Kernel_SVC, "called thread_handle=0x{:08X}, address=0x{:X}, tag=0x{:08X}",
511 "called holding_thread_handle=0x{:08X}, mutex_addr=0x{:X}, " 496 thread_handle, address, tag);
512 "requesting_current_thread_handle=0x{:08X}",
513 holding_thread_handle, mutex_addr, requesting_thread_handle);
514
515 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
516 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
517 mutex_addr);
518 return ERR_INVALID_ADDRESS_STATE;
519 }
520 497
521 if (!Common::IsWordAligned(mutex_addr)) { 498 // Validate the input address.
522 LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); 499 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
523 return ERR_INVALID_ADDRESS; 500 R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
524 }
525 501
526 auto* const current_process = system.Kernel().CurrentProcess(); 502 return system.Kernel().CurrentProcess()->WaitForAddress(thread_handle, address, tag);
527 return current_process->GetMutex().TryAcquire(mutex_addr, holding_thread_handle,
528 requesting_thread_handle);
529} 503}
530 504
531static ResultCode ArbitrateLock32(Core::System& system, Handle holding_thread_handle, 505static ResultCode ArbitrateLock32(Core::System& system, Handle thread_handle, u32 address,
532 u32 mutex_addr, Handle requesting_thread_handle) { 506 u32 tag) {
533 return ArbitrateLock(system, holding_thread_handle, mutex_addr, requesting_thread_handle); 507 return ArbitrateLock(system, thread_handle, address, tag);
534} 508}
535 509
536/// Unlock a mutex 510/// Unlock a mutex
537static ResultCode ArbitrateUnlock(Core::System& system, VAddr mutex_addr) { 511static ResultCode ArbitrateUnlock(Core::System& system, VAddr address) {
538 LOG_TRACE(Kernel_SVC, "called mutex_addr=0x{:X}", mutex_addr); 512 LOG_TRACE(Kernel_SVC, "called address=0x{:X}", address);
539
540 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) {
541 LOG_ERROR(Kernel_SVC, "Mutex Address is a kernel virtual address, mutex_addr={:016X}",
542 mutex_addr);
543 return ERR_INVALID_ADDRESS_STATE;
544 }
545 513
546 if (!Common::IsWordAligned(mutex_addr)) { 514 // Validate the input address.
547 LOG_ERROR(Kernel_SVC, "Mutex Address is not word aligned, mutex_addr={:016X}", mutex_addr); 515 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
548 return ERR_INVALID_ADDRESS; 516 R_UNLESS(Common::IsAligned(address, sizeof(u32)), Svc::ResultInvalidAddress);
549 }
550 517
551 auto* const current_process = system.Kernel().CurrentProcess(); 518 return system.Kernel().CurrentProcess()->SignalToAddress(address);
552 return current_process->GetMutex().Release(mutex_addr);
553} 519}
554 520
555static ResultCode ArbitrateUnlock32(Core::System& system, u32 mutex_addr) { 521static ResultCode ArbitrateUnlock32(Core::System& system, u32 address) {
556 return ArbitrateUnlock(system, mutex_addr); 522 return ArbitrateUnlock(system, address);
557} 523}
558 524
559enum class BreakType : u32 { 525enum class BreakType : u32 {
@@ -1180,7 +1146,7 @@ static ResultCode SetThreadPriority(Core::System& system, Handle handle, u32 pri
1180 return ERR_INVALID_HANDLE; 1146 return ERR_INVALID_HANDLE;
1181 } 1147 }
1182 1148
1183 thread->SetPriority(priority); 1149 thread->SetBasePriority(priority);
1184 1150
1185 return RESULT_SUCCESS; 1151 return RESULT_SUCCESS;
1186} 1152}
@@ -1559,7 +1525,7 @@ static ResultCode StartThread(Core::System& system, Handle thread_handle) {
1559 return ERR_INVALID_HANDLE; 1525 return ERR_INVALID_HANDLE;
1560 } 1526 }
1561 1527
1562 ASSERT(thread->GetStatus() == ThreadStatus::Dormant); 1528 ASSERT(thread->GetState() == ThreadState::Initialized);
1563 1529
1564 return thread->Start(); 1530 return thread->Start();
1565} 1531}
@@ -1620,224 +1586,135 @@ static void SleepThread32(Core::System& system, u32 nanoseconds_low, u32 nanosec
1620} 1586}
1621 1587
1622/// Wait process wide key atomic 1588/// Wait process wide key atomic
1623static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr mutex_addr, 1589static ResultCode WaitProcessWideKeyAtomic(Core::System& system, VAddr address, VAddr cv_key,
1624 VAddr condition_variable_addr, Handle thread_handle, 1590 u32 tag, s64 timeout_ns) {
1625 s64 nano_seconds) { 1591 LOG_TRACE(Kernel_SVC, "called address={:X}, cv_key={:X}, tag=0x{:08X}, timeout_ns={}", address,
1626 LOG_TRACE( 1592 cv_key, tag, timeout_ns);
1627 Kernel_SVC, 1593
1628 "called mutex_addr={:X}, condition_variable_addr={:X}, thread_handle=0x{:08X}, timeout={}", 1594 // Validate input.
1629 mutex_addr, condition_variable_addr, thread_handle, nano_seconds); 1595 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1630 1596 R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
1631 if (Core::Memory::IsKernelVirtualAddress(mutex_addr)) { 1597
1632 LOG_ERROR( 1598 // Convert timeout from nanoseconds to ticks.
1633 Kernel_SVC, 1599 s64 timeout{};
1634 "Given mutex address must not be within the kernel address space. address=0x{:016X}", 1600 if (timeout_ns > 0) {
1635 mutex_addr); 1601 const s64 offset_tick(timeout_ns);
1636 return ERR_INVALID_ADDRESS_STATE; 1602 if (offset_tick > 0) {
1637 } 1603 timeout = offset_tick + 2;
1638 1604 if (timeout <= 0) {
1639 if (!Common::IsWordAligned(mutex_addr)) { 1605 timeout = std::numeric_limits<s64>::max();
1640 LOG_ERROR(Kernel_SVC, "Given mutex address must be word-aligned. address=0x{:016X}", 1606 }
1641 mutex_addr); 1607 } else {
1642 return ERR_INVALID_ADDRESS; 1608 timeout = std::numeric_limits<s64>::max();
1643 }
1644
1645 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4));
1646 auto& kernel = system.Kernel();
1647 Handle event_handle;
1648 Thread* current_thread = kernel.CurrentScheduler()->GetCurrentThread();
1649 auto* const current_process = kernel.CurrentProcess();
1650 {
1651 KScopedSchedulerLockAndSleep lock(kernel, event_handle, current_thread, nano_seconds);
1652 const auto& handle_table = current_process->GetHandleTable();
1653 std::shared_ptr<Thread> thread = handle_table.Get<Thread>(thread_handle);
1654 ASSERT(thread);
1655
1656 current_thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
1657
1658 if (thread->IsPendingTermination()) {
1659 lock.CancelSleep();
1660 return ERR_THREAD_TERMINATING;
1661 }
1662
1663 const auto release_result = current_process->GetMutex().Release(mutex_addr);
1664 if (release_result.IsError()) {
1665 lock.CancelSleep();
1666 return release_result;
1667 }
1668
1669 if (nano_seconds == 0) {
1670 lock.CancelSleep();
1671 return RESULT_TIMEOUT;
1672 }
1673
1674 current_thread->SetCondVarWaitAddress(condition_variable_addr);
1675 current_thread->SetMutexWaitAddress(mutex_addr);
1676 current_thread->SetWaitHandle(thread_handle);
1677 current_thread->SetStatus(ThreadStatus::WaitCondVar);
1678 current_process->InsertConditionVariableThread(SharedFrom(current_thread));
1679 }
1680
1681 if (event_handle != InvalidHandle) {
1682 auto& time_manager = kernel.TimeManager();
1683 time_manager.UnscheduleTimeEvent(event_handle);
1684 }
1685
1686 {
1687 KScopedSchedulerLock lock(kernel);
1688
1689 auto* owner = current_thread->GetLockOwner();
1690 if (owner != nullptr) {
1691 owner->RemoveMutexWaiter(SharedFrom(current_thread));
1692 } 1609 }
1693 1610 } else {
1694 current_process->RemoveConditionVariableThread(SharedFrom(current_thread)); 1611 timeout = timeout_ns;
1695 } 1612 }
1696 // Note: Deliberately don't attempt to inherit the lock owner's priority.
1697 1613
1698 return current_thread->GetSignalingResult(); 1614 // Wait on the condition variable.
1615 return system.Kernel().CurrentProcess()->WaitConditionVariable(
1616 address, Common::AlignDown(cv_key, sizeof(u32)), tag, timeout);
1699} 1617}
1700 1618
1701static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 mutex_addr, 1619static ResultCode WaitProcessWideKeyAtomic32(Core::System& system, u32 address, u32 cv_key, u32 tag,
1702 u32 condition_variable_addr, Handle thread_handle, 1620 u32 timeout_ns_low, u32 timeout_ns_high) {
1703 u32 nanoseconds_low, u32 nanoseconds_high) { 1621 const auto timeout_ns = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
1704 const auto nanoseconds = static_cast<s64>(nanoseconds_low | (u64{nanoseconds_high} << 32)); 1622 return WaitProcessWideKeyAtomic(system, address, cv_key, tag, timeout_ns);
1705 return WaitProcessWideKeyAtomic(system, mutex_addr, condition_variable_addr, thread_handle,
1706 nanoseconds);
1707} 1623}
1708 1624
1709/// Signal process wide key 1625/// Signal process wide key
1710static void SignalProcessWideKey(Core::System& system, VAddr condition_variable_addr, s32 target) { 1626static void SignalProcessWideKey(Core::System& system, VAddr cv_key, s32 count) {
1711 LOG_TRACE(Kernel_SVC, "called, condition_variable_addr=0x{:X}, target=0x{:08X}", 1627 LOG_TRACE(Kernel_SVC, "called, cv_key=0x{:X}, count=0x{:08X}", cv_key, count);
1712 condition_variable_addr, target);
1713 1628
1714 ASSERT(condition_variable_addr == Common::AlignDown(condition_variable_addr, 4)); 1629 // Signal the condition variable.
1630 return system.Kernel().CurrentProcess()->SignalConditionVariable(
1631 Common::AlignDown(cv_key, sizeof(u32)), count);
1632}
1715 1633
1716 // Retrieve a list of all threads that are waiting for this condition variable. 1634static void SignalProcessWideKey32(Core::System& system, u32 cv_key, s32 count) {
1717 auto& kernel = system.Kernel(); 1635 SignalProcessWideKey(system, cv_key, count);
1718 KScopedSchedulerLock lock(kernel); 1636}
1719 auto* const current_process = kernel.CurrentProcess();
1720 std::vector<std::shared_ptr<Thread>> waiting_threads =
1721 current_process->GetConditionVariableThreads(condition_variable_addr);
1722
1723 // Only process up to 'target' threads, unless 'target' is less equal 0, in which case process
1724 // them all.
1725 std::size_t last = waiting_threads.size();
1726 if (target > 0) {
1727 last = std::min(waiting_threads.size(), static_cast<std::size_t>(target));
1728 }
1729 for (std::size_t index = 0; index < last; ++index) {
1730 auto& thread = waiting_threads[index];
1731
1732 ASSERT(thread->GetCondVarWaitAddress() == condition_variable_addr);
1733
1734 // liberate Cond Var Thread.
1735 current_process->RemoveConditionVariableThread(thread);
1736
1737 const std::size_t current_core = system.CurrentCoreIndex();
1738 auto& monitor = system.Monitor();
1739
1740 // Atomically read the value of the mutex.
1741 u32 mutex_val = 0;
1742 u32 update_val = 0;
1743 const VAddr mutex_address = thread->GetMutexWaitAddress();
1744 do {
1745 // If the mutex is not yet acquired, acquire it.
1746 mutex_val = monitor.ExclusiveRead32(current_core, mutex_address);
1747
1748 if (mutex_val != 0) {
1749 update_val = mutex_val | Mutex::MutexHasWaitersFlag;
1750 } else {
1751 update_val = thread->GetWaitHandle();
1752 }
1753 } while (!monitor.ExclusiveWrite32(current_core, mutex_address, update_val));
1754 monitor.ClearExclusive();
1755 if (mutex_val == 0) {
1756 // We were able to acquire the mutex, resume this thread.
1757 auto* const lock_owner = thread->GetLockOwner();
1758 if (lock_owner != nullptr) {
1759 lock_owner->RemoveMutexWaiter(thread);
1760 }
1761 1637
1762 thread->SetLockOwner(nullptr); 1638namespace {
1763 thread->SetSynchronizationResults(nullptr, RESULT_SUCCESS);
1764 thread->ResumeFromWait();
1765 } else {
1766 // The mutex is already owned by some other thread, make this thread wait on it.
1767 const Handle owner_handle = static_cast<Handle>(mutex_val & Mutex::MutexOwnerMask);
1768 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1769 auto owner = handle_table.Get<Thread>(owner_handle);
1770 ASSERT(owner);
1771 if (thread->GetStatus() == ThreadStatus::WaitCondVar) {
1772 thread->SetStatus(ThreadStatus::WaitMutex);
1773 }
1774 1639
1775 owner->AddMutexWaiter(thread); 1640constexpr bool IsValidSignalType(Svc::SignalType type) {
1776 } 1641 switch (type) {
1642 case Svc::SignalType::Signal:
1643 case Svc::SignalType::SignalAndIncrementIfEqual:
1644 case Svc::SignalType::SignalAndModifyByWaitingCountIfEqual:
1645 return true;
1646 default:
1647 return false;
1777 } 1648 }
1778} 1649}
1779 1650
1780static void SignalProcessWideKey32(Core::System& system, u32 condition_variable_addr, s32 target) { 1651constexpr bool IsValidArbitrationType(Svc::ArbitrationType type) {
1781 SignalProcessWideKey(system, condition_variable_addr, target); 1652 switch (type) {
1653 case Svc::ArbitrationType::WaitIfLessThan:
1654 case Svc::ArbitrationType::DecrementAndWaitIfLessThan:
1655 case Svc::ArbitrationType::WaitIfEqual:
1656 return true;
1657 default:
1658 return false;
1659 }
1782} 1660}
1783 1661
1784// Wait for an address (via Address Arbiter) 1662} // namespace
1785static ResultCode WaitForAddress(Core::System& system, VAddr address, u32 type, s32 value,
1786 s64 timeout) {
1787 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, timeout={}", address,
1788 type, value, timeout);
1789
1790 // If the passed address is a kernel virtual address, return invalid memory state.
1791 if (Core::Memory::IsKernelVirtualAddress(address)) {
1792 LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
1793 return ERR_INVALID_ADDRESS_STATE;
1794 }
1795 1663
1796 // If the address is not properly aligned to 4 bytes, return invalid address. 1664// Wait for an address (via Address Arbiter)
1797 if (!Common::IsWordAligned(address)) { 1665static ResultCode WaitForAddress(Core::System& system, VAddr address, Svc::ArbitrationType arb_type,
1798 LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); 1666 s32 value, s64 timeout_ns) {
1799 return ERR_INVALID_ADDRESS; 1667 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, arb_type=0x{:X}, value=0x{:X}, timeout_ns={}",
1668 address, arb_type, value, timeout_ns);
1669
1670 // Validate input.
1671 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1672 R_UNLESS(Common::IsAligned(address, sizeof(int32_t)), Svc::ResultInvalidAddress);
1673 R_UNLESS(IsValidArbitrationType(arb_type), Svc::ResultInvalidEnumValue);
1674
1675 // Convert timeout from nanoseconds to ticks.
1676 s64 timeout{};
1677 if (timeout_ns > 0) {
1678 const s64 offset_tick(timeout_ns);
1679 if (offset_tick > 0) {
1680 timeout = offset_tick + 2;
1681 if (timeout <= 0) {
1682 timeout = std::numeric_limits<s64>::max();
1683 }
1684 } else {
1685 timeout = std::numeric_limits<s64>::max();
1686 }
1687 } else {
1688 timeout = timeout_ns;
1800 } 1689 }
1801 1690
1802 const auto arbitration_type = static_cast<AddressArbiter::ArbitrationType>(type); 1691 return system.Kernel().CurrentProcess()->WaitAddressArbiter(address, arb_type, value, timeout);
1803 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter();
1804 const ResultCode result =
1805 address_arbiter.WaitForAddress(address, arbitration_type, value, timeout);
1806 return result;
1807} 1692}
1808 1693
1809static ResultCode WaitForAddress32(Core::System& system, u32 address, u32 type, s32 value, 1694static ResultCode WaitForAddress32(Core::System& system, u32 address, Svc::ArbitrationType arb_type,
1810 u32 timeout_low, u32 timeout_high) { 1695 s32 value, u32 timeout_ns_low, u32 timeout_ns_high) {
1811 const auto timeout = static_cast<s64>(timeout_low | (u64{timeout_high} << 32)); 1696 const auto timeout = static_cast<s64>(timeout_ns_low | (u64{timeout_ns_high} << 32));
1812 return WaitForAddress(system, address, type, value, timeout); 1697 return WaitForAddress(system, address, arb_type, value, timeout);
1813} 1698}
1814 1699
1815// Signals to an address (via Address Arbiter) 1700// Signals to an address (via Address Arbiter)
1816static ResultCode SignalToAddress(Core::System& system, VAddr address, u32 type, s32 value, 1701static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::SignalType signal_type,
1817 s32 num_to_wake) { 1702 s32 value, s32 count) {
1818 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, type=0x{:X}, value=0x{:X}, num_to_wake=0x{:X}", 1703 LOG_TRACE(Kernel_SVC, "called, address=0x{:X}, signal_type=0x{:X}, value=0x{:X}, count=0x{:X}",
1819 address, type, value, num_to_wake); 1704 address, signal_type, value, count);
1820
1821 // If the passed address is a kernel virtual address, return invalid memory state.
1822 if (Core::Memory::IsKernelVirtualAddress(address)) {
1823 LOG_ERROR(Kernel_SVC, "Address is a kernel virtual address, address={:016X}", address);
1824 return ERR_INVALID_ADDRESS_STATE;
1825 }
1826 1705
1827 // If the address is not properly aligned to 4 bytes, return invalid address. 1706 // Validate input.
1828 if (!Common::IsWordAligned(address)) { 1707 R_UNLESS(!Memory::IsKernelAddress(address), Svc::ResultInvalidCurrentMemory);
1829 LOG_ERROR(Kernel_SVC, "Address is not word aligned, address={:016X}", address); 1708 R_UNLESS(Common::IsAligned(address, sizeof(s32)), Svc::ResultInvalidAddress);
1830 return ERR_INVALID_ADDRESS; 1709 R_UNLESS(IsValidSignalType(signal_type), Svc::ResultInvalidEnumValue);
1831 }
1832 1710
1833 const auto signal_type = static_cast<AddressArbiter::SignalType>(type); 1711 return system.Kernel().CurrentProcess()->SignalAddressArbiter(address, signal_type, value,
1834 auto& address_arbiter = system.Kernel().CurrentProcess()->GetAddressArbiter(); 1712 count);
1835 return address_arbiter.SignalToAddress(address, signal_type, value, num_to_wake);
1836} 1713}
1837 1714
1838static ResultCode SignalToAddress32(Core::System& system, u32 address, u32 type, s32 value, 1715static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
1839 s32 num_to_wake) { 1716 s32 value, s32 count) {
1840 return SignalToAddress(system, address, type, value, num_to_wake); 1717 return SignalToAddress(system, address, signal_type, value, count);
1841} 1718}
1842 1719
1843static void KernelDebug([[maybe_unused]] Core::System& system, 1720static void KernelDebug([[maybe_unused]] Core::System& system,
diff --git a/src/core/hle/kernel/svc_common.h b/src/core/hle/kernel/svc_common.h
new file mode 100644
index 000000000..4af049551
--- /dev/null
+++ b/src/core/hle/kernel/svc_common.h
@@ -0,0 +1,14 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "common/common_types.h"
8
9namespace Kernel::Svc {
10
11constexpr s32 ArgumentHandleCountMax = 0x40;
12constexpr u32 HandleWaitMask{1u << 30};
13
14} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_results.h b/src/core/hle/kernel/svc_results.h
new file mode 100644
index 000000000..78282f021
--- /dev/null
+++ b/src/core/hle/kernel/svc_results.h
@@ -0,0 +1,20 @@
1// Copyright 2020 yuzu emulator team
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "core/hle/result.h"
8
9namespace Kernel::Svc {
10
11constexpr ResultCode ResultTerminationRequested{ErrorModule::Kernel, 59};
12constexpr ResultCode ResultInvalidAddress{ErrorModule::Kernel, 102};
13constexpr ResultCode ResultInvalidCurrentMemory{ErrorModule::Kernel, 106};
14constexpr ResultCode ResultInvalidHandle{ErrorModule::Kernel, 114};
15constexpr ResultCode ResultTimedOut{ErrorModule::Kernel, 117};
16constexpr ResultCode ResultCancelled{ErrorModule::Kernel, 118};
17constexpr ResultCode ResultInvalidEnumValue{ErrorModule::Kernel, 120};
18constexpr ResultCode ResultInvalidState{ErrorModule::Kernel, 125};
19
20} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_types.h b/src/core/hle/kernel/svc_types.h
index 11e1d8e2d..d623f7a50 100644
--- a/src/core/hle/kernel/svc_types.h
+++ b/src/core/hle/kernel/svc_types.h
@@ -65,4 +65,16 @@ struct MemoryInfo {
65 u32 padding{}; 65 u32 padding{};
66}; 66};
67 67
68enum class SignalType : u32 {
69 Signal = 0,
70 SignalAndIncrementIfEqual = 1,
71 SignalAndModifyByWaitingCountIfEqual = 2,
72};
73
74enum class ArbitrationType : u32 {
75 WaitIfLessThan = 0,
76 DecrementAndWaitIfLessThan = 1,
77 WaitIfEqual = 2,
78};
79
68} // namespace Kernel::Svc 80} // namespace Kernel::Svc
diff --git a/src/core/hle/kernel/svc_wrap.h b/src/core/hle/kernel/svc_wrap.h
index 0b6dd9df0..a32750ed7 100644
--- a/src/core/hle/kernel/svc_wrap.h
+++ b/src/core/hle/kernel/svc_wrap.h
@@ -7,6 +7,7 @@
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "core/arm/arm_interface.h" 8#include "core/arm/arm_interface.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/kernel/svc_types.h"
10#include "core/hle/result.h" 11#include "core/hle/result.h"
11 12
12namespace Kernel { 13namespace Kernel {
@@ -215,9 +216,10 @@ void SvcWrap64(Core::System& system) {
215 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw); 216 func(system, static_cast<u32>(Param(system, 0)), Param(system, 1), Param(system, 2)).raw);
216} 217}
217 218
218template <ResultCode func(Core::System&, u32*, u64, u64, s64)> 219// Used by WaitSynchronization
220template <ResultCode func(Core::System&, s32*, u64, u64, s64)>
219void SvcWrap64(Core::System& system) { 221void SvcWrap64(Core::System& system) {
220 u32 param_1 = 0; 222 s32 param_1 = 0;
221 const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)), 223 const u32 retval = func(system, &param_1, Param(system, 1), static_cast<u32>(Param(system, 2)),
222 static_cast<s64>(Param(system, 3))) 224 static_cast<s64>(Param(system, 3)))
223 .raw; 225 .raw;
@@ -276,18 +278,22 @@ void SvcWrap64(Core::System& system) {
276 FuncReturn(system, retval); 278 FuncReturn(system, retval);
277} 279}
278 280
279template <ResultCode func(Core::System&, u64, u32, s32, s64)> 281// Used by WaitForAddress
282template <ResultCode func(Core::System&, u64, Svc::ArbitrationType, s32, s64)>
280void SvcWrap64(Core::System& system) { 283void SvcWrap64(Core::System& system) {
281 FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), 284 FuncReturn(system,
282 static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3))) 285 func(system, Param(system, 0), static_cast<Svc::ArbitrationType>(Param(system, 1)),
283 .raw); 286 static_cast<s32>(Param(system, 2)), static_cast<s64>(Param(system, 3)))
287 .raw);
284} 288}
285 289
286template <ResultCode func(Core::System&, u64, u32, s32, s32)> 290// Used by SignalToAddress
291template <ResultCode func(Core::System&, u64, Svc::SignalType, s32, s32)>
287void SvcWrap64(Core::System& system) { 292void SvcWrap64(Core::System& system) {
288 FuncReturn(system, func(system, Param(system, 0), static_cast<u32>(Param(system, 1)), 293 FuncReturn(system,
289 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) 294 func(system, Param(system, 0), static_cast<Svc::SignalType>(Param(system, 1)),
290 .raw); 295 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
296 .raw);
291} 297}
292 298
293//////////////////////////////////////////////////////////////////////////////////////////////////// 299////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -503,22 +509,23 @@ void SvcWrap32(Core::System& system) {
503} 509}
504 510
505// Used by WaitForAddress32 511// Used by WaitForAddress32
506template <ResultCode func(Core::System&, u32, u32, s32, u32, u32)> 512template <ResultCode func(Core::System&, u32, Svc::ArbitrationType, s32, u32, u32)>
507void SvcWrap32(Core::System& system) { 513void SvcWrap32(Core::System& system) {
508 const u32 retval = func(system, static_cast<u32>(Param(system, 0)), 514 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
509 static_cast<u32>(Param(system, 1)), static_cast<s32>(Param(system, 2)), 515 static_cast<Svc::ArbitrationType>(Param(system, 1)),
510 static_cast<u32>(Param(system, 3)), static_cast<u32>(Param(system, 4))) 516 static_cast<s32>(Param(system, 2)), static_cast<u32>(Param(system, 3)),
517 static_cast<u32>(Param(system, 4)))
511 .raw; 518 .raw;
512 FuncReturn(system, retval); 519 FuncReturn(system, retval);
513} 520}
514 521
515// Used by SignalToAddress32 522// Used by SignalToAddress32
516template <ResultCode func(Core::System&, u32, u32, s32, s32)> 523template <ResultCode func(Core::System&, u32, Svc::SignalType, s32, s32)>
517void SvcWrap32(Core::System& system) { 524void SvcWrap32(Core::System& system) {
518 const u32 retval = 525 const u32 retval = func(system, static_cast<u32>(Param(system, 0)),
519 func(system, static_cast<u32>(Param(system, 0)), static_cast<u32>(Param(system, 1)), 526 static_cast<Svc::SignalType>(Param(system, 1)),
520 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3))) 527 static_cast<s32>(Param(system, 2)), static_cast<s32>(Param(system, 3)))
521 .raw; 528 .raw;
522 FuncReturn(system, retval); 529 FuncReturn(system, retval);
523} 530}
524 531
@@ -539,9 +546,9 @@ void SvcWrap32(Core::System& system) {
539} 546}
540 547
541// Used by WaitSynchronization32 548// Used by WaitSynchronization32
542template <ResultCode func(Core::System&, u32, u32, s32, u32, Handle*)> 549template <ResultCode func(Core::System&, u32, u32, s32, u32, s32*)>
543void SvcWrap32(Core::System& system) { 550void SvcWrap32(Core::System& system) {
544 u32 param_1 = 0; 551 s32 param_1 = 0;
545 const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2), 552 const u32 retval = func(system, Param32(system, 0), Param32(system, 1), Param32(system, 2),
546 Param32(system, 3), &param_1) 553 Param32(system, 3), &param_1)
547 .raw; 554 .raw;
diff --git a/src/core/hle/kernel/synchronization.cpp b/src/core/hle/kernel/synchronization.cpp
deleted file mode 100644
index d3f520ea2..000000000
--- a/src/core/hle/kernel/synchronization.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "core/core.h"
6#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/k_scheduler.h"
9#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/synchronization.h"
12#include "core/hle/kernel/synchronization_object.h"
13#include "core/hle/kernel/thread.h"
14#include "core/hle/kernel/time_manager.h"
15
16namespace Kernel {
17
18Synchronization::Synchronization(Core::System& system) : system{system} {}
19
20void Synchronization::SignalObject(SynchronizationObject& obj) const {
21 auto& kernel = system.Kernel();
22 KScopedSchedulerLock lock(kernel);
23 if (obj.IsSignaled()) {
24 for (auto thread : obj.GetWaitingThreads()) {
25 if (thread->GetSchedulingStatus() == ThreadSchedStatus::Paused) {
26 if (thread->GetStatus() != ThreadStatus::WaitHLEEvent) {
27 ASSERT(thread->GetStatus() == ThreadStatus::WaitSynch);
28 ASSERT(thread->IsWaitingSync());
29 }
30 thread->SetSynchronizationResults(&obj, RESULT_SUCCESS);
31 thread->ResumeFromWait();
32 }
33 }
34 obj.ClearWaitingThreads();
35 }
36}
37
38std::pair<ResultCode, Handle> Synchronization::WaitFor(
39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds) {
40 auto& kernel = system.Kernel();
41 auto* const thread = kernel.CurrentScheduler()->GetCurrentThread();
42 Handle event_handle = InvalidHandle;
43 {
44 KScopedSchedulerLockAndSleep lock(kernel, event_handle, thread, nano_seconds);
45 const auto itr =
46 std::find_if(sync_objects.begin(), sync_objects.end(),
47 [thread](const std::shared_ptr<SynchronizationObject>& object) {
48 return object->IsSignaled();
49 });
50
51 if (itr != sync_objects.end()) {
52 // We found a ready object, acquire it and set the result value
53 SynchronizationObject* object = itr->get();
54 object->Acquire(thread);
55 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
56 lock.CancelSleep();
57 return {RESULT_SUCCESS, index};
58 }
59
60 if (nano_seconds == 0) {
61 lock.CancelSleep();
62 return {RESULT_TIMEOUT, InvalidHandle};
63 }
64
65 if (thread->IsPendingTermination()) {
66 lock.CancelSleep();
67 return {ERR_THREAD_TERMINATING, InvalidHandle};
68 }
69
70 if (thread->IsSyncCancelled()) {
71 thread->SetSyncCancelled(false);
72 lock.CancelSleep();
73 return {ERR_SYNCHRONIZATION_CANCELED, InvalidHandle};
74 }
75
76 for (auto& object : sync_objects) {
77 object->AddWaitingThread(SharedFrom(thread));
78 }
79
80 thread->SetSynchronizationObjects(&sync_objects);
81 thread->SetSynchronizationResults(nullptr, RESULT_TIMEOUT);
82 thread->SetStatus(ThreadStatus::WaitSynch);
83 thread->SetWaitingSync(true);
84 }
85 thread->SetWaitingSync(false);
86
87 if (event_handle != InvalidHandle) {
88 auto& time_manager = kernel.TimeManager();
89 time_manager.UnscheduleTimeEvent(event_handle);
90 }
91
92 {
93 KScopedSchedulerLock lock(kernel);
94 ResultCode signaling_result = thread->GetSignalingResult();
95 SynchronizationObject* signaling_object = thread->GetSignalingObject();
96 thread->SetSynchronizationObjects(nullptr);
97 auto shared_thread = SharedFrom(thread);
98 for (auto& obj : sync_objects) {
99 obj->RemoveWaitingThread(shared_thread);
100 }
101 if (signaling_object != nullptr) {
102 const auto itr = std::find_if(
103 sync_objects.begin(), sync_objects.end(),
104 [signaling_object](const std::shared_ptr<SynchronizationObject>& object) {
105 return object.get() == signaling_object;
106 });
107 ASSERT(itr != sync_objects.end());
108 signaling_object->Acquire(thread);
109 const u32 index = static_cast<s32>(std::distance(sync_objects.begin(), itr));
110 return {signaling_result, index};
111 }
112 return {signaling_result, -1};
113 }
114}
115
116} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization.h b/src/core/hle/kernel/synchronization.h
deleted file mode 100644
index 379f4b1d3..000000000
--- a/src/core/hle/kernel/synchronization.h
+++ /dev/null
@@ -1,44 +0,0 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <utility>
9#include <vector>
10
11#include "core/hle/kernel/object.h"
12#include "core/hle/result.h"
13
14namespace Core {
15class System;
16} // namespace Core
17
18namespace Kernel {
19
20class SynchronizationObject;
21
22/**
23 * The 'Synchronization' class is an interface for handling synchronization methods
24 * used by Synchronization objects and synchronization SVCs. This centralizes processing of
25 * such
26 */
27class Synchronization {
28public:
29 explicit Synchronization(Core::System& system);
30
31 /// Signals a synchronization object, waking up all its waiting threads
32 void SignalObject(SynchronizationObject& obj) const;
33
34 /// Tries to see if waiting for any of the sync_objects is necessary, if not
35 /// it returns Success and the handle index of the signaled sync object. In
36 /// case not, the current thread will be locked and wait for nano_seconds or
37 /// for a synchronization object to signal.
38 std::pair<ResultCode, Handle> WaitFor(
39 std::vector<std::shared_ptr<SynchronizationObject>>& sync_objects, s64 nano_seconds);
40
41private:
42 Core::System& system;
43};
44} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.cpp b/src/core/hle/kernel/synchronization_object.cpp
deleted file mode 100644
index ba4d39157..000000000
--- a/src/core/hle/kernel/synchronization_object.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include "common/assert.h"
7#include "common/common_types.h"
8#include "common/logging/log.h"
9#include "core/core.h"
10#include "core/hle/kernel/kernel.h"
11#include "core/hle/kernel/object.h"
12#include "core/hle/kernel/process.h"
13#include "core/hle/kernel/synchronization.h"
14#include "core/hle/kernel/synchronization_object.h"
15#include "core/hle/kernel/thread.h"
16
17namespace Kernel {
18
19SynchronizationObject::SynchronizationObject(KernelCore& kernel) : Object{kernel} {}
20SynchronizationObject::~SynchronizationObject() = default;
21
22void SynchronizationObject::Signal() {
23 kernel.Synchronization().SignalObject(*this);
24}
25
26void SynchronizationObject::AddWaitingThread(std::shared_ptr<Thread> thread) {
27 auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
28 if (itr == waiting_threads.end())
29 waiting_threads.push_back(std::move(thread));
30}
31
32void SynchronizationObject::RemoveWaitingThread(std::shared_ptr<Thread> thread) {
33 auto itr = std::find(waiting_threads.begin(), waiting_threads.end(), thread);
34 // If a thread passed multiple handles to the same object,
35 // the kernel might attempt to remove the thread from the object's
36 // waiting threads list multiple times.
37 if (itr != waiting_threads.end())
38 waiting_threads.erase(itr);
39}
40
41void SynchronizationObject::ClearWaitingThreads() {
42 waiting_threads.clear();
43}
44
45const std::vector<std::shared_ptr<Thread>>& SynchronizationObject::GetWaitingThreads() const {
46 return waiting_threads;
47}
48
49} // namespace Kernel
diff --git a/src/core/hle/kernel/synchronization_object.h b/src/core/hle/kernel/synchronization_object.h
deleted file mode 100644
index 7408ed51f..000000000
--- a/src/core/hle/kernel/synchronization_object.h
+++ /dev/null
@@ -1,77 +0,0 @@
1// Copyright 2014 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <atomic>
8#include <memory>
9#include <vector>
10
11#include "core/hle/kernel/object.h"
12
13namespace Kernel {
14
15class KernelCore;
16class Synchronization;
17class Thread;
18
19/// Class that represents a Kernel object that a thread can be waiting on
20class SynchronizationObject : public Object {
21public:
22 explicit SynchronizationObject(KernelCore& kernel);
23 ~SynchronizationObject() override;
24
25 /**
26 * Check if the specified thread should wait until the object is available
27 * @param thread The thread about which we're deciding.
28 * @return True if the current thread should wait due to this object being unavailable
29 */
30 virtual bool ShouldWait(const Thread* thread) const = 0;
31
32 /// Acquire/lock the object for the specified thread if it is available
33 virtual void Acquire(Thread* thread) = 0;
34
35 /// Signal this object
36 virtual void Signal();
37
38 virtual bool IsSignaled() const {
39 return is_signaled;
40 }
41
42 /**
43 * Add a thread to wait on this object
44 * @param thread Pointer to thread to add
45 */
46 void AddWaitingThread(std::shared_ptr<Thread> thread);
47
48 /**
49 * Removes a thread from waiting on this object (e.g. if it was resumed already)
50 * @param thread Pointer to thread to remove
51 */
52 void RemoveWaitingThread(std::shared_ptr<Thread> thread);
53
54 /// Get a const reference to the waiting threads list for debug use
55 const std::vector<std::shared_ptr<Thread>>& GetWaitingThreads() const;
56
57 void ClearWaitingThreads();
58
59protected:
60 std::atomic_bool is_signaled{}; // Tells if this sync object is signaled
61
62private:
63 /// Threads waiting for this object to become available
64 std::vector<std::shared_ptr<Thread>> waiting_threads;
65};
66
67// Specialization of DynamicObjectCast for SynchronizationObjects
68template <>
69inline std::shared_ptr<SynchronizationObject> DynamicObjectCast<SynchronizationObject>(
70 std::shared_ptr<Object> object) {
71 if (object != nullptr && object->IsWaitable()) {
72 return std::static_pointer_cast<SynchronizationObject>(object);
73 }
74 return nullptr;
75}
76
77} // namespace Kernel
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a4f9e0d97..d97323255 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -17,9 +17,11 @@
17#include "core/hardware_properties.h" 17#include "core/hardware_properties.h"
18#include "core/hle/kernel/errors.h" 18#include "core/hle/kernel/errors.h"
19#include "core/hle/kernel/handle_table.h" 19#include "core/hle/kernel/handle_table.h"
20#include "core/hle/kernel/k_condition_variable.h"
20#include "core/hle/kernel/k_scheduler.h" 21#include "core/hle/kernel/k_scheduler.h"
21#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h" 22#include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
22#include "core/hle/kernel/kernel.h" 23#include "core/hle/kernel/kernel.h"
24#include "core/hle/kernel/memory/memory_layout.h"
23#include "core/hle/kernel/object.h" 25#include "core/hle/kernel/object.h"
24#include "core/hle/kernel/process.h" 26#include "core/hle/kernel/process.h"
25#include "core/hle/kernel/thread.h" 27#include "core/hle/kernel/thread.h"
@@ -34,26 +36,19 @@
34 36
35namespace Kernel { 37namespace Kernel {
36 38
37bool Thread::ShouldWait(const Thread* thread) const {
38 return status != ThreadStatus::Dead;
39}
40
41bool Thread::IsSignaled() const { 39bool Thread::IsSignaled() const {
42 return status == ThreadStatus::Dead; 40 return signaled;
43}
44
45void Thread::Acquire(Thread* thread) {
46 ASSERT_MSG(!ShouldWait(thread), "object unavailable!");
47} 41}
48 42
49Thread::Thread(KernelCore& kernel) : SynchronizationObject{kernel} {} 43Thread::Thread(KernelCore& kernel) : KSynchronizationObject{kernel} {}
50Thread::~Thread() = default; 44Thread::~Thread() = default;
51 45
52void Thread::Stop() { 46void Thread::Stop() {
53 { 47 {
54 KScopedSchedulerLock lock(kernel); 48 KScopedSchedulerLock lock(kernel);
55 SetStatus(ThreadStatus::Dead); 49 SetState(ThreadState::Terminated);
56 Signal(); 50 signaled = true;
51 NotifyAvailable();
57 kernel.GlobalHandleTable().Close(global_handle); 52 kernel.GlobalHandleTable().Close(global_handle);
58 53
59 if (owner_process) { 54 if (owner_process) {
@@ -67,59 +62,27 @@ void Thread::Stop() {
67 global_handle = 0; 62 global_handle = 0;
68} 63}
69 64
70void Thread::ResumeFromWait() { 65void Thread::Wakeup() {
71 KScopedSchedulerLock lock(kernel); 66 KScopedSchedulerLock lock(kernel);
72 switch (status) { 67 SetState(ThreadState::Runnable);
73 case ThreadStatus::Paused:
74 case ThreadStatus::WaitSynch:
75 case ThreadStatus::WaitHLEEvent:
76 case ThreadStatus::WaitSleep:
77 case ThreadStatus::WaitIPC:
78 case ThreadStatus::WaitMutex:
79 case ThreadStatus::WaitCondVar:
80 case ThreadStatus::WaitArb:
81 case ThreadStatus::Dormant:
82 break;
83
84 case ThreadStatus::Ready:
85 // The thread's wakeup callback must have already been cleared when the thread was first
86 // awoken.
87 ASSERT(hle_callback == nullptr);
88 // If the thread is waiting on multiple wait objects, it might be awoken more than once
89 // before actually resuming. We can ignore subsequent wakeups if the thread status has
90 // already been set to ThreadStatus::Ready.
91 return;
92 case ThreadStatus::Dead:
93 // This should never happen, as threads must complete before being stopped.
94 DEBUG_ASSERT_MSG(false, "Thread with object id {} cannot be resumed because it's DEAD.",
95 GetObjectId());
96 return;
97 }
98
99 SetStatus(ThreadStatus::Ready);
100}
101
102void Thread::OnWakeUp() {
103 KScopedSchedulerLock lock(kernel);
104 SetStatus(ThreadStatus::Ready);
105} 68}
106 69
107ResultCode Thread::Start() { 70ResultCode Thread::Start() {
108 KScopedSchedulerLock lock(kernel); 71 KScopedSchedulerLock lock(kernel);
109 SetStatus(ThreadStatus::Ready); 72 SetState(ThreadState::Runnable);
110 return RESULT_SUCCESS; 73 return RESULT_SUCCESS;
111} 74}
112 75
113void Thread::CancelWait() { 76void Thread::CancelWait() {
114 KScopedSchedulerLock lock(kernel); 77 KScopedSchedulerLock lock(kernel);
115 if (GetSchedulingStatus() != ThreadSchedStatus::Paused || !is_waiting_on_sync) { 78 if (GetState() != ThreadState::Waiting || !is_cancellable) {
116 is_sync_cancelled = true; 79 is_sync_cancelled = true;
117 return; 80 return;
118 } 81 }
119 // TODO(Blinkhawk): Implement cancel of server session 82 // TODO(Blinkhawk): Implement cancel of server session
120 is_sync_cancelled = false; 83 is_sync_cancelled = false;
121 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED); 84 SetSynchronizationResults(nullptr, ERR_SYNCHRONIZATION_CANCELED);
122 SetStatus(ThreadStatus::Ready); 85 SetState(ThreadState::Runnable);
123} 86}
124 87
125static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top, 88static void ResetThreadContext32(Core::ARM_Interface::ThreadContext32& context, u32 stack_top,
@@ -183,25 +146,24 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
183 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel); 146 std::shared_ptr<Thread> thread = std::make_shared<Thread>(kernel);
184 147
185 thread->thread_id = kernel.CreateNewThreadID(); 148 thread->thread_id = kernel.CreateNewThreadID();
186 thread->status = ThreadStatus::Dormant; 149 thread->thread_state = ThreadState::Initialized;
187 thread->entry_point = entry_point; 150 thread->entry_point = entry_point;
188 thread->stack_top = stack_top; 151 thread->stack_top = stack_top;
189 thread->disable_count = 1; 152 thread->disable_count = 1;
190 thread->tpidr_el0 = 0; 153 thread->tpidr_el0 = 0;
191 thread->nominal_priority = thread->current_priority = priority; 154 thread->current_priority = priority;
155 thread->base_priority = priority;
156 thread->lock_owner = nullptr;
192 thread->schedule_count = -1; 157 thread->schedule_count = -1;
193 thread->last_scheduled_tick = 0; 158 thread->last_scheduled_tick = 0;
194 thread->processor_id = processor_id; 159 thread->processor_id = processor_id;
195 thread->ideal_core = processor_id; 160 thread->ideal_core = processor_id;
196 thread->affinity_mask.SetAffinity(processor_id, true); 161 thread->affinity_mask.SetAffinity(processor_id, true);
197 thread->wait_objects = nullptr;
198 thread->mutex_wait_address = 0;
199 thread->condvar_wait_address = 0;
200 thread->wait_handle = 0;
201 thread->name = std::move(name); 162 thread->name = std::move(name);
202 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap(); 163 thread->global_handle = kernel.GlobalHandleTable().Create(thread).Unwrap();
203 thread->owner_process = owner_process; 164 thread->owner_process = owner_process;
204 thread->type = type_flags; 165 thread->type = type_flags;
166 thread->signaled = false;
205 if ((type_flags & THREADTYPE_IDLE) == 0) { 167 if ((type_flags & THREADTYPE_IDLE) == 0) {
206 auto& scheduler = kernel.GlobalSchedulerContext(); 168 auto& scheduler = kernel.GlobalSchedulerContext();
207 scheduler.AddThread(thread); 169 scheduler.AddThread(thread);
@@ -226,153 +188,185 @@ ResultVal<std::shared_ptr<Thread>> Thread::Create(Core::System& system, ThreadTy
226 return MakeResult<std::shared_ptr<Thread>>(std::move(thread)); 188 return MakeResult<std::shared_ptr<Thread>>(std::move(thread));
227} 189}
228 190
229void Thread::SetPriority(u32 priority) { 191void Thread::SetBasePriority(u32 priority) {
230 KScopedSchedulerLock lock(kernel);
231 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST, 192 ASSERT_MSG(priority <= THREADPRIO_LOWEST && priority >= THREADPRIO_HIGHEST,
232 "Invalid priority value."); 193 "Invalid priority value.");
233 nominal_priority = priority; 194
234 UpdatePriority(); 195 KScopedSchedulerLock lock(kernel);
196
197 // Change our base priority.
198 base_priority = priority;
199
200 // Perform a priority restoration.
201 RestorePriority(kernel, this);
235} 202}
236 203
237void Thread::SetSynchronizationResults(SynchronizationObject* object, ResultCode result) { 204void Thread::SetSynchronizationResults(KSynchronizationObject* object, ResultCode result) {
238 signaling_object = object; 205 signaling_object = object;
239 signaling_result = result; 206 signaling_result = result;
240} 207}
241 208
242s32 Thread::GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const {
243 ASSERT_MSG(!wait_objects->empty(), "Thread is not waiting for anything");
244 const auto match = std::find(wait_objects->rbegin(), wait_objects->rend(), object);
245 return static_cast<s32>(std::distance(match, wait_objects->rend()) - 1);
246}
247
248VAddr Thread::GetCommandBufferAddress() const { 209VAddr Thread::GetCommandBufferAddress() const {
249 // Offset from the start of TLS at which the IPC command buffer begins. 210 // Offset from the start of TLS at which the IPC command buffer begins.
250 constexpr u64 command_header_offset = 0x80; 211 constexpr u64 command_header_offset = 0x80;
251 return GetTLSAddress() + command_header_offset; 212 return GetTLSAddress() + command_header_offset;
252} 213}
253 214
254void Thread::SetStatus(ThreadStatus new_status) { 215void Thread::SetState(ThreadState state) {
255 if (new_status == status) { 216 KScopedSchedulerLock sl(kernel);
256 return;
257 }
258 217
259 switch (new_status) { 218 // Clear debugging state
260 case ThreadStatus::Ready: 219 SetMutexWaitAddressForDebugging({});
261 SetSchedulingStatus(ThreadSchedStatus::Runnable); 220 SetWaitReasonForDebugging({});
262 break;
263 case ThreadStatus::Dormant:
264 SetSchedulingStatus(ThreadSchedStatus::None);
265 break;
266 case ThreadStatus::Dead:
267 SetSchedulingStatus(ThreadSchedStatus::Exited);
268 break;
269 default:
270 SetSchedulingStatus(ThreadSchedStatus::Paused);
271 break;
272 }
273 221
274 status = new_status; 222 const ThreadState old_state = thread_state;
223 thread_state =
224 static_cast<ThreadState>((old_state & ~ThreadState::Mask) | (state & ThreadState::Mask));
225 if (thread_state != old_state) {
226 KScheduler::OnThreadStateChanged(kernel, this, old_state);
227 }
275} 228}
276 229
277void Thread::AddMutexWaiter(std::shared_ptr<Thread> thread) { 230void Thread::AddWaiterImpl(Thread* thread) {
278 if (thread->lock_owner.get() == this) { 231 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
279 // If the thread is already waiting for this thread to release the mutex, ensure that the 232
280 // waiters list is consistent and return without doing anything. 233 // Find the right spot to insert the waiter.
281 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 234 auto it = waiter_list.begin();
282 ASSERT(iter != wait_mutex_threads.end()); 235 while (it != waiter_list.end()) {
283 return; 236 if (it->GetPriority() > thread->GetPriority()) {
237 break;
238 }
239 it++;
284 } 240 }
285 241
286 // A thread can't wait on two different mutexes at the same time. 242 // Keep track of how many kernel waiters we have.
287 ASSERT(thread->lock_owner == nullptr); 243 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
244 ASSERT((num_kernel_waiters++) >= 0);
245 }
288 246
289 // Ensure that the thread is not already in the list of mutex waiters 247 // Insert the waiter.
290 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 248 waiter_list.insert(it, *thread);
291 ASSERT(iter == wait_mutex_threads.end()); 249 thread->SetLockOwner(this);
250}
292 251
293 // Keep the list in an ordered fashion 252void Thread::RemoveWaiterImpl(Thread* thread) {
294 const auto insertion_point = std::find_if( 253 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
295 wait_mutex_threads.begin(), wait_mutex_threads.end(),
296 [&thread](const auto& entry) { return entry->GetPriority() > thread->GetPriority(); });
297 wait_mutex_threads.insert(insertion_point, thread);
298 thread->lock_owner = SharedFrom(this);
299 254
300 UpdatePriority(); 255 // Keep track of how many kernel waiters we have.
301} 256 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
257 ASSERT((num_kernel_waiters--) > 0);
258 }
302 259
303void Thread::RemoveMutexWaiter(std::shared_ptr<Thread> thread) { 260 // Remove the waiter.
304 ASSERT(thread->lock_owner.get() == this); 261 waiter_list.erase(waiter_list.iterator_to(*thread));
262 thread->SetLockOwner(nullptr);
263}
305 264
306 // Ensure that the thread is in the list of mutex waiters 265void Thread::RestorePriority(KernelCore& kernel, Thread* thread) {
307 const auto iter = std::find(wait_mutex_threads.begin(), wait_mutex_threads.end(), thread); 266 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
308 ASSERT(iter != wait_mutex_threads.end());
309 267
310 wait_mutex_threads.erase(iter); 268 while (true) {
269 // We want to inherit priority where possible.
270 s32 new_priority = thread->GetBasePriority();
271 if (thread->HasWaiters()) {
272 new_priority = std::min(new_priority, thread->waiter_list.front().GetPriority());
273 }
311 274
312 thread->lock_owner = nullptr; 275 // If the priority we would inherit is not different from ours, don't do anything.
313 UpdatePriority(); 276 if (new_priority == thread->GetPriority()) {
314} 277 return;
278 }
315 279
316void Thread::UpdatePriority() { 280 // Ensure we don't violate condition variable red black tree invariants.
317 // If any of the threads waiting on the mutex have a higher priority 281 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
318 // (taking into account priority inheritance), then this thread inherits 282 BeforeUpdatePriority(kernel, cv_tree, thread);
319 // that thread's priority.
320 u32 new_priority = nominal_priority;
321 if (!wait_mutex_threads.empty()) {
322 if (wait_mutex_threads.front()->current_priority < new_priority) {
323 new_priority = wait_mutex_threads.front()->current_priority;
324 } 283 }
325 }
326 284
327 if (new_priority == current_priority) { 285 // Change the priority.
328 return; 286 const s32 old_priority = thread->GetPriority();
329 } 287 thread->SetPriority(new_priority);
330 288
331 if (GetStatus() == ThreadStatus::WaitCondVar) { 289 // Restore the condition variable, if relevant.
332 owner_process->RemoveConditionVariableThread(SharedFrom(this)); 290 if (auto* cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
333 } 291 AfterUpdatePriority(kernel, cv_tree, thread);
292 }
334 293
335 SetCurrentPriority(new_priority); 294 // Update the scheduler.
295 KScheduler::OnThreadPriorityChanged(kernel, thread, old_priority);
336 296
337 if (GetStatus() == ThreadStatus::WaitCondVar) { 297 // Keep the lock owner up to date.
338 owner_process->InsertConditionVariableThread(SharedFrom(this)); 298 Thread* lock_owner = thread->GetLockOwner();
339 } 299 if (lock_owner == nullptr) {
300 return;
301 }
340 302
341 if (!lock_owner) { 303 // Update the thread in the lock owner's sorted list, and continue inheriting.
342 return; 304 lock_owner->RemoveWaiterImpl(thread);
305 lock_owner->AddWaiterImpl(thread);
306 thread = lock_owner;
343 } 307 }
308}
344 309
345 // Ensure that the thread is within the correct location in the waiting list. 310void Thread::AddWaiter(Thread* thread) {
346 auto old_owner = lock_owner; 311 AddWaiterImpl(thread);
347 lock_owner->RemoveMutexWaiter(SharedFrom(this)); 312 RestorePriority(kernel, this);
348 old_owner->AddMutexWaiter(SharedFrom(this));
349
350 // Recursively update the priority of the thread that depends on the priority of this one.
351 lock_owner->UpdatePriority();
352} 313}
353 314
354bool Thread::AllSynchronizationObjectsReady() const { 315void Thread::RemoveWaiter(Thread* thread) {
355 return std::none_of(wait_objects->begin(), wait_objects->end(), 316 RemoveWaiterImpl(thread);
356 [this](const std::shared_ptr<SynchronizationObject>& object) { 317 RestorePriority(kernel, this);
357 return object->ShouldWait(this);
358 });
359} 318}
360 319
361bool Thread::InvokeHLECallback(std::shared_ptr<Thread> thread) { 320Thread* Thread::RemoveWaiterByKey(s32* out_num_waiters, VAddr key) {
362 ASSERT(hle_callback); 321 ASSERT(kernel.GlobalSchedulerContext().IsLocked());
363 return hle_callback(std::move(thread)); 322
323 s32 num_waiters{};
324 Thread* next_lock_owner{};
325 auto it = waiter_list.begin();
326 while (it != waiter_list.end()) {
327 if (it->GetAddressKey() == key) {
328 Thread* thread = std::addressof(*it);
329
330 // Keep track of how many kernel waiters we have.
331 if (Memory::IsKernelAddressKey(thread->GetAddressKey())) {
332 ASSERT((num_kernel_waiters--) > 0);
333 }
334 it = waiter_list.erase(it);
335
336 // Update the next lock owner.
337 if (next_lock_owner == nullptr) {
338 next_lock_owner = thread;
339 next_lock_owner->SetLockOwner(nullptr);
340 } else {
341 next_lock_owner->AddWaiterImpl(thread);
342 }
343 num_waiters++;
344 } else {
345 it++;
346 }
347 }
348
349 // Do priority updates, if we have a next owner.
350 if (next_lock_owner) {
351 RestorePriority(kernel, this);
352 RestorePriority(kernel, next_lock_owner);
353 }
354
355 // Return output.
356 *out_num_waiters = num_waiters;
357 return next_lock_owner;
364} 358}
365 359
366ResultCode Thread::SetActivity(ThreadActivity value) { 360ResultCode Thread::SetActivity(ThreadActivity value) {
367 KScopedSchedulerLock lock(kernel); 361 KScopedSchedulerLock lock(kernel);
368 362
369 auto sched_status = GetSchedulingStatus(); 363 auto sched_status = GetState();
370 364
371 if (sched_status != ThreadSchedStatus::Runnable && sched_status != ThreadSchedStatus::Paused) { 365 if (sched_status != ThreadState::Runnable && sched_status != ThreadState::Waiting) {
372 return ERR_INVALID_STATE; 366 return ERR_INVALID_STATE;
373 } 367 }
374 368
375 if (IsPendingTermination()) { 369 if (IsTerminationRequested()) {
376 return RESULT_SUCCESS; 370 return RESULT_SUCCESS;
377 } 371 }
378 372
@@ -394,7 +388,8 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
394 Handle event_handle{}; 388 Handle event_handle{};
395 { 389 {
396 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds); 390 KScopedSchedulerLockAndSleep lock(kernel, event_handle, this, nanoseconds);
397 SetStatus(ThreadStatus::WaitSleep); 391 SetState(ThreadState::Waiting);
392 SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
398 } 393 }
399 394
400 if (event_handle != InvalidHandle) { 395 if (event_handle != InvalidHandle) {
@@ -405,34 +400,21 @@ ResultCode Thread::Sleep(s64 nanoseconds) {
405} 400}
406 401
407void Thread::AddSchedulingFlag(ThreadSchedFlags flag) { 402void Thread::AddSchedulingFlag(ThreadSchedFlags flag) {
408 const u32 old_state = scheduling_state; 403 const auto old_state = GetRawState();
409 pausing_state |= static_cast<u32>(flag); 404 pausing_state |= static_cast<u32>(flag);
410 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 405 const auto base_scheduling = GetState();
411 scheduling_state = base_scheduling | pausing_state; 406 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
412 KScheduler::OnThreadStateChanged(kernel, this, old_state); 407 KScheduler::OnThreadStateChanged(kernel, this, old_state);
413} 408}
414 409
415void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) { 410void Thread::RemoveSchedulingFlag(ThreadSchedFlags flag) {
416 const u32 old_state = scheduling_state; 411 const auto old_state = GetRawState();
417 pausing_state &= ~static_cast<u32>(flag); 412 pausing_state &= ~static_cast<u32>(flag);
418 const u32 base_scheduling = static_cast<u32>(GetSchedulingStatus()); 413 const auto base_scheduling = GetState();
419 scheduling_state = base_scheduling | pausing_state; 414 thread_state = base_scheduling | static_cast<ThreadState>(pausing_state);
420 KScheduler::OnThreadStateChanged(kernel, this, old_state); 415 KScheduler::OnThreadStateChanged(kernel, this, old_state);
421} 416}
422 417
423void Thread::SetSchedulingStatus(ThreadSchedStatus new_status) {
424 const u32 old_state = scheduling_state;
425 scheduling_state = (scheduling_state & static_cast<u32>(ThreadSchedMasks::HighMask)) |
426 static_cast<u32>(new_status);
427 KScheduler::OnThreadStateChanged(kernel, this, old_state);
428}
429
430void Thread::SetCurrentPriority(u32 new_priority) {
431 const u32 old_priority = std::exchange(current_priority, new_priority);
432 KScheduler::OnThreadPriorityChanged(kernel, this, kernel.CurrentScheduler()->GetCurrentThread(),
433 old_priority);
434}
435
436ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) { 418ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
437 KScopedSchedulerLock lock(kernel); 419 KScopedSchedulerLock lock(kernel);
438 const auto HighestSetCore = [](u64 mask, u32 max_cores) { 420 const auto HighestSetCore = [](u64 mask, u32 max_cores) {
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 11ef29888..6b66c9a0e 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -6,16 +6,21 @@
6 6
7#include <array> 7#include <array>
8#include <functional> 8#include <functional>
9#include <span>
9#include <string> 10#include <string>
10#include <utility> 11#include <utility>
11#include <vector> 12#include <vector>
12 13
14#include <boost/intrusive/list.hpp>
15
13#include "common/common_types.h" 16#include "common/common_types.h"
17#include "common/intrusive_red_black_tree.h"
14#include "common/spin_lock.h" 18#include "common/spin_lock.h"
15#include "core/arm/arm_interface.h" 19#include "core/arm/arm_interface.h"
16#include "core/hle/kernel/k_affinity_mask.h" 20#include "core/hle/kernel/k_affinity_mask.h"
21#include "core/hle/kernel/k_synchronization_object.h"
17#include "core/hle/kernel/object.h" 22#include "core/hle/kernel/object.h"
18#include "core/hle/kernel/synchronization_object.h" 23#include "core/hle/kernel/svc_common.h"
19#include "core/hle/result.h" 24#include "core/hle/result.h"
20 25
21namespace Common { 26namespace Common {
@@ -73,19 +78,24 @@ enum ThreadProcessorId : s32 {
73 (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3) 78 (1 << THREADPROCESSORID_2) | (1 << THREADPROCESSORID_3)
74}; 79};
75 80
76enum class ThreadStatus { 81enum class ThreadState : u16 {
77 Ready, ///< Ready to run 82 Initialized = 0,
78 Paused, ///< Paused by SetThreadActivity or debug 83 Waiting = 1,
79 WaitHLEEvent, ///< Waiting for hle event to finish 84 Runnable = 2,
80 WaitSleep, ///< Waiting due to a SleepThread SVC 85 Terminated = 3,
81 WaitIPC, ///< Waiting for the reply from an IPC request 86
82 WaitSynch, ///< Waiting due to WaitSynchronization 87 SuspendShift = 4,
83 WaitMutex, ///< Waiting due to an ArbitrateLock svc 88 Mask = (1 << SuspendShift) - 1,
84 WaitCondVar, ///< Waiting due to an WaitProcessWideKey svc 89
85 WaitArb, ///< Waiting due to a SignalToAddress/WaitForAddress svc 90 ProcessSuspended = (1 << (0 + SuspendShift)),
86 Dormant, ///< Created but not yet made ready 91 ThreadSuspended = (1 << (1 + SuspendShift)),
87 Dead ///< Run to completion, or forcefully terminated 92 DebugSuspended = (1 << (2 + SuspendShift)),
93 BacktraceSuspended = (1 << (3 + SuspendShift)),
94 InitSuspended = (1 << (4 + SuspendShift)),
95
96 SuspendFlagMask = ((1 << 5) - 1) << SuspendShift,
88}; 97};
98DECLARE_ENUM_FLAG_OPERATORS(ThreadState);
89 99
90enum class ThreadWakeupReason { 100enum class ThreadWakeupReason {
91 Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal. 101 Signal, // The thread was woken up by WakeupAllWaitingThreads due to an object signal.
@@ -97,13 +107,6 @@ enum class ThreadActivity : u32 {
97 Paused = 1, 107 Paused = 1,
98}; 108};
99 109
100enum class ThreadSchedStatus : u32 {
101 None = 0,
102 Paused = 1,
103 Runnable = 2,
104 Exited = 3,
105};
106
107enum class ThreadSchedFlags : u32 { 110enum class ThreadSchedFlags : u32 {
108 ProcessPauseFlag = 1 << 4, 111 ProcessPauseFlag = 1 << 4,
109 ThreadPauseFlag = 1 << 5, 112 ThreadPauseFlag = 1 << 5,
@@ -111,13 +114,20 @@ enum class ThreadSchedFlags : u32 {
111 KernelInitPauseFlag = 1 << 8, 114 KernelInitPauseFlag = 1 << 8,
112}; 115};
113 116
114enum class ThreadSchedMasks : u32 { 117enum class ThreadWaitReasonForDebugging : u32 {
115 LowMask = 0x000f, 118 None, ///< Thread is not waiting
116 HighMask = 0xfff0, 119 Sleep, ///< Thread is waiting due to a SleepThread SVC
117 ForcePauseMask = 0x0070, 120 IPC, ///< Thread is waiting for the reply from an IPC request
121 Synchronization, ///< Thread is waiting due to a WaitSynchronization SVC
122 ConditionVar, ///< Thread is waiting due to a WaitProcessWideKey SVC
123 Arbitration, ///< Thread is waiting due to a SignalToAddress/WaitForAddress SVC
124 Suspended, ///< Thread is waiting due to process suspension
118}; 125};
119 126
120class Thread final : public SynchronizationObject { 127class Thread final : public KSynchronizationObject, public boost::intrusive::list_base_hook<> {
128 friend class KScheduler;
129 friend class Process;
130
121public: 131public:
122 explicit Thread(KernelCore& kernel); 132 explicit Thread(KernelCore& kernel);
123 ~Thread() override; 133 ~Thread() override;
@@ -127,10 +137,6 @@ public:
127 using ThreadContext32 = Core::ARM_Interface::ThreadContext32; 137 using ThreadContext32 = Core::ARM_Interface::ThreadContext32;
128 using ThreadContext64 = Core::ARM_Interface::ThreadContext64; 138 using ThreadContext64 = Core::ARM_Interface::ThreadContext64;
129 139
130 using ThreadSynchronizationObjects = std::vector<std::shared_ptr<SynchronizationObject>>;
131
132 using HLECallback = std::function<bool(std::shared_ptr<Thread> thread)>;
133
134 /** 140 /**
135 * Creates and returns a new thread. The new thread is immediately scheduled 141 * Creates and returns a new thread. The new thread is immediately scheduled
136 * @param system The instance of the whole system 142 * @param system The instance of the whole system
@@ -186,59 +192,54 @@ public:
186 return HANDLE_TYPE; 192 return HANDLE_TYPE;
187 } 193 }
188 194
189 bool ShouldWait(const Thread* thread) const override;
190 void Acquire(Thread* thread) override;
191 bool IsSignaled() const override;
192
193 /** 195 /**
194 * Gets the thread's current priority 196 * Gets the thread's current priority
195 * @return The current thread's priority 197 * @return The current thread's priority
196 */ 198 */
197 u32 GetPriority() const { 199 [[nodiscard]] s32 GetPriority() const {
198 return current_priority; 200 return current_priority;
199 } 201 }
200 202
201 /** 203 /**
204 * Sets the thread's current priority.
205 * @param priority The new priority.
206 */
207 void SetPriority(s32 priority) {
208 current_priority = priority;
209 }
210
211 /**
202 * Gets the thread's nominal priority. 212 * Gets the thread's nominal priority.
203 * @return The current thread's nominal priority. 213 * @return The current thread's nominal priority.
204 */ 214 */
205 u32 GetNominalPriority() const { 215 [[nodiscard]] s32 GetBasePriority() const {
206 return nominal_priority; 216 return base_priority;
207 } 217 }
208 218
209 /** 219 /**
210 * Sets the thread's current priority 220 * Sets the thread's nominal priority.
211 * @param priority The new priority 221 * @param priority The new priority.
212 */ 222 */
213 void SetPriority(u32 priority); 223 void SetBasePriority(u32 priority);
214
215 /// Adds a thread to the list of threads that are waiting for a lock held by this thread.
216 void AddMutexWaiter(std::shared_ptr<Thread> thread);
217
218 /// Removes a thread from the list of threads that are waiting for a lock held by this thread.
219 void RemoveMutexWaiter(std::shared_ptr<Thread> thread);
220
221 /// Recalculates the current priority taking into account priority inheritance.
222 void UpdatePriority();
223 224
224 /// Changes the core that the thread is running or scheduled to run on. 225 /// Changes the core that the thread is running or scheduled to run on.
225 ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask); 226 [[nodiscard]] ResultCode SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask);
226 227
227 /** 228 /**
228 * Gets the thread's thread ID 229 * Gets the thread's thread ID
229 * @return The thread's ID 230 * @return The thread's ID
230 */ 231 */
231 u64 GetThreadID() const { 232 [[nodiscard]] u64 GetThreadID() const {
232 return thread_id; 233 return thread_id;
233 } 234 }
234 235
235 /// Resumes a thread from waiting 236 /// Resumes a thread from waiting
236 void ResumeFromWait(); 237 void Wakeup();
237
238 void OnWakeUp();
239 238
240 ResultCode Start(); 239 ResultCode Start();
241 240
241 virtual bool IsSignaled() const override;
242
242 /// Cancels a waiting operation that this thread may or may not be within. 243 /// Cancels a waiting operation that this thread may or may not be within.
243 /// 244 ///
244 /// When the thread is within a waiting state, this will set the thread's 245 /// When the thread is within a waiting state, this will set the thread's
@@ -247,29 +248,20 @@ public:
247 /// 248 ///
248 void CancelWait(); 249 void CancelWait();
249 250
250 void SetSynchronizationResults(SynchronizationObject* object, ResultCode result); 251 void SetSynchronizationResults(KSynchronizationObject* object, ResultCode result);
251 252
252 SynchronizationObject* GetSignalingObject() const { 253 void SetSyncedObject(KSynchronizationObject* object, ResultCode result) {
253 return signaling_object; 254 SetSynchronizationResults(object, result);
254 } 255 }
255 256
256 ResultCode GetSignalingResult() const { 257 ResultCode GetWaitResult(KSynchronizationObject** out) const {
258 *out = signaling_object;
257 return signaling_result; 259 return signaling_result;
258 } 260 }
259 261
260 /** 262 ResultCode GetSignalingResult() const {
261 * Retrieves the index that this particular object occupies in the list of objects 263 return signaling_result;
262 * that the thread passed to WaitSynchronization, starting the search from the last element. 264 }
263 *
264 * It is used to set the output index of WaitSynchronization when the thread is awakened.
265 *
266 * When a thread wakes up due to an object signal, the kernel will use the index of the last
267 * matching object in the wait objects list in case of having multiple instances of the same
268 * object in the list.
269 *
270 * @param object Object to query the index of.
271 */
272 s32 GetSynchronizationObjectIndex(std::shared_ptr<SynchronizationObject> object) const;
273 265
274 /** 266 /**
275 * Stops a thread, invalidating it from further use 267 * Stops a thread, invalidating it from further use
@@ -341,18 +333,22 @@ public:
341 333
342 std::shared_ptr<Common::Fiber>& GetHostContext(); 334 std::shared_ptr<Common::Fiber>& GetHostContext();
343 335
344 ThreadStatus GetStatus() const { 336 ThreadState GetState() const {
345 return status; 337 return thread_state & ThreadState::Mask;
338 }
339
340 ThreadState GetRawState() const {
341 return thread_state;
346 } 342 }
347 343
348 void SetStatus(ThreadStatus new_status); 344 void SetState(ThreadState state);
349 345
350 s64 GetLastScheduledTick() const { 346 s64 GetLastScheduledTick() const {
351 return this->last_scheduled_tick; 347 return last_scheduled_tick;
352 } 348 }
353 349
354 void SetLastScheduledTick(s64 tick) { 350 void SetLastScheduledTick(s64 tick) {
355 this->last_scheduled_tick = tick; 351 last_scheduled_tick = tick;
356 } 352 }
357 353
358 u64 GetTotalCPUTimeTicks() const { 354 u64 GetTotalCPUTimeTicks() const {
@@ -387,98 +383,18 @@ public:
387 return owner_process; 383 return owner_process;
388 } 384 }
389 385
390 const ThreadSynchronizationObjects& GetSynchronizationObjects() const {
391 return *wait_objects;
392 }
393
394 void SetSynchronizationObjects(ThreadSynchronizationObjects* objects) {
395 wait_objects = objects;
396 }
397
398 void ClearSynchronizationObjects() {
399 for (const auto& waiting_object : *wait_objects) {
400 waiting_object->RemoveWaitingThread(SharedFrom(this));
401 }
402 wait_objects->clear();
403 }
404
405 /// Determines whether all the objects this thread is waiting on are ready.
406 bool AllSynchronizationObjectsReady() const;
407
408 const MutexWaitingThreads& GetMutexWaitingThreads() const { 386 const MutexWaitingThreads& GetMutexWaitingThreads() const {
409 return wait_mutex_threads; 387 return wait_mutex_threads;
410 } 388 }
411 389
412 Thread* GetLockOwner() const { 390 Thread* GetLockOwner() const {
413 return lock_owner.get(); 391 return lock_owner;
414 }
415
416 void SetLockOwner(std::shared_ptr<Thread> owner) {
417 lock_owner = std::move(owner);
418 }
419
420 VAddr GetCondVarWaitAddress() const {
421 return condvar_wait_address;
422 }
423
424 void SetCondVarWaitAddress(VAddr address) {
425 condvar_wait_address = address;
426 }
427
428 VAddr GetMutexWaitAddress() const {
429 return mutex_wait_address;
430 }
431
432 void SetMutexWaitAddress(VAddr address) {
433 mutex_wait_address = address;
434 }
435
436 Handle GetWaitHandle() const {
437 return wait_handle;
438 }
439
440 void SetWaitHandle(Handle handle) {
441 wait_handle = handle;
442 }
443
444 VAddr GetArbiterWaitAddress() const {
445 return arb_wait_address;
446 }
447
448 void SetArbiterWaitAddress(VAddr address) {
449 arb_wait_address = address;
450 }
451
452 bool HasHLECallback() const {
453 return hle_callback != nullptr;
454 }
455
456 void SetHLECallback(HLECallback callback) {
457 hle_callback = std::move(callback);
458 }
459
460 void SetHLETimeEvent(Handle time_event) {
461 hle_time_event = time_event;
462 }
463
464 void SetHLESyncObject(SynchronizationObject* object) {
465 hle_object = object;
466 }
467
468 Handle GetHLETimeEvent() const {
469 return hle_time_event;
470 }
471
472 SynchronizationObject* GetHLESyncObject() const {
473 return hle_object;
474 } 392 }
475 393
476 void InvalidateHLECallback() { 394 void SetLockOwner(Thread* owner) {
477 SetHLECallback(nullptr); 395 lock_owner = owner;
478 } 396 }
479 397
480 bool InvokeHLECallback(std::shared_ptr<Thread> thread);
481
482 u32 GetIdealCore() const { 398 u32 GetIdealCore() const {
483 return ideal_core; 399 return ideal_core;
484 } 400 }
@@ -493,20 +409,11 @@ public:
493 ResultCode Sleep(s64 nanoseconds); 409 ResultCode Sleep(s64 nanoseconds);
494 410
495 s64 GetYieldScheduleCount() const { 411 s64 GetYieldScheduleCount() const {
496 return this->schedule_count; 412 return schedule_count;
497 } 413 }
498 414
499 void SetYieldScheduleCount(s64 count) { 415 void SetYieldScheduleCount(s64 count) {
500 this->schedule_count = count; 416 schedule_count = count;
501 }
502
503 ThreadSchedStatus GetSchedulingStatus() const {
504 return static_cast<ThreadSchedStatus>(scheduling_state &
505 static_cast<u32>(ThreadSchedMasks::LowMask));
506 }
507
508 bool IsRunnable() const {
509 return scheduling_state == static_cast<u32>(ThreadSchedStatus::Runnable);
510 } 417 }
511 418
512 bool IsRunning() const { 419 bool IsRunning() const {
@@ -517,36 +424,32 @@ public:
517 is_running = value; 424 is_running = value;
518 } 425 }
519 426
520 bool IsSyncCancelled() const { 427 bool IsWaitCancelled() const {
521 return is_sync_cancelled; 428 return is_sync_cancelled;
522 } 429 }
523 430
524 void SetSyncCancelled(bool value) { 431 void ClearWaitCancelled() {
525 is_sync_cancelled = value; 432 is_sync_cancelled = false;
526 } 433 }
527 434
528 Handle GetGlobalHandle() const { 435 Handle GetGlobalHandle() const {
529 return global_handle; 436 return global_handle;
530 } 437 }
531 438
532 bool IsWaitingForArbitration() const { 439 bool IsCancellable() const {
533 return waiting_for_arbitration; 440 return is_cancellable;
534 } 441 }
535 442
536 void WaitForArbitration(bool set) { 443 void SetCancellable() {
537 waiting_for_arbitration = set; 444 is_cancellable = true;
538 } 445 }
539 446
540 bool IsWaitingSync() const { 447 void ClearCancellable() {
541 return is_waiting_on_sync; 448 is_cancellable = false;
542 } 449 }
543 450
544 void SetWaitingSync(bool is_waiting) { 451 bool IsTerminationRequested() const {
545 is_waiting_on_sync = is_waiting; 452 return will_be_terminated || GetRawState() == ThreadState::Terminated;
546 }
547
548 bool IsPendingTermination() const {
549 return will_be_terminated || GetSchedulingStatus() == ThreadSchedStatus::Exited;
550 } 453 }
551 454
552 bool IsPaused() const { 455 bool IsPaused() const {
@@ -578,21 +481,21 @@ public:
578 constexpr QueueEntry() = default; 481 constexpr QueueEntry() = default;
579 482
580 constexpr void Initialize() { 483 constexpr void Initialize() {
581 this->prev = nullptr; 484 prev = nullptr;
582 this->next = nullptr; 485 next = nullptr;
583 } 486 }
584 487
585 constexpr Thread* GetPrev() const { 488 constexpr Thread* GetPrev() const {
586 return this->prev; 489 return prev;
587 } 490 }
588 constexpr Thread* GetNext() const { 491 constexpr Thread* GetNext() const {
589 return this->next; 492 return next;
590 } 493 }
591 constexpr void SetPrev(Thread* thread) { 494 constexpr void SetPrev(Thread* thread) {
592 this->prev = thread; 495 prev = thread;
593 } 496 }
594 constexpr void SetNext(Thread* thread) { 497 constexpr void SetNext(Thread* thread) {
595 this->next = thread; 498 next = thread;
596 } 499 }
597 500
598 private: 501 private:
@@ -601,11 +504,11 @@ public:
601 }; 504 };
602 505
603 QueueEntry& GetPriorityQueueEntry(s32 core) { 506 QueueEntry& GetPriorityQueueEntry(s32 core) {
604 return this->per_core_priority_queue_entry[core]; 507 return per_core_priority_queue_entry[core];
605 } 508 }
606 509
607 const QueueEntry& GetPriorityQueueEntry(s32 core) const { 510 const QueueEntry& GetPriorityQueueEntry(s32 core) const {
608 return this->per_core_priority_queue_entry[core]; 511 return per_core_priority_queue_entry[core];
609 } 512 }
610 513
611 s32 GetDisableDispatchCount() const { 514 s32 GetDisableDispatchCount() const {
@@ -622,24 +525,170 @@ public:
622 disable_count--; 525 disable_count--;
623 } 526 }
624 527
528 void SetWaitReasonForDebugging(ThreadWaitReasonForDebugging reason) {
529 wait_reason_for_debugging = reason;
530 }
531
532 [[nodiscard]] ThreadWaitReasonForDebugging GetWaitReasonForDebugging() const {
533 return wait_reason_for_debugging;
534 }
535
536 void SetWaitObjectsForDebugging(const std::span<KSynchronizationObject*>& objects) {
537 wait_objects_for_debugging.clear();
538 wait_objects_for_debugging.reserve(objects.size());
539 for (const auto& object : objects) {
540 wait_objects_for_debugging.emplace_back(object);
541 }
542 }
543
544 [[nodiscard]] const std::vector<KSynchronizationObject*>& GetWaitObjectsForDebugging() const {
545 return wait_objects_for_debugging;
546 }
547
548 void SetMutexWaitAddressForDebugging(VAddr address) {
549 mutex_wait_address_for_debugging = address;
550 }
551
552 [[nodiscard]] VAddr GetMutexWaitAddressForDebugging() const {
553 return mutex_wait_address_for_debugging;
554 }
555
556 void AddWaiter(Thread* thread);
557
558 void RemoveWaiter(Thread* thread);
559
560 [[nodiscard]] Thread* RemoveWaiterByKey(s32* out_num_waiters, VAddr key);
561
562 [[nodiscard]] VAddr GetAddressKey() const {
563 return address_key;
564 }
565
566 [[nodiscard]] u32 GetAddressKeyValue() const {
567 return address_key_value;
568 }
569
570 void SetAddressKey(VAddr key) {
571 address_key = key;
572 }
573
574 void SetAddressKey(VAddr key, u32 val) {
575 address_key = key;
576 address_key_value = val;
577 }
578
625private: 579private:
626 friend class GlobalSchedulerContext; 580 static constexpr size_t PriorityInheritanceCountMax = 10;
627 friend class KScheduler; 581 union SyncObjectBuffer {
628 friend class Process; 582 std::array<KSynchronizationObject*, Svc::ArgumentHandleCountMax> sync_objects{};
583 std::array<Handle,
584 Svc::ArgumentHandleCountMax*(sizeof(KSynchronizationObject*) / sizeof(Handle))>
585 handles;
586 constexpr SyncObjectBuffer() {}
587 };
588 static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles));
589
590 struct ConditionVariableComparator {
591 struct LightCompareType {
592 u64 cv_key{};
593 s32 priority{};
594
595 [[nodiscard]] constexpr u64 GetConditionVariableKey() const {
596 return cv_key;
597 }
598
599 [[nodiscard]] constexpr s32 GetPriority() const {
600 return priority;
601 }
602 };
603
604 template <typename T>
605 requires(
606 std::same_as<T, Thread> ||
607 std::same_as<T, LightCompareType>) static constexpr int Compare(const T& lhs,
608 const Thread& rhs) {
609 const uintptr_t l_key = lhs.GetConditionVariableKey();
610 const uintptr_t r_key = rhs.GetConditionVariableKey();
611
612 if (l_key < r_key) {
613 // Sort first by key
614 return -1;
615 } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
616 // And then by priority.
617 return -1;
618 } else {
619 return 1;
620 }
621 }
622 };
623
624 Common::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node{};
625
626 using ConditionVariableThreadTreeTraits =
627 Common::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&Thread::condvar_arbiter_tree_node>;
628 using ConditionVariableThreadTree =
629 ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
630
631public:
632 using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
633
634 [[nodiscard]] uintptr_t GetConditionVariableKey() const {
635 return condvar_key;
636 }
637
638 [[nodiscard]] uintptr_t GetAddressArbiterKey() const {
639 return condvar_key;
640 }
629 641
630 void SetSchedulingStatus(ThreadSchedStatus new_status); 642 void SetConditionVariable(ConditionVariableThreadTree* tree, VAddr address, uintptr_t cv_key,
643 u32 value) {
644 condvar_tree = tree;
645 condvar_key = cv_key;
646 address_key = address;
647 address_key_value = value;
648 }
649
650 void ClearConditionVariable() {
651 condvar_tree = nullptr;
652 }
653
654 [[nodiscard]] bool IsWaitingForConditionVariable() const {
655 return condvar_tree != nullptr;
656 }
657
658 void SetAddressArbiter(ConditionVariableThreadTree* tree, uintptr_t address) {
659 condvar_tree = tree;
660 condvar_key = address;
661 }
662
663 void ClearAddressArbiter() {
664 condvar_tree = nullptr;
665 }
666
667 [[nodiscard]] bool IsWaitingForAddressArbiter() const {
668 return condvar_tree != nullptr;
669 }
670
671 [[nodiscard]] ConditionVariableThreadTree* GetConditionVariableTree() const {
672 return condvar_tree;
673 }
674
675 [[nodiscard]] bool HasWaiters() const {
676 return !waiter_list.empty();
677 }
678
679private:
631 void AddSchedulingFlag(ThreadSchedFlags flag); 680 void AddSchedulingFlag(ThreadSchedFlags flag);
632 void RemoveSchedulingFlag(ThreadSchedFlags flag); 681 void RemoveSchedulingFlag(ThreadSchedFlags flag);
633 682 void AddWaiterImpl(Thread* thread);
634 void SetCurrentPriority(u32 new_priority); 683 void RemoveWaiterImpl(Thread* thread);
684 static void RestorePriority(KernelCore& kernel, Thread* thread);
635 685
636 Common::SpinLock context_guard{}; 686 Common::SpinLock context_guard{};
637 ThreadContext32 context_32{}; 687 ThreadContext32 context_32{};
638 ThreadContext64 context_64{}; 688 ThreadContext64 context_64{};
639 std::shared_ptr<Common::Fiber> host_context{}; 689 std::shared_ptr<Common::Fiber> host_context{};
640 690
641 ThreadStatus status = ThreadStatus::Dormant; 691 ThreadState thread_state = ThreadState::Initialized;
642 u32 scheduling_state = 0;
643 692
644 u64 thread_id = 0; 693 u64 thread_id = 0;
645 694
@@ -652,11 +701,11 @@ private:
652 /// Nominal thread priority, as set by the emulated application. 701 /// Nominal thread priority, as set by the emulated application.
653 /// The nominal priority is the thread priority without priority 702 /// The nominal priority is the thread priority without priority
654 /// inheritance taken into account. 703 /// inheritance taken into account.
655 u32 nominal_priority = 0; 704 s32 base_priority{};
656 705
657 /// Current thread priority. This may change over the course of the 706 /// Current thread priority. This may change over the course of the
658 /// thread's lifetime in order to facilitate priority inheritance. 707 /// thread's lifetime in order to facilitate priority inheritance.
659 u32 current_priority = 0; 708 s32 current_priority{};
660 709
661 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks. 710 u64 total_cpu_time_ticks = 0; ///< Total CPU running ticks.
662 s64 schedule_count{}; 711 s64 schedule_count{};
@@ -671,37 +720,27 @@ private:
671 Process* owner_process; 720 Process* owner_process;
672 721
673 /// Objects that the thread is waiting on, in the same order as they were 722 /// Objects that the thread is waiting on, in the same order as they were
674 /// passed to WaitSynchronization. 723 /// passed to WaitSynchronization. This is used for debugging only.
675 ThreadSynchronizationObjects* wait_objects; 724 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
676 725
677 SynchronizationObject* signaling_object; 726 /// The current mutex wait address. This is used for debugging only.
727 VAddr mutex_wait_address_for_debugging{};
728
729 /// The reason the thread is waiting. This is used for debugging only.
730 ThreadWaitReasonForDebugging wait_reason_for_debugging{};
731
732 KSynchronizationObject* signaling_object;
678 ResultCode signaling_result{RESULT_SUCCESS}; 733 ResultCode signaling_result{RESULT_SUCCESS};
679 734
680 /// List of threads that are waiting for a mutex that is held by this thread. 735 /// List of threads that are waiting for a mutex that is held by this thread.
681 MutexWaitingThreads wait_mutex_threads; 736 MutexWaitingThreads wait_mutex_threads;
682 737
683 /// Thread that owns the lock that this thread is waiting for. 738 /// Thread that owns the lock that this thread is waiting for.
684 std::shared_ptr<Thread> lock_owner; 739 Thread* lock_owner{};
685
686 /// If waiting on a ConditionVariable, this is the ConditionVariable address
687 VAddr condvar_wait_address = 0;
688 /// If waiting on a Mutex, this is the mutex address
689 VAddr mutex_wait_address = 0;
690 /// The handle used to wait for the mutex.
691 Handle wait_handle = 0;
692
693 /// If waiting for an AddressArbiter, this is the address being waited on.
694 VAddr arb_wait_address{0};
695 bool waiting_for_arbitration{};
696 740
697 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue. 741 /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
698 Handle global_handle = 0; 742 Handle global_handle = 0;
699 743
700 /// Callback for HLE Events
701 HLECallback hle_callback;
702 Handle hle_time_event;
703 SynchronizationObject* hle_object;
704
705 KScheduler* scheduler = nullptr; 744 KScheduler* scheduler = nullptr;
706 745
707 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{}; 746 std::array<QueueEntry, Core::Hardware::NUM_CPU_CORES> per_core_priority_queue_entry{};
@@ -714,7 +753,7 @@ private:
714 753
715 u32 pausing_state = 0; 754 u32 pausing_state = 0;
716 bool is_running = false; 755 bool is_running = false;
717 bool is_waiting_on_sync = false; 756 bool is_cancellable = false;
718 bool is_sync_cancelled = false; 757 bool is_sync_cancelled = false;
719 758
720 bool is_continuous_on_svc = false; 759 bool is_continuous_on_svc = false;
@@ -725,6 +764,18 @@ private:
725 764
726 bool was_running = false; 765 bool was_running = false;
727 766
767 bool signaled{};
768
769 ConditionVariableThreadTree* condvar_tree{};
770 uintptr_t condvar_key{};
771 VAddr address_key{};
772 u32 address_key_value{};
773 s32 num_kernel_waiters{};
774
775 using WaiterList = boost::intrusive::list<Thread>;
776 WaiterList waiter_list{};
777 WaiterList pinned_waiter_list{};
778
728 std::string name; 779 std::string name;
729}; 780};
730 781
diff --git a/src/core/hle/kernel/time_manager.cpp b/src/core/hle/kernel/time_manager.cpp
index 79628e2b4..832edd629 100644
--- a/src/core/hle/kernel/time_manager.cpp
+++ b/src/core/hle/kernel/time_manager.cpp
@@ -18,12 +18,10 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
18 time_manager_event_type = Core::Timing::CreateEvent( 18 time_manager_event_type = Core::Timing::CreateEvent(
19 "Kernel::TimeManagerCallback", 19 "Kernel::TimeManagerCallback",
20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) { 20 [this](std::uintptr_t thread_handle, std::chrono::nanoseconds) {
21 const KScopedSchedulerLock lock(system.Kernel());
22 const auto proper_handle = static_cast<Handle>(thread_handle);
23
24 std::shared_ptr<Thread> thread; 21 std::shared_ptr<Thread> thread;
25 { 22 {
26 std::lock_guard lock{mutex}; 23 std::lock_guard lock{mutex};
24 const auto proper_handle = static_cast<Handle>(thread_handle);
27 if (cancelled_events[proper_handle]) { 25 if (cancelled_events[proper_handle]) {
28 return; 26 return;
29 } 27 }
@@ -32,7 +30,7 @@ TimeManager::TimeManager(Core::System& system_) : system{system_} {
32 30
33 if (thread) { 31 if (thread) {
34 // Thread can be null if process has exited 32 // Thread can be null if process has exited
35 thread->OnWakeUp(); 33 thread->Wakeup();
36 } 34 }
37 }); 35 });
38} 36}
@@ -42,8 +40,7 @@ void TimeManager::ScheduleTimeEvent(Handle& event_handle, Thread* timetask, s64
42 event_handle = timetask->GetGlobalHandle(); 40 event_handle = timetask->GetGlobalHandle();
43 if (nanoseconds > 0) { 41 if (nanoseconds > 0) {
44 ASSERT(timetask); 42 ASSERT(timetask);
45 ASSERT(timetask->GetStatus() != ThreadStatus::Ready); 43 ASSERT(timetask->GetState() != ThreadState::Runnable);
46 ASSERT(timetask->GetStatus() != ThreadStatus::WaitMutex);
47 system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds}, 44 system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{nanoseconds},
48 time_manager_event_type, event_handle); 45 time_manager_event_type, event_handle);
49 } else { 46 } else {
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index 6981f8ee7..3ec0e1eca 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -32,9 +32,15 @@
32 32
33namespace Service::Account { 33namespace Service::Account {
34 34
35constexpr ResultCode ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 30}; 35constexpr ResultCode ERR_INVALID_USER_ID{ErrorModule::Account, 20};
36constexpr ResultCode ERR_INVALID_APPLICATION_ID{ErrorModule::Account, 22};
37constexpr ResultCode ERR_INVALID_BUFFER{ErrorModule::Account, 30};
38constexpr ResultCode ERR_INVALID_BUFFER_SIZE{ErrorModule::Account, 31};
36constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100}; 39constexpr ResultCode ERR_FAILED_SAVE_DATA{ErrorModule::Account, 100};
37 40
41// Thumbnails are hard coded to be at least this size
42constexpr std::size_t THUMBNAIL_SIZE = 0x24000;
43
38static std::string GetImagePath(Common::UUID uuid) { 44static std::string GetImagePath(Common::UUID uuid) {
39 return Common::FS::GetUserPath(Common::FS::UserPath::NANDDir) + 45 return Common::FS::GetUserPath(Common::FS::UserPath::NANDDir) +
40 "/system/save/8000000000000010/su/avators/" + uuid.FormatSwitch() + ".jpg"; 46 "/system/save/8000000000000010/su/avators/" + uuid.FormatSwitch() + ".jpg";
@@ -369,7 +375,7 @@ protected:
369 if (user_data.size() < sizeof(ProfileData)) { 375 if (user_data.size() < sizeof(ProfileData)) {
370 LOG_ERROR(Service_ACC, "ProfileData buffer too small!"); 376 LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
371 IPC::ResponseBuilder rb{ctx, 2}; 377 IPC::ResponseBuilder rb{ctx, 2};
372 rb.Push(ERR_INVALID_BUFFER_SIZE); 378 rb.Push(ERR_INVALID_BUFFER);
373 return; 379 return;
374 } 380 }
375 381
@@ -402,7 +408,7 @@ protected:
402 if (user_data.size() < sizeof(ProfileData)) { 408 if (user_data.size() < sizeof(ProfileData)) {
403 LOG_ERROR(Service_ACC, "ProfileData buffer too small!"); 409 LOG_ERROR(Service_ACC, "ProfileData buffer too small!");
404 IPC::ResponseBuilder rb{ctx, 2}; 410 IPC::ResponseBuilder rb{ctx, 2};
405 rb.Push(ERR_INVALID_BUFFER_SIZE); 411 rb.Push(ERR_INVALID_BUFFER);
406 return; 412 return;
407 } 413 }
408 414
@@ -534,7 +540,7 @@ private:
534 rb.Push(RESULT_SUCCESS); 540 rb.Push(RESULT_SUCCESS);
535 } 541 }
536 542
537 Common::UUID user_id; 543 Common::UUID user_id{Common::INVALID_UUID};
538}; 544};
539 545
540// 6.0.0+ 546// 6.0.0+
@@ -811,6 +817,55 @@ void Module::Interface::ListOpenContextStoredUsers(Kernel::HLERequestContext& ct
811 rb.Push(RESULT_SUCCESS); 817 rb.Push(RESULT_SUCCESS);
812} 818}
813 819
820void Module::Interface::StoreSaveDataThumbnailApplication(Kernel::HLERequestContext& ctx) {
821 IPC::RequestParser rp{ctx};
822 const auto uuid = rp.PopRaw<Common::UUID>();
823
824 LOG_WARNING(Service_ACC, "(STUBBED) called, uuid={}", uuid.Format());
825
826 // TODO(ogniK): Check if application ID is zero on acc initialize. As we don't have a reliable
827 // way of confirming things like the TID, we're going to assume a non zero value for the time
828 // being.
829 constexpr u64 tid{1};
830 StoreSaveDataThumbnail(ctx, uuid, tid);
831}
832
833void Module::Interface::StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx) {
834 IPC::RequestParser rp{ctx};
835 const auto uuid = rp.PopRaw<Common::UUID>();
836 const auto tid = rp.Pop<u64_le>();
837
838 LOG_WARNING(Service_ACC, "(STUBBED) called, uuid={}, tid={:016X}", uuid.Format(), tid);
839 StoreSaveDataThumbnail(ctx, uuid, tid);
840}
841
842void Module::Interface::StoreSaveDataThumbnail(Kernel::HLERequestContext& ctx,
843 const Common::UUID& uuid, const u64 tid) {
844 IPC::ResponseBuilder rb{ctx, 2};
845
846 if (tid == 0) {
847 LOG_ERROR(Service_ACC, "TitleID is not valid!");
848 rb.Push(ERR_INVALID_APPLICATION_ID);
849 return;
850 }
851
852 if (!uuid) {
853 LOG_ERROR(Service_ACC, "User ID is not valid!");
854 rb.Push(ERR_INVALID_USER_ID);
855 return;
856 }
857 const auto thumbnail_size = ctx.GetReadBufferSize();
858 if (thumbnail_size != THUMBNAIL_SIZE) {
859 LOG_ERROR(Service_ACC, "Buffer size is empty! size={:X} expecting {:X}", thumbnail_size,
860 THUMBNAIL_SIZE);
861 rb.Push(ERR_INVALID_BUFFER_SIZE);
862 return;
863 }
864
865 // TODO(ogniK): Construct save data thumbnail
866 rb.Push(RESULT_SUCCESS);
867}
868
814void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) { 869void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
815 LOG_DEBUG(Service_ACC, "called"); 870 LOG_DEBUG(Service_ACC, "called");
816 // A u8 is passed into this function which we can safely ignore. It's to determine if we have 871 // A u8 is passed into this function which we can safely ignore. It's to determine if we have
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index ab8edc049..0e3ad8ec6 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -4,6 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include "common/uuid.h"
7#include "core/hle/service/glue/manager.h" 8#include "core/hle/service/glue/manager.h"
8#include "core/hle/service/service.h" 9#include "core/hle/service/service.h"
9 10
@@ -36,9 +37,13 @@ public:
36 void ListQualifiedUsers(Kernel::HLERequestContext& ctx); 37 void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
37 void LoadOpenContext(Kernel::HLERequestContext& ctx); 38 void LoadOpenContext(Kernel::HLERequestContext& ctx);
38 void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx); 39 void ListOpenContextStoredUsers(Kernel::HLERequestContext& ctx);
40 void StoreSaveDataThumbnailApplication(Kernel::HLERequestContext& ctx);
41 void StoreSaveDataThumbnailSystem(Kernel::HLERequestContext& ctx);
39 42
40 private: 43 private:
41 ResultCode InitializeApplicationInfoBase(); 44 ResultCode InitializeApplicationInfoBase();
45 void StoreSaveDataThumbnail(Kernel::HLERequestContext& ctx, const Common::UUID& uuid,
46 const u64 tid);
42 47
43 enum class ApplicationType : u32_le { 48 enum class ApplicationType : u32_le {
44 GameCard = 0, 49 GameCard = 0,
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index d2bb8c2c8..49b22583e 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -29,7 +29,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
29 {104, nullptr, "GetProfileUpdateNotifier"}, 29 {104, nullptr, "GetProfileUpdateNotifier"},
30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+ 30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+ 31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
32 {110, nullptr, "StoreSaveDataThumbnail"}, 32 {110, &ACC_SU::StoreSaveDataThumbnailSystem, "StoreSaveDataThumbnail"},
33 {111, nullptr, "ClearSaveDataThumbnail"}, 33 {111, nullptr, "ClearSaveDataThumbnail"},
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+ 35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index 75a24f8f5..8d66d180d 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -26,7 +26,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
26 {101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"}, 26 {101, &ACC_U0::GetBaasAccountManagerForApplication, "GetBaasAccountManagerForApplication"},
27 {102, nullptr, "AuthenticateApplicationAsync"}, 27 {102, nullptr, "AuthenticateApplicationAsync"},
28 {103, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+ 28 {103, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
29 {110, nullptr, "StoreSaveDataThumbnail"}, 29 {110, &ACC_U0::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"},
30 {111, nullptr, "ClearSaveDataThumbnail"}, 30 {111, nullptr, "ClearSaveDataThumbnail"},
31 {120, nullptr, "CreateGuestLoginRequest"}, 31 {120, nullptr, "CreateGuestLoginRequest"},
32 {130, &ACC_U0::LoadOpenContext, "LoadOpenContext"}, // 5.0.0+ 32 {130, &ACC_U0::LoadOpenContext, "LoadOpenContext"}, // 5.0.0+
diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp
index a4aa5316a..951081cd0 100644
--- a/src/core/hle/service/acc/acc_u1.cpp
+++ b/src/core/hle/service/acc/acc_u1.cpp
@@ -29,7 +29,7 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
29 {104, nullptr, "GetProfileUpdateNotifier"}, 29 {104, nullptr, "GetProfileUpdateNotifier"},
30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+ 30 {105, nullptr, "CheckNetworkServiceAvailabilityAsync"}, // 4.0.0+
31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+ 31 {106, nullptr, "GetProfileSyncNotifier"}, // 9.0.0+
32 {110, nullptr, "StoreSaveDataThumbnail"}, 32 {110, &ACC_U1::StoreSaveDataThumbnailApplication, "StoreSaveDataThumbnail"},
33 {111, nullptr, "ClearSaveDataThumbnail"}, 33 {111, nullptr, "ClearSaveDataThumbnail"},
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+ 35 {113, nullptr, "GetSaveDataThumbnailExistence"}, // 5.0.0+
diff --git a/src/core/hle/service/acc/profile_manager.cpp b/src/core/hle/service/acc/profile_manager.cpp
index 9b829e957..d9865d56f 100644
--- a/src/core/hle/service/acc/profile_manager.cpp
+++ b/src/core/hle/service/acc/profile_manager.cpp
@@ -227,17 +227,17 @@ void ProfileManager::CloseUser(UUID uuid) {
227 227
228/// Gets all valid user ids on the system 228/// Gets all valid user ids on the system
229UserIDArray ProfileManager::GetAllUsers() const { 229UserIDArray ProfileManager::GetAllUsers() const {
230 UserIDArray output; 230 UserIDArray output{};
231 std::transform(profiles.begin(), profiles.end(), output.begin(), 231 std::ranges::transform(profiles, output.begin(),
232 [](const ProfileInfo& p) { return p.user_uuid; }); 232 [](const ProfileInfo& p) { return p.user_uuid; });
233 return output; 233 return output;
234} 234}
235 235
236/// Get all the open users on the system and zero out the rest of the data. This is specifically 236/// Get all the open users on the system and zero out the rest of the data. This is specifically
237/// needed for GetOpenUsers and we need to ensure the rest of the output buffer is zero'd out 237/// needed for GetOpenUsers and we need to ensure the rest of the output buffer is zero'd out
238UserIDArray ProfileManager::GetOpenUsers() const { 238UserIDArray ProfileManager::GetOpenUsers() const {
239 UserIDArray output; 239 UserIDArray output{};
240 std::transform(profiles.begin(), profiles.end(), output.begin(), [](const ProfileInfo& p) { 240 std::ranges::transform(profiles, output.begin(), [](const ProfileInfo& p) {
241 if (p.is_open) 241 if (p.is_open)
242 return p.user_uuid; 242 return p.user_uuid;
243 return UUID{Common::INVALID_UUID}; 243 return UUID{Common::INVALID_UUID};
diff --git a/src/core/hle/service/acc/profile_manager.h b/src/core/hle/service/acc/profile_manager.h
index 5310637a6..71b9d5518 100644
--- a/src/core/hle/service/acc/profile_manager.h
+++ b/src/core/hle/service/acc/profile_manager.h
@@ -23,12 +23,12 @@ using UserIDArray = std::array<Common::UUID, MAX_USERS>;
23/// Contains extra data related to a user. 23/// Contains extra data related to a user.
24/// TODO: RE this structure 24/// TODO: RE this structure
25struct ProfileData { 25struct ProfileData {
26 INSERT_PADDING_WORDS(1); 26 INSERT_PADDING_WORDS_NOINIT(1);
27 u32 icon_id{}; 27 u32 icon_id;
28 u8 bg_color_id{}; 28 u8 bg_color_id;
29 INSERT_PADDING_BYTES(0x7); 29 INSERT_PADDING_BYTES_NOINIT(0x7);
30 INSERT_PADDING_BYTES(0x10); 30 INSERT_PADDING_BYTES_NOINIT(0x10);
31 INSERT_PADDING_BYTES(0x60); 31 INSERT_PADDING_BYTES_NOINIT(0x60);
32}; 32};
33static_assert(sizeof(ProfileData) == 0x80, "ProfileData structure has incorrect size"); 33static_assert(sizeof(ProfileData) == 0x80, "ProfileData structure has incorrect size");
34 34
@@ -43,9 +43,9 @@ struct ProfileInfo {
43}; 43};
44 44
45struct ProfileBase { 45struct ProfileBase {
46 Common::UUID user_uuid{Common::INVALID_UUID}; 46 Common::UUID user_uuid;
47 u64_le timestamp{}; 47 u64_le timestamp;
48 ProfileUsername username{}; 48 ProfileUsername username;
49 49
50 // Zero out all the fields to make the profile slot considered "Empty" 50 // Zero out all the fields to make the profile slot considered "Empty"
51 void Invalidate() { 51 void Invalidate() {
diff --git a/src/core/hle/service/am/applets/error.cpp b/src/core/hle/service/am/applets/error.cpp
index d85505082..0c8b632e8 100644
--- a/src/core/hle/service/am/applets/error.cpp
+++ b/src/core/hle/service/am/applets/error.cpp
@@ -20,9 +20,9 @@ namespace Service::AM::Applets {
20struct ShowError { 20struct ShowError {
21 u8 mode; 21 u8 mode;
22 bool jump; 22 bool jump;
23 INSERT_UNION_PADDING_BYTES(4); 23 INSERT_PADDING_BYTES_NOINIT(4);
24 bool use_64bit_error_code; 24 bool use_64bit_error_code;
25 INSERT_UNION_PADDING_BYTES(1); 25 INSERT_PADDING_BYTES_NOINIT(1);
26 u64 error_code_64; 26 u64 error_code_64;
27 u32 error_code_32; 27 u32 error_code_32;
28}; 28};
@@ -32,7 +32,7 @@ static_assert(sizeof(ShowError) == 0x14, "ShowError has incorrect size.");
32struct ShowErrorRecord { 32struct ShowErrorRecord {
33 u8 mode; 33 u8 mode;
34 bool jump; 34 bool jump;
35 INSERT_UNION_PADDING_BYTES(6); 35 INSERT_PADDING_BYTES_NOINIT(6);
36 u64 error_code_64; 36 u64 error_code_64;
37 u64 posix_time; 37 u64 posix_time;
38}; 38};
@@ -41,7 +41,7 @@ static_assert(sizeof(ShowErrorRecord) == 0x18, "ShowErrorRecord has incorrect si
41struct SystemErrorArg { 41struct SystemErrorArg {
42 u8 mode; 42 u8 mode;
43 bool jump; 43 bool jump;
44 INSERT_UNION_PADDING_BYTES(6); 44 INSERT_PADDING_BYTES_NOINIT(6);
45 u64 error_code_64; 45 u64 error_code_64;
46 std::array<char, 8> language_code; 46 std::array<char, 8> language_code;
47 std::array<char, 0x800> main_text; 47 std::array<char, 0x800> main_text;
@@ -52,7 +52,7 @@ static_assert(sizeof(SystemErrorArg) == 0x1018, "SystemErrorArg has incorrect si
52struct ApplicationErrorArg { 52struct ApplicationErrorArg {
53 u8 mode; 53 u8 mode;
54 bool jump; 54 bool jump;
55 INSERT_UNION_PADDING_BYTES(6); 55 INSERT_PADDING_BYTES_NOINIT(6);
56 u32 error_code; 56 u32 error_code;
57 std::array<char, 8> language_code; 57 std::array<char, 8> language_code;
58 std::array<char, 0x800> main_text; 58 std::array<char, 0x800> main_text;
diff --git a/src/core/hle/service/audio/audout_u.cpp b/src/core/hle/service/audio/audout_u.cpp
index 0cd797109..02ca711fb 100644
--- a/src/core/hle/service/audio/audout_u.cpp
+++ b/src/core/hle/service/audio/audout_u.cpp
@@ -29,7 +29,7 @@ constexpr int DefaultSampleRate{48000};
29struct AudoutParams { 29struct AudoutParams {
30 s32_le sample_rate; 30 s32_le sample_rate;
31 u16_le channel_count; 31 u16_le channel_count;
32 INSERT_PADDING_BYTES(2); 32 INSERT_PADDING_BYTES_NOINIT(2);
33}; 33};
34static_assert(sizeof(AudoutParams) == 0x8, "AudoutParams is an invalid size"); 34static_assert(sizeof(AudoutParams) == 0x8, "AudoutParams is an invalid size");
35 35
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index d280e7caf..ff783b3cc 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -141,7 +141,9 @@ bool Controller_NPad::IsDeviceHandleValid(const DeviceHandle& device_handle) {
141 device_handle.device_index < DeviceIndex::MaxDeviceIndex; 141 device_handle.device_index < DeviceIndex::MaxDeviceIndex;
142} 142}
143 143
144Controller_NPad::Controller_NPad(Core::System& system) : ControllerBase(system), system(system) {} 144Controller_NPad::Controller_NPad(Core::System& system) : ControllerBase(system), system(system) {
145 latest_vibration_values.fill({DEFAULT_VIBRATION_VALUE, DEFAULT_VIBRATION_VALUE});
146}
145 147
146Controller_NPad::~Controller_NPad() { 148Controller_NPad::~Controller_NPad() {
147 OnRelease(); 149 OnRelease();
@@ -732,7 +734,7 @@ bool Controller_NPad::VibrateControllerAtIndex(std::size_t npad_index, std::size
732 // Send an empty vibration to stop any vibrations. 734 // Send an empty vibration to stop any vibrations.
733 vibrations[npad_index][device_index]->SetRumblePlay(0.0f, 160.0f, 0.0f, 320.0f); 735 vibrations[npad_index][device_index]->SetRumblePlay(0.0f, 160.0f, 0.0f, 320.0f);
734 // Then reset the vibration value to its default value. 736 // Then reset the vibration value to its default value.
735 latest_vibration_values[npad_index][device_index] = {}; 737 latest_vibration_values[npad_index][device_index] = DEFAULT_VIBRATION_VALUE;
736 } 738 }
737 739
738 return false; 740 return false;
@@ -890,7 +892,7 @@ void Controller_NPad::UpdateControllerAt(NPadControllerType controller, std::siz
890 return; 892 return;
891 } 893 }
892 894
893 if (controller == NPadControllerType::Handheld) { 895 if (controller == NPadControllerType::Handheld && npad_index == HANDHELD_INDEX) {
894 Settings::values.players.GetValue()[HANDHELD_INDEX].controller_type = 896 Settings::values.players.GetValue()[HANDHELD_INDEX].controller_type =
895 MapNPadToSettingsType(controller); 897 MapNPadToSettingsType(controller);
896 Settings::values.players.GetValue()[HANDHELD_INDEX].connected = true; 898 Settings::values.players.GetValue()[HANDHELD_INDEX].connected = true;
diff --git a/src/core/hle/service/hid/controllers/npad.h b/src/core/hle/service/hid/controllers/npad.h
index e2e826623..bc85ca4df 100644
--- a/src/core/hle/service/hid/controllers/npad.h
+++ b/src/core/hle/service/hid/controllers/npad.h
@@ -97,10 +97,10 @@ public:
97 }; 97 };
98 98
99 struct DeviceHandle { 99 struct DeviceHandle {
100 NpadType npad_type{}; 100 NpadType npad_type;
101 u8 npad_id{}; 101 u8 npad_id;
102 DeviceIndex device_index{}; 102 DeviceIndex device_index;
103 INSERT_PADDING_BYTES(1); 103 INSERT_PADDING_BYTES_NOINIT(1);
104 }; 104 };
105 static_assert(sizeof(DeviceHandle) == 4, "DeviceHandle is an invalid size"); 105 static_assert(sizeof(DeviceHandle) == 4, "DeviceHandle is an invalid size");
106 106
@@ -120,13 +120,20 @@ public:
120 static_assert(sizeof(NpadStyleSet) == 4, "NpadStyleSet is an invalid size"); 120 static_assert(sizeof(NpadStyleSet) == 4, "NpadStyleSet is an invalid size");
121 121
122 struct VibrationValue { 122 struct VibrationValue {
123 f32 amp_low{0.0f}; 123 f32 amp_low;
124 f32 freq_low{160.0f}; 124 f32 freq_low;
125 f32 amp_high{0.0f}; 125 f32 amp_high;
126 f32 freq_high{320.0f}; 126 f32 freq_high;
127 }; 127 };
128 static_assert(sizeof(VibrationValue) == 0x10, "Vibration is an invalid size"); 128 static_assert(sizeof(VibrationValue) == 0x10, "Vibration is an invalid size");
129 129
130 static constexpr VibrationValue DEFAULT_VIBRATION_VALUE{
131 .amp_low = 0.0f,
132 .freq_low = 160.0f,
133 .amp_high = 0.0f,
134 .freq_high = 320.0f,
135 };
136
130 struct LedPattern { 137 struct LedPattern {
131 explicit LedPattern(u64 light1, u64 light2, u64 light3, u64 light4) { 138 explicit LedPattern(u64 light1, u64 light2, u64 light3, u64 light4) {
132 position1.Assign(light1); 139 position1.Assign(light1);
diff --git a/src/core/hle/service/hid/controllers/touchscreen.cpp b/src/core/hle/service/hid/controllers/touchscreen.cpp
index 0df395e85..5219f2dad 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.cpp
+++ b/src/core/hle/service/hid/controllers/touchscreen.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm>
5#include <cstring> 6#include <cstring>
6#include "common/common_types.h" 7#include "common/common_types.h"
7#include "core/core_timing.h" 8#include "core/core_timing.h"
@@ -16,7 +17,13 @@ constexpr std::size_t SHARED_MEMORY_OFFSET = 0x400;
16Controller_Touchscreen::Controller_Touchscreen(Core::System& system) : ControllerBase(system) {} 17Controller_Touchscreen::Controller_Touchscreen(Core::System& system) : ControllerBase(system) {}
17Controller_Touchscreen::~Controller_Touchscreen() = default; 18Controller_Touchscreen::~Controller_Touchscreen() = default;
18 19
19void Controller_Touchscreen::OnInit() {} 20void Controller_Touchscreen::OnInit() {
21 for (std::size_t id = 0; id < MAX_FINGERS; ++id) {
22 mouse_finger_id[id] = MAX_FINGERS;
23 keyboard_finger_id[id] = MAX_FINGERS;
24 udp_finger_id[id] = MAX_FINGERS;
25 }
26}
20 27
21void Controller_Touchscreen::OnRelease() {} 28void Controller_Touchscreen::OnRelease() {}
22 29
@@ -40,38 +47,106 @@ void Controller_Touchscreen::OnUpdate(const Core::Timing::CoreTiming& core_timin
40 cur_entry.sampling_number = last_entry.sampling_number + 1; 47 cur_entry.sampling_number = last_entry.sampling_number + 1;
41 cur_entry.sampling_number2 = cur_entry.sampling_number; 48 cur_entry.sampling_number2 = cur_entry.sampling_number;
42 49
43 bool pressed = false; 50 const Input::TouchStatus& mouse_status = touch_mouse_device->GetStatus();
44 float x, y; 51 const Input::TouchStatus& udp_status = touch_udp_device->GetStatus();
45 std::tie(x, y, pressed) = touch_device->GetStatus(); 52 for (std::size_t id = 0; id < mouse_status.size(); ++id) {
46 auto& touch_entry = cur_entry.states[0]; 53 mouse_finger_id[id] = UpdateTouchInputEvent(mouse_status[id], mouse_finger_id[id]);
47 touch_entry.attribute.raw = 0; 54 udp_finger_id[id] = UpdateTouchInputEvent(udp_status[id], udp_finger_id[id]);
48 if (!pressed && touch_btn_device) {
49 std::tie(x, y, pressed) = touch_btn_device->GetStatus();
50 } 55 }
51 if (pressed && Settings::values.touchscreen.enabled) { 56
52 touch_entry.x = static_cast<u16>(x * Layout::ScreenUndocked::Width); 57 if (Settings::values.use_touch_from_button) {
53 touch_entry.y = static_cast<u16>(y * Layout::ScreenUndocked::Height); 58 const Input::TouchStatus& keyboard_status = touch_btn_device->GetStatus();
54 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x; 59 for (std::size_t id = 0; id < mouse_status.size(); ++id) {
55 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y; 60 keyboard_finger_id[id] =
56 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle; 61 UpdateTouchInputEvent(keyboard_status[id], keyboard_finger_id[id]);
57 const u64 tick = core_timing.GetCPUTicks(); 62 }
58 touch_entry.delta_time = tick - last_touch;
59 last_touch = tick;
60 touch_entry.finger = Settings::values.touchscreen.finger;
61 cur_entry.entry_count = 1;
62 } else {
63 cur_entry.entry_count = 0;
64 } 63 }
65 64
65 std::array<Finger, 16> active_fingers;
66 const auto end_iter = std::copy_if(fingers.begin(), fingers.end(), active_fingers.begin(),
67 [](const auto& finger) { return finger.pressed; });
68 const auto active_fingers_count =
69 static_cast<std::size_t>(std::distance(active_fingers.begin(), end_iter));
70
71 const u64 tick = core_timing.GetCPUTicks();
72 cur_entry.entry_count = static_cast<s32_le>(active_fingers_count);
73 for (std::size_t id = 0; id < MAX_FINGERS; ++id) {
74 auto& touch_entry = cur_entry.states[id];
75 if (id < active_fingers_count) {
76 touch_entry.x = static_cast<u16>(active_fingers[id].x * Layout::ScreenUndocked::Width);
77 touch_entry.y = static_cast<u16>(active_fingers[id].y * Layout::ScreenUndocked::Height);
78 touch_entry.diameter_x = Settings::values.touchscreen.diameter_x;
79 touch_entry.diameter_y = Settings::values.touchscreen.diameter_y;
80 touch_entry.rotation_angle = Settings::values.touchscreen.rotation_angle;
81 touch_entry.delta_time = tick - active_fingers[id].last_touch;
82 fingers[active_fingers[id].id].last_touch = tick;
83 touch_entry.finger = active_fingers[id].id;
84 touch_entry.attribute.raw = active_fingers[id].attribute.raw;
85 } else {
86 // Clear touch entry
87 touch_entry.attribute.raw = 0;
88 touch_entry.x = 0;
89 touch_entry.y = 0;
90 touch_entry.diameter_x = 0;
91 touch_entry.diameter_y = 0;
92 touch_entry.rotation_angle = 0;
93 touch_entry.delta_time = 0;
94 touch_entry.finger = 0;
95 }
96 }
66 std::memcpy(data + SHARED_MEMORY_OFFSET, &shared_memory, sizeof(TouchScreenSharedMemory)); 97 std::memcpy(data + SHARED_MEMORY_OFFSET, &shared_memory, sizeof(TouchScreenSharedMemory));
67} 98}
68 99
69void Controller_Touchscreen::OnLoadInputDevices() { 100void Controller_Touchscreen::OnLoadInputDevices() {
70 touch_device = Input::CreateDevice<Input::TouchDevice>(Settings::values.touchscreen.device); 101 touch_mouse_device = Input::CreateDevice<Input::TouchDevice>("engine:emu_window");
71 if (Settings::values.use_touch_from_button) { 102 touch_udp_device = Input::CreateDevice<Input::TouchDevice>("engine:cemuhookudp");
72 touch_btn_device = Input::CreateDevice<Input::TouchDevice>("engine:touch_from_button"); 103 touch_btn_device = Input::CreateDevice<Input::TouchDevice>("engine:touch_from_button");
73 } else { 104}
74 touch_btn_device.reset(); 105
106std::optional<std::size_t> Controller_Touchscreen::GetUnusedFingerID() const {
107 std::size_t first_free_id = 0;
108 while (first_free_id < MAX_FINGERS) {
109 if (!fingers[first_free_id].pressed) {
110 return first_free_id;
111 } else {
112 first_free_id++;
113 }
114 }
115 return std::nullopt;
116}
117
118std::size_t Controller_Touchscreen::UpdateTouchInputEvent(
119 const std::tuple<float, float, bool>& touch_input, std::size_t finger_id) {
120 const auto& [x, y, pressed] = touch_input;
121 if (pressed) {
122 Attributes attribute{};
123 if (finger_id == MAX_FINGERS) {
124 const auto first_free_id = GetUnusedFingerID();
125 if (!first_free_id) {
126 // Invalid finger id do nothing
127 return MAX_FINGERS;
128 }
129 finger_id = first_free_id.value();
130 fingers[finger_id].pressed = true;
131 fingers[finger_id].id = static_cast<u32_le>(finger_id);
132 attribute.start_touch.Assign(1);
133 }
134 fingers[finger_id].x = x;
135 fingers[finger_id].y = y;
136 fingers[finger_id].attribute = attribute;
137 return finger_id;
75 } 138 }
139
140 if (finger_id != MAX_FINGERS) {
141 if (!fingers[finger_id].attribute.end_touch) {
142 fingers[finger_id].attribute.end_touch.Assign(1);
143 fingers[finger_id].attribute.start_touch.Assign(0);
144 return finger_id;
145 }
146 fingers[finger_id].pressed = false;
147 }
148
149 return MAX_FINGERS;
76} 150}
151
77} // namespace Service::HID 152} // namespace Service::HID
diff --git a/src/core/hle/service/hid/controllers/touchscreen.h b/src/core/hle/service/hid/controllers/touchscreen.h
index 4d9042adc..784124e25 100644
--- a/src/core/hle/service/hid/controllers/touchscreen.h
+++ b/src/core/hle/service/hid/controllers/touchscreen.h
@@ -30,6 +30,18 @@ public:
30 void OnLoadInputDevices() override; 30 void OnLoadInputDevices() override;
31 31
32private: 32private:
33 static constexpr std::size_t MAX_FINGERS = 16;
34
35 // Returns an unused finger id, if there is no fingers available std::nullopt will be returned
36 std::optional<std::size_t> GetUnusedFingerID() const;
37
38 // If the touch is new it tries to assing a new finger id, if there is no fingers avaliable no
39 // changes will be made. Updates the coordinates if the finger id it's already set. If the touch
40 // ends delays the output by one frame to set the end_touch flag before finally freeing the
41 // finger id
42 std::size_t UpdateTouchInputEvent(const std::tuple<float, float, bool>& touch_input,
43 std::size_t finger_id);
44
33 struct Attributes { 45 struct Attributes {
34 union { 46 union {
35 u32 raw{}; 47 u32 raw{};
@@ -55,7 +67,7 @@ private:
55 s64_le sampling_number; 67 s64_le sampling_number;
56 s64_le sampling_number2; 68 s64_le sampling_number2;
57 s32_le entry_count; 69 s32_le entry_count;
58 std::array<TouchState, 16> states; 70 std::array<TouchState, MAX_FINGERS> states;
59 }; 71 };
60 static_assert(sizeof(TouchScreenEntry) == 0x298, "TouchScreenEntry is an invalid size"); 72 static_assert(sizeof(TouchScreenEntry) == 0x298, "TouchScreenEntry is an invalid size");
61 73
@@ -66,9 +78,23 @@ private:
66 }; 78 };
67 static_assert(sizeof(TouchScreenSharedMemory) == 0x3000, 79 static_assert(sizeof(TouchScreenSharedMemory) == 0x3000,
68 "TouchScreenSharedMemory is an invalid size"); 80 "TouchScreenSharedMemory is an invalid size");
81
82 struct Finger {
83 u64_le last_touch{};
84 float x{};
85 float y{};
86 u32_le id{};
87 bool pressed{};
88 Attributes attribute;
89 };
90
69 TouchScreenSharedMemory shared_memory{}; 91 TouchScreenSharedMemory shared_memory{};
70 std::unique_ptr<Input::TouchDevice> touch_device; 92 std::unique_ptr<Input::TouchDevice> touch_mouse_device;
93 std::unique_ptr<Input::TouchDevice> touch_udp_device;
71 std::unique_ptr<Input::TouchDevice> touch_btn_device; 94 std::unique_ptr<Input::TouchDevice> touch_btn_device;
72 s64_le last_touch{}; 95 std::array<std::size_t, MAX_FINGERS> mouse_finger_id;
96 std::array<std::size_t, MAX_FINGERS> keyboard_finger_id;
97 std::array<std::size_t, MAX_FINGERS> udp_finger_id;
98 std::array<Finger, MAX_FINGERS> fingers;
73}; 99};
74} // namespace Service::HID 100} // namespace Service::HID
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 8d95f74e6..2b13d6fe6 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -401,9 +401,9 @@ void Hid::SendKeyboardLockKeyEvent(Kernel::HLERequestContext& ctx) {
401void Hid::ActivateXpad(Kernel::HLERequestContext& ctx) { 401void Hid::ActivateXpad(Kernel::HLERequestContext& ctx) {
402 IPC::RequestParser rp{ctx}; 402 IPC::RequestParser rp{ctx};
403 struct Parameters { 403 struct Parameters {
404 u32 basic_xpad_id{}; 404 u32 basic_xpad_id;
405 INSERT_PADDING_WORDS(1); 405 INSERT_PADDING_WORDS_NOINIT(1);
406 u64 applet_resource_user_id{}; 406 u64 applet_resource_user_id;
407 }; 407 };
408 408
409 const auto parameters{rp.PopRaw<Parameters>()}; 409 const auto parameters{rp.PopRaw<Parameters>()};
@@ -431,9 +431,9 @@ void Hid::GetXpadIDs(Kernel::HLERequestContext& ctx) {
431void Hid::ActivateSixAxisSensor(Kernel::HLERequestContext& ctx) { 431void Hid::ActivateSixAxisSensor(Kernel::HLERequestContext& ctx) {
432 IPC::RequestParser rp{ctx}; 432 IPC::RequestParser rp{ctx};
433 struct Parameters { 433 struct Parameters {
434 Controller_NPad::DeviceHandle sixaxis_handle{}; 434 Controller_NPad::DeviceHandle sixaxis_handle;
435 INSERT_PADDING_WORDS(1); 435 INSERT_PADDING_WORDS_NOINIT(1);
436 u64 applet_resource_user_id{}; 436 u64 applet_resource_user_id;
437 }; 437 };
438 438
439 const auto parameters{rp.PopRaw<Parameters>()}; 439 const auto parameters{rp.PopRaw<Parameters>()};
@@ -452,9 +452,9 @@ void Hid::ActivateSixAxisSensor(Kernel::HLERequestContext& ctx) {
452void Hid::DeactivateSixAxisSensor(Kernel::HLERequestContext& ctx) { 452void Hid::DeactivateSixAxisSensor(Kernel::HLERequestContext& ctx) {
453 IPC::RequestParser rp{ctx}; 453 IPC::RequestParser rp{ctx};
454 struct Parameters { 454 struct Parameters {
455 Controller_NPad::DeviceHandle sixaxis_handle{}; 455 Controller_NPad::DeviceHandle sixaxis_handle;
456 INSERT_PADDING_WORDS(1); 456 INSERT_PADDING_WORDS_NOINIT(1);
457 u64 applet_resource_user_id{}; 457 u64 applet_resource_user_id;
458 }; 458 };
459 459
460 const auto parameters{rp.PopRaw<Parameters>()}; 460 const auto parameters{rp.PopRaw<Parameters>()};
@@ -473,9 +473,9 @@ void Hid::DeactivateSixAxisSensor(Kernel::HLERequestContext& ctx) {
473void Hid::StartSixAxisSensor(Kernel::HLERequestContext& ctx) { 473void Hid::StartSixAxisSensor(Kernel::HLERequestContext& ctx) {
474 IPC::RequestParser rp{ctx}; 474 IPC::RequestParser rp{ctx};
475 struct Parameters { 475 struct Parameters {
476 Controller_NPad::DeviceHandle sixaxis_handle{}; 476 Controller_NPad::DeviceHandle sixaxis_handle;
477 INSERT_PADDING_WORDS(1); 477 INSERT_PADDING_WORDS_NOINIT(1);
478 u64 applet_resource_user_id{}; 478 u64 applet_resource_user_id;
479 }; 479 };
480 480
481 const auto parameters{rp.PopRaw<Parameters>()}; 481 const auto parameters{rp.PopRaw<Parameters>()};
@@ -494,9 +494,9 @@ void Hid::StartSixAxisSensor(Kernel::HLERequestContext& ctx) {
494void Hid::StopSixAxisSensor(Kernel::HLERequestContext& ctx) { 494void Hid::StopSixAxisSensor(Kernel::HLERequestContext& ctx) {
495 IPC::RequestParser rp{ctx}; 495 IPC::RequestParser rp{ctx};
496 struct Parameters { 496 struct Parameters {
497 Controller_NPad::DeviceHandle sixaxis_handle{}; 497 Controller_NPad::DeviceHandle sixaxis_handle;
498 INSERT_PADDING_WORDS(1); 498 INSERT_PADDING_WORDS_NOINIT(1);
499 u64 applet_resource_user_id{}; 499 u64 applet_resource_user_id;
500 }; 500 };
501 501
502 const auto parameters{rp.PopRaw<Parameters>()}; 502 const auto parameters{rp.PopRaw<Parameters>()};
@@ -515,10 +515,10 @@ void Hid::StopSixAxisSensor(Kernel::HLERequestContext& ctx) {
515void Hid::EnableSixAxisSensorFusion(Kernel::HLERequestContext& ctx) { 515void Hid::EnableSixAxisSensorFusion(Kernel::HLERequestContext& ctx) {
516 IPC::RequestParser rp{ctx}; 516 IPC::RequestParser rp{ctx};
517 struct Parameters { 517 struct Parameters {
518 bool enable_sixaxis_sensor_fusion{}; 518 bool enable_sixaxis_sensor_fusion;
519 INSERT_PADDING_BYTES(3); 519 INSERT_PADDING_BYTES_NOINIT(3);
520 Controller_NPad::DeviceHandle sixaxis_handle{}; 520 Controller_NPad::DeviceHandle sixaxis_handle;
521 u64 applet_resource_user_id{}; 521 u64 applet_resource_user_id;
522 }; 522 };
523 523
524 const auto parameters{rp.PopRaw<Parameters>()}; 524 const auto parameters{rp.PopRaw<Parameters>()};
@@ -556,9 +556,9 @@ void Hid::SetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
556void Hid::GetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) { 556void Hid::GetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
557 IPC::RequestParser rp{ctx}; 557 IPC::RequestParser rp{ctx};
558 struct Parameters { 558 struct Parameters {
559 Controller_NPad::DeviceHandle sixaxis_handle{}; 559 Controller_NPad::DeviceHandle sixaxis_handle;
560 INSERT_PADDING_WORDS(1); 560 INSERT_PADDING_WORDS_NOINIT(1);
561 u64 applet_resource_user_id{}; 561 u64 applet_resource_user_id;
562 }; 562 };
563 563
564 const auto parameters{rp.PopRaw<Parameters>()}; 564 const auto parameters{rp.PopRaw<Parameters>()};
@@ -577,9 +577,9 @@ void Hid::GetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
577void Hid::ResetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) { 577void Hid::ResetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
578 IPC::RequestParser rp{ctx}; 578 IPC::RequestParser rp{ctx};
579 struct Parameters { 579 struct Parameters {
580 Controller_NPad::DeviceHandle sixaxis_handle{}; 580 Controller_NPad::DeviceHandle sixaxis_handle;
581 INSERT_PADDING_WORDS(1); 581 INSERT_PADDING_WORDS_NOINIT(1);
582 u64 applet_resource_user_id{}; 582 u64 applet_resource_user_id;
583 }; 583 };
584 584
585 const auto parameters{rp.PopRaw<Parameters>()}; 585 const auto parameters{rp.PopRaw<Parameters>()};
@@ -599,9 +599,9 @@ void Hid::ResetGyroscopeZeroDriftMode(Kernel::HLERequestContext& ctx) {
599void Hid::IsSixAxisSensorAtRest(Kernel::HLERequestContext& ctx) { 599void Hid::IsSixAxisSensorAtRest(Kernel::HLERequestContext& ctx) {
600 IPC::RequestParser rp{ctx}; 600 IPC::RequestParser rp{ctx};
601 struct Parameters { 601 struct Parameters {
602 Controller_NPad::DeviceHandle sixaxis_handle{}; 602 Controller_NPad::DeviceHandle sixaxis_handle;
603 INSERT_PADDING_WORDS(1); 603 INSERT_PADDING_WORDS_NOINIT(1);
604 u64 applet_resource_user_id{}; 604 u64 applet_resource_user_id;
605 }; 605 };
606 606
607 const auto parameters{rp.PopRaw<Parameters>()}; 607 const auto parameters{rp.PopRaw<Parameters>()};
@@ -620,9 +620,9 @@ void Hid::IsSixAxisSensorAtRest(Kernel::HLERequestContext& ctx) {
620void Hid::ActivateGesture(Kernel::HLERequestContext& ctx) { 620void Hid::ActivateGesture(Kernel::HLERequestContext& ctx) {
621 IPC::RequestParser rp{ctx}; 621 IPC::RequestParser rp{ctx};
622 struct Parameters { 622 struct Parameters {
623 u32 unknown{}; 623 u32 unknown;
624 INSERT_PADDING_WORDS(1); 624 INSERT_PADDING_WORDS_NOINIT(1);
625 u64 applet_resource_user_id{}; 625 u64 applet_resource_user_id;
626 }; 626 };
627 627
628 const auto parameters{rp.PopRaw<Parameters>()}; 628 const auto parameters{rp.PopRaw<Parameters>()};
@@ -702,10 +702,10 @@ void Hid::DeactivateNpad(Kernel::HLERequestContext& ctx) {
702void Hid::AcquireNpadStyleSetUpdateEventHandle(Kernel::HLERequestContext& ctx) { 702void Hid::AcquireNpadStyleSetUpdateEventHandle(Kernel::HLERequestContext& ctx) {
703 IPC::RequestParser rp{ctx}; 703 IPC::RequestParser rp{ctx};
704 struct Parameters { 704 struct Parameters {
705 u32 npad_id{}; 705 u32 npad_id;
706 INSERT_PADDING_WORDS(1); 706 INSERT_PADDING_WORDS_NOINIT(1);
707 u64 applet_resource_user_id{}; 707 u64 applet_resource_user_id;
708 u64 unknown{}; 708 u64 unknown;
709 }; 709 };
710 710
711 const auto parameters{rp.PopRaw<Parameters>()}; 711 const auto parameters{rp.PopRaw<Parameters>()};
@@ -722,9 +722,9 @@ void Hid::AcquireNpadStyleSetUpdateEventHandle(Kernel::HLERequestContext& ctx) {
722void Hid::DisconnectNpad(Kernel::HLERequestContext& ctx) { 722void Hid::DisconnectNpad(Kernel::HLERequestContext& ctx) {
723 IPC::RequestParser rp{ctx}; 723 IPC::RequestParser rp{ctx};
724 struct Parameters { 724 struct Parameters {
725 u32 npad_id{}; 725 u32 npad_id;
726 INSERT_PADDING_WORDS(1); 726 INSERT_PADDING_WORDS_NOINIT(1);
727 u64 applet_resource_user_id{}; 727 u64 applet_resource_user_id;
728 }; 728 };
729 729
730 const auto parameters{rp.PopRaw<Parameters>()}; 730 const auto parameters{rp.PopRaw<Parameters>()};
@@ -756,9 +756,9 @@ void Hid::ActivateNpadWithRevision(Kernel::HLERequestContext& ctx) {
756 // Should have no effect with how our npad sets up the data 756 // Should have no effect with how our npad sets up the data
757 IPC::RequestParser rp{ctx}; 757 IPC::RequestParser rp{ctx};
758 struct Parameters { 758 struct Parameters {
759 u32 unknown{}; 759 u32 unknown;
760 INSERT_PADDING_WORDS(1); 760 INSERT_PADDING_WORDS_NOINIT(1);
761 u64 applet_resource_user_id{}; 761 u64 applet_resource_user_id;
762 }; 762 };
763 763
764 const auto parameters{rp.PopRaw<Parameters>()}; 764 const auto parameters{rp.PopRaw<Parameters>()};
@@ -800,9 +800,9 @@ void Hid::GetNpadJoyHoldType(Kernel::HLERequestContext& ctx) {
800void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx) { 800void Hid::SetNpadJoyAssignmentModeSingleByDefault(Kernel::HLERequestContext& ctx) {
801 IPC::RequestParser rp{ctx}; 801 IPC::RequestParser rp{ctx};
802 struct Parameters { 802 struct Parameters {
803 u32 npad_id{}; 803 u32 npad_id;
804 INSERT_PADDING_WORDS(1); 804 INSERT_PADDING_WORDS_NOINIT(1);
805 u64 applet_resource_user_id{}; 805 u64 applet_resource_user_id;
806 }; 806 };
807 807
808 const auto parameters{rp.PopRaw<Parameters>()}; 808 const auto parameters{rp.PopRaw<Parameters>()};
@@ -821,10 +821,10 @@ void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) {
821 // TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault 821 // TODO: Check the differences between this and SetNpadJoyAssignmentModeSingleByDefault
822 IPC::RequestParser rp{ctx}; 822 IPC::RequestParser rp{ctx};
823 struct Parameters { 823 struct Parameters {
824 u32 npad_id{}; 824 u32 npad_id;
825 INSERT_PADDING_WORDS(1); 825 INSERT_PADDING_WORDS_NOINIT(1);
826 u64 applet_resource_user_id{}; 826 u64 applet_resource_user_id;
827 u64 npad_joy_device_type{}; 827 u64 npad_joy_device_type;
828 }; 828 };
829 829
830 const auto parameters{rp.PopRaw<Parameters>()}; 830 const auto parameters{rp.PopRaw<Parameters>()};
@@ -844,9 +844,9 @@ void Hid::SetNpadJoyAssignmentModeSingle(Kernel::HLERequestContext& ctx) {
844void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) { 844void Hid::SetNpadJoyAssignmentModeDual(Kernel::HLERequestContext& ctx) {
845 IPC::RequestParser rp{ctx}; 845 IPC::RequestParser rp{ctx};
846 struct Parameters { 846 struct Parameters {
847 u32 npad_id{}; 847 u32 npad_id;
848 INSERT_PADDING_WORDS(1); 848 INSERT_PADDING_WORDS_NOINIT(1);
849 u64 applet_resource_user_id{}; 849 u64 applet_resource_user_id;
850 }; 850 };
851 851
852 const auto parameters{rp.PopRaw<Parameters>()}; 852 const auto parameters{rp.PopRaw<Parameters>()};
@@ -952,9 +952,9 @@ void Hid::SwapNpadAssignment(Kernel::HLERequestContext& ctx) {
952void Hid::IsUnintendedHomeButtonInputProtectionEnabled(Kernel::HLERequestContext& ctx) { 952void Hid::IsUnintendedHomeButtonInputProtectionEnabled(Kernel::HLERequestContext& ctx) {
953 IPC::RequestParser rp{ctx}; 953 IPC::RequestParser rp{ctx};
954 struct Parameters { 954 struct Parameters {
955 u32 npad_id{}; 955 u32 npad_id;
956 INSERT_PADDING_WORDS(1); 956 INSERT_PADDING_WORDS_NOINIT(1);
957 u64 applet_resource_user_id{}; 957 u64 applet_resource_user_id;
958 }; 958 };
959 959
960 const auto parameters{rp.PopRaw<Parameters>()}; 960 const auto parameters{rp.PopRaw<Parameters>()};
@@ -971,10 +971,10 @@ void Hid::IsUnintendedHomeButtonInputProtectionEnabled(Kernel::HLERequestContext
971void Hid::EnableUnintendedHomeButtonInputProtection(Kernel::HLERequestContext& ctx) { 971void Hid::EnableUnintendedHomeButtonInputProtection(Kernel::HLERequestContext& ctx) {
972 IPC::RequestParser rp{ctx}; 972 IPC::RequestParser rp{ctx};
973 struct Parameters { 973 struct Parameters {
974 bool unintended_home_button_input_protection{}; 974 bool unintended_home_button_input_protection;
975 INSERT_PADDING_BYTES(3); 975 INSERT_PADDING_BYTES_NOINIT(3);
976 u32 npad_id{}; 976 u32 npad_id;
977 u64 applet_resource_user_id{}; 977 u64 applet_resource_user_id;
978 }; 978 };
979 979
980 const auto parameters{rp.PopRaw<Parameters>()}; 980 const auto parameters{rp.PopRaw<Parameters>()};
@@ -1026,10 +1026,10 @@ void Hid::GetVibrationDeviceInfo(Kernel::HLERequestContext& ctx) {
1026void Hid::SendVibrationValue(Kernel::HLERequestContext& ctx) { 1026void Hid::SendVibrationValue(Kernel::HLERequestContext& ctx) {
1027 IPC::RequestParser rp{ctx}; 1027 IPC::RequestParser rp{ctx};
1028 struct Parameters { 1028 struct Parameters {
1029 Controller_NPad::DeviceHandle vibration_device_handle{}; 1029 Controller_NPad::DeviceHandle vibration_device_handle;
1030 Controller_NPad::VibrationValue vibration_value{}; 1030 Controller_NPad::VibrationValue vibration_value;
1031 INSERT_PADDING_WORDS(1); 1031 INSERT_PADDING_WORDS_NOINIT(1);
1032 u64 applet_resource_user_id{}; 1032 u64 applet_resource_user_id;
1033 }; 1033 };
1034 1034
1035 const auto parameters{rp.PopRaw<Parameters>()}; 1035 const auto parameters{rp.PopRaw<Parameters>()};
@@ -1050,9 +1050,9 @@ void Hid::SendVibrationValue(Kernel::HLERequestContext& ctx) {
1050void Hid::GetActualVibrationValue(Kernel::HLERequestContext& ctx) { 1050void Hid::GetActualVibrationValue(Kernel::HLERequestContext& ctx) {
1051 IPC::RequestParser rp{ctx}; 1051 IPC::RequestParser rp{ctx};
1052 struct Parameters { 1052 struct Parameters {
1053 Controller_NPad::DeviceHandle vibration_device_handle{}; 1053 Controller_NPad::DeviceHandle vibration_device_handle;
1054 INSERT_PADDING_WORDS(1); 1054 INSERT_PADDING_WORDS_NOINIT(1);
1055 u64 applet_resource_user_id{}; 1055 u64 applet_resource_user_id;
1056 }; 1056 };
1057 1057
1058 const auto parameters{rp.PopRaw<Parameters>()}; 1058 const auto parameters{rp.PopRaw<Parameters>()};
@@ -1147,9 +1147,9 @@ void Hid::EndPermitVibrationSession(Kernel::HLERequestContext& ctx) {
1147void Hid::IsVibrationDeviceMounted(Kernel::HLERequestContext& ctx) { 1147void Hid::IsVibrationDeviceMounted(Kernel::HLERequestContext& ctx) {
1148 IPC::RequestParser rp{ctx}; 1148 IPC::RequestParser rp{ctx};
1149 struct Parameters { 1149 struct Parameters {
1150 Controller_NPad::DeviceHandle vibration_device_handle{}; 1150 Controller_NPad::DeviceHandle vibration_device_handle;
1151 INSERT_PADDING_WORDS(1); 1151 INSERT_PADDING_WORDS_NOINIT(1);
1152 u64 applet_resource_user_id{}; 1152 u64 applet_resource_user_id;
1153 }; 1153 };
1154 1154
1155 const auto parameters{rp.PopRaw<Parameters>()}; 1155 const auto parameters{rp.PopRaw<Parameters>()};
@@ -1180,9 +1180,9 @@ void Hid::ActivateConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
1180void Hid::StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) { 1180void Hid::StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
1181 IPC::RequestParser rp{ctx}; 1181 IPC::RequestParser rp{ctx};
1182 struct Parameters { 1182 struct Parameters {
1183 Controller_NPad::DeviceHandle sixaxis_handle{}; 1183 Controller_NPad::DeviceHandle sixaxis_handle;
1184 INSERT_PADDING_WORDS(1); 1184 INSERT_PADDING_WORDS_NOINIT(1);
1185 u64 applet_resource_user_id{}; 1185 u64 applet_resource_user_id;
1186 }; 1186 };
1187 1187
1188 const auto parameters{rp.PopRaw<Parameters>()}; 1188 const auto parameters{rp.PopRaw<Parameters>()};
@@ -1200,9 +1200,9 @@ void Hid::StartConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
1200void Hid::StopConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) { 1200void Hid::StopConsoleSixAxisSensor(Kernel::HLERequestContext& ctx) {
1201 IPC::RequestParser rp{ctx}; 1201 IPC::RequestParser rp{ctx};
1202 struct Parameters { 1202 struct Parameters {
1203 Controller_NPad::DeviceHandle sixaxis_handle{}; 1203 Controller_NPad::DeviceHandle sixaxis_handle;
1204 INSERT_PADDING_WORDS(1); 1204 INSERT_PADDING_WORDS_NOINIT(1);
1205 u64 applet_resource_user_id{}; 1205 u64 applet_resource_user_id;
1206 }; 1206 };
1207 1207
1208 const auto parameters{rp.PopRaw<Parameters>()}; 1208 const auto parameters{rp.PopRaw<Parameters>()};
diff --git a/src/core/hle/service/mii/manager.cpp b/src/core/hle/service/mii/manager.cpp
index d73b90015..567a4e345 100644
--- a/src/core/hle/service/mii/manager.cpp
+++ b/src/core/hle/service/mii/manager.cpp
@@ -100,6 +100,7 @@ MiiInfo ConvertStoreDataToInfo(const MiiStoreData& data) {
100 .mole_scale = static_cast<u8>(bf.mole_scale.Value()), 100 .mole_scale = static_cast<u8>(bf.mole_scale.Value()),
101 .mole_x = static_cast<u8>(bf.mole_x.Value()), 101 .mole_x = static_cast<u8>(bf.mole_x.Value()),
102 .mole_y = static_cast<u8>(bf.mole_y.Value()), 102 .mole_y = static_cast<u8>(bf.mole_y.Value()),
103 .padding = 0,
103 }; 104 };
104} 105}
105 106
diff --git a/src/core/hle/service/mii/manager.h b/src/core/hle/service/mii/manager.h
index 927451dea..32c27ee65 100644
--- a/src/core/hle/service/mii/manager.h
+++ b/src/core/hle/service/mii/manager.h
@@ -27,58 +27,58 @@ enum class SourceFlag : u32 {
27DECLARE_ENUM_FLAG_OPERATORS(SourceFlag); 27DECLARE_ENUM_FLAG_OPERATORS(SourceFlag);
28 28
29struct MiiInfo { 29struct MiiInfo {
30 Common::UUID uuid{Common::INVALID_UUID}; 30 Common::UUID uuid;
31 std::array<char16_t, 11> name{}; 31 std::array<char16_t, 11> name;
32 u8 font_region{}; 32 u8 font_region;
33 u8 favorite_color{}; 33 u8 favorite_color;
34 u8 gender{}; 34 u8 gender;
35 u8 height{}; 35 u8 height;
36 u8 build{}; 36 u8 build;
37 u8 type{}; 37 u8 type;
38 u8 region_move{}; 38 u8 region_move;
39 u8 faceline_type{}; 39 u8 faceline_type;
40 u8 faceline_color{}; 40 u8 faceline_color;
41 u8 faceline_wrinkle{}; 41 u8 faceline_wrinkle;
42 u8 faceline_make{}; 42 u8 faceline_make;
43 u8 hair_type{}; 43 u8 hair_type;
44 u8 hair_color{}; 44 u8 hair_color;
45 u8 hair_flip{}; 45 u8 hair_flip;
46 u8 eye_type{}; 46 u8 eye_type;
47 u8 eye_color{}; 47 u8 eye_color;
48 u8 eye_scale{}; 48 u8 eye_scale;
49 u8 eye_aspect{}; 49 u8 eye_aspect;
50 u8 eye_rotate{}; 50 u8 eye_rotate;
51 u8 eye_x{}; 51 u8 eye_x;
52 u8 eye_y{}; 52 u8 eye_y;
53 u8 eyebrow_type{}; 53 u8 eyebrow_type;
54 u8 eyebrow_color{}; 54 u8 eyebrow_color;
55 u8 eyebrow_scale{}; 55 u8 eyebrow_scale;
56 u8 eyebrow_aspect{}; 56 u8 eyebrow_aspect;
57 u8 eyebrow_rotate{}; 57 u8 eyebrow_rotate;
58 u8 eyebrow_x{}; 58 u8 eyebrow_x;
59 u8 eyebrow_y{}; 59 u8 eyebrow_y;
60 u8 nose_type{}; 60 u8 nose_type;
61 u8 nose_scale{}; 61 u8 nose_scale;
62 u8 nose_y{}; 62 u8 nose_y;
63 u8 mouth_type{}; 63 u8 mouth_type;
64 u8 mouth_color{}; 64 u8 mouth_color;
65 u8 mouth_scale{}; 65 u8 mouth_scale;
66 u8 mouth_aspect{}; 66 u8 mouth_aspect;
67 u8 mouth_y{}; 67 u8 mouth_y;
68 u8 beard_color{}; 68 u8 beard_color;
69 u8 beard_type{}; 69 u8 beard_type;
70 u8 mustache_type{}; 70 u8 mustache_type;
71 u8 mustache_scale{}; 71 u8 mustache_scale;
72 u8 mustache_y{}; 72 u8 mustache_y;
73 u8 glasses_type{}; 73 u8 glasses_type;
74 u8 glasses_color{}; 74 u8 glasses_color;
75 u8 glasses_scale{}; 75 u8 glasses_scale;
76 u8 glasses_y{}; 76 u8 glasses_y;
77 u8 mole_type{}; 77 u8 mole_type;
78 u8 mole_scale{}; 78 u8 mole_scale;
79 u8 mole_x{}; 79 u8 mole_x;
80 u8 mole_y{}; 80 u8 mole_y;
81 INSERT_PADDING_BYTES(1); 81 u8 padding;
82 82
83 std::u16string Name() const; 83 std::u16string Name() const;
84}; 84};
@@ -324,7 +324,7 @@ public:
324 ResultCode GetIndex(const MiiInfo& info, u32& index); 324 ResultCode GetIndex(const MiiInfo& info, u32& index);
325 325
326private: 326private:
327 const Common::UUID user_id; 327 const Common::UUID user_id{Common::INVALID_UUID};
328 u64 update_counter{}; 328 u64 update_counter{};
329}; 329};
330 330
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index 5557da72e..641bcadea 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -190,12 +190,6 @@ private:
190 void GetDeviceState(Kernel::HLERequestContext& ctx) { 190 void GetDeviceState(Kernel::HLERequestContext& ctx) {
191 LOG_DEBUG(Service_NFP, "called"); 191 LOG_DEBUG(Service_NFP, "called");
192 192
193 auto nfc_event = nfp_interface.GetNFCEvent();
194 if (!nfc_event->ShouldWait(&ctx.GetThread()) && !has_attached_handle) {
195 device_state = DeviceState::TagFound;
196 nfc_event->Clear();
197 }
198
199 IPC::ResponseBuilder rb{ctx, 3}; 193 IPC::ResponseBuilder rb{ctx, 3};
200 rb.Push(RESULT_SUCCESS); 194 rb.Push(RESULT_SUCCESS);
201 rb.Push<u32>(static_cast<u32>(device_state)); 195 rb.Push<u32>(static_cast<u32>(device_state));
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 4b3581949..ceaa93d28 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -38,6 +38,10 @@ void NVFlinger::SplitVSync() {
38 system.RegisterHostThread(); 38 system.RegisterHostThread();
39 std::string name = "yuzu:VSyncThread"; 39 std::string name = "yuzu:VSyncThread";
40 MicroProfileOnThreadCreate(name.c_str()); 40 MicroProfileOnThreadCreate(name.c_str());
41
42 // Cleanup
43 SCOPE_EXIT({ MicroProfileOnThreadExit(); });
44
41 Common::SetCurrentThreadName(name.c_str()); 45 Common::SetCurrentThreadName(name.c_str());
42 Common::SetCurrentThreadPriority(Common::ThreadPriority::High); 46 Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
43 s64 delay = 0; 47 s64 delay = 0;
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 4da69f503..2b91a89d1 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -139,9 +139,6 @@ void SM::GetService(Kernel::HLERequestContext& ctx) {
139 server_port->AppendPendingSession(server); 139 server_port->AppendPendingSession(server);
140 } 140 }
141 141
142 // Wake the threads waiting on the ServerPort
143 server_port->Signal();
144
145 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId()); 142 LOG_DEBUG(Service_SM, "called service={} -> session={}", name, client->GetObjectId());
146 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles}; 143 IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
147 rb.Push(RESULT_SUCCESS); 144 rb.Push(RESULT_SUCCESS);
diff --git a/src/core/hle/service/time/clock_types.h b/src/core/hle/service/time/clock_types.h
index 72e1921ec..b78892223 100644
--- a/src/core/hle/service/time/clock_types.h
+++ b/src/core/hle/service/time/clock_types.h
@@ -73,19 +73,19 @@ struct TimeSpanType {
73static_assert(sizeof(TimeSpanType) == 8, "TimeSpanType is incorrect size"); 73static_assert(sizeof(TimeSpanType) == 8, "TimeSpanType is incorrect size");
74 74
75struct ClockSnapshot { 75struct ClockSnapshot {
76 SystemClockContext user_context{}; 76 SystemClockContext user_context;
77 SystemClockContext network_context{}; 77 SystemClockContext network_context;
78 s64 user_time{}; 78 s64 user_time;
79 s64 network_time{}; 79 s64 network_time;
80 TimeZone::CalendarTime user_calendar_time{}; 80 TimeZone::CalendarTime user_calendar_time;
81 TimeZone::CalendarTime network_calendar_time{}; 81 TimeZone::CalendarTime network_calendar_time;
82 TimeZone::CalendarAdditionalInfo user_calendar_additional_time{}; 82 TimeZone::CalendarAdditionalInfo user_calendar_additional_time;
83 TimeZone::CalendarAdditionalInfo network_calendar_additional_time{}; 83 TimeZone::CalendarAdditionalInfo network_calendar_additional_time;
84 SteadyClockTimePoint steady_clock_time_point{}; 84 SteadyClockTimePoint steady_clock_time_point;
85 TimeZone::LocationName location_name{}; 85 TimeZone::LocationName location_name;
86 u8 is_automatic_correction_enabled{}; 86 u8 is_automatic_correction_enabled;
87 u8 type{}; 87 u8 type;
88 INSERT_PADDING_BYTES(0x2); 88 INSERT_PADDING_BYTES_NOINIT(0x2);
89 89
90 static ResultCode GetCurrentTime(s64& current_time, 90 static ResultCode GetCurrentTime(s64& current_time,
91 const SteadyClockTimePoint& steady_clock_time_point, 91 const SteadyClockTimePoint& steady_clock_time_point,
diff --git a/src/core/hle/service/time/time_zone_types.h b/src/core/hle/service/time/time_zone_types.h
index 9be15b53e..4a57e036d 100644
--- a/src/core/hle/service/time/time_zone_types.h
+++ b/src/core/hle/service/time/time_zone_types.h
@@ -45,23 +45,23 @@ static_assert(sizeof(TimeZoneRule) == 0x4000, "TimeZoneRule is incorrect size");
45 45
46/// https://switchbrew.org/wiki/Glue_services#CalendarAdditionalInfo 46/// https://switchbrew.org/wiki/Glue_services#CalendarAdditionalInfo
47struct CalendarAdditionalInfo { 47struct CalendarAdditionalInfo {
48 u32 day_of_week{}; 48 u32 day_of_week;
49 u32 day_of_year{}; 49 u32 day_of_year;
50 std::array<char, 8> timezone_name; 50 std::array<char, 8> timezone_name;
51 u32 is_dst{}; 51 u32 is_dst;
52 s32 gmt_offset{}; 52 s32 gmt_offset;
53}; 53};
54static_assert(sizeof(CalendarAdditionalInfo) == 0x18, "CalendarAdditionalInfo is incorrect size"); 54static_assert(sizeof(CalendarAdditionalInfo) == 0x18, "CalendarAdditionalInfo is incorrect size");
55 55
56/// https://switchbrew.org/wiki/Glue_services#CalendarTime 56/// https://switchbrew.org/wiki/Glue_services#CalendarTime
57struct CalendarTime { 57struct CalendarTime {
58 s16 year{}; 58 s16 year;
59 s8 month{}; 59 s8 month;
60 s8 day{}; 60 s8 day;
61 s8 hour{}; 61 s8 hour;
62 s8 minute{}; 62 s8 minute;
63 s8 second{}; 63 s8 second;
64 INSERT_PADDING_BYTES(1); 64 INSERT_PADDING_BYTES_NOINIT(1);
65}; 65};
66static_assert(sizeof(CalendarTime) == 0x8, "CalendarTime is incorrect size"); 66static_assert(sizeof(CalendarTime) == 0x8, "CalendarTime is incorrect size");
67 67
diff --git a/src/input_common/touch_from_button.cpp b/src/input_common/touch_from_button.cpp
index a07124a86..ffbe4f2ed 100644
--- a/src/input_common/touch_from_button.cpp
+++ b/src/input_common/touch_from_button.cpp
@@ -25,18 +25,19 @@ public:
25 } 25 }
26 } 26 }
27 27
28 std::tuple<float, float, bool> GetStatus() const override { 28 Input::TouchStatus GetStatus() const override {
29 for (const auto& m : map) { 29 Input::TouchStatus touch_status{};
30 const bool state = std::get<0>(m)->GetStatus(); 30 for (std::size_t id = 0; id < map.size() && id < touch_status.size(); ++id) {
31 const bool state = std::get<0>(map[id])->GetStatus();
31 if (state) { 32 if (state) {
32 const float x = static_cast<float>(std::get<1>(m)) / 33 const float x = static_cast<float>(std::get<1>(map[id])) /
33 static_cast<int>(Layout::ScreenUndocked::Width); 34 static_cast<int>(Layout::ScreenUndocked::Width);
34 const float y = static_cast<float>(std::get<2>(m)) / 35 const float y = static_cast<float>(std::get<2>(map[id])) /
35 static_cast<int>(Layout::ScreenUndocked::Height); 36 static_cast<int>(Layout::ScreenUndocked::Height);
36 return {x, y, true}; 37 touch_status[id] = {x, y, true};
37 } 38 }
38 } 39 }
39 return {}; 40 return touch_status;
40 } 41 }
41 42
42private: 43private:
diff --git a/src/input_common/udp/client.cpp b/src/input_common/udp/client.cpp
index 412d57896..e7e50d789 100644
--- a/src/input_common/udp/client.cpp
+++ b/src/input_common/udp/client.cpp
@@ -136,6 +136,7 @@ static void SocketLoop(Socket* socket) {
136 136
137Client::Client() { 137Client::Client() {
138 LOG_INFO(Input, "Udp Initialization started"); 138 LOG_INFO(Input, "Udp Initialization started");
139 finger_id.fill(MAX_TOUCH_FINGERS);
139 ReloadSockets(); 140 ReloadSockets();
140} 141}
141 142
@@ -176,7 +177,7 @@ void Client::ReloadSockets() {
176 std::string server_token; 177 std::string server_token;
177 std::size_t client = 0; 178 std::size_t client = 0;
178 while (std::getline(servers_ss, server_token, ',')) { 179 while (std::getline(servers_ss, server_token, ',')) {
179 if (client == max_udp_clients) { 180 if (client == MAX_UDP_CLIENTS) {
180 break; 181 break;
181 } 182 }
182 std::stringstream server_ss(server_token); 183 std::stringstream server_ss(server_token);
@@ -194,7 +195,7 @@ void Client::ReloadSockets() {
194 for (std::size_t pad = 0; pad < 4; ++pad) { 195 for (std::size_t pad = 0; pad < 4; ++pad) {
195 const std::size_t client_number = 196 const std::size_t client_number =
196 GetClientNumber(udp_input_address, udp_input_port, pad); 197 GetClientNumber(udp_input_address, udp_input_port, pad);
197 if (client_number != max_udp_clients) { 198 if (client_number != MAX_UDP_CLIENTS) {
198 LOG_ERROR(Input, "Duplicated UDP servers found"); 199 LOG_ERROR(Input, "Duplicated UDP servers found");
199 continue; 200 continue;
200 } 201 }
@@ -213,7 +214,7 @@ std::size_t Client::GetClientNumber(std::string_view host, u16 port, std::size_t
213 return client; 214 return client;
214 } 215 }
215 } 216 }
216 return max_udp_clients; 217 return MAX_UDP_CLIENTS;
217} 218}
218 219
219void Client::OnVersion([[maybe_unused]] Response::Version data) { 220void Client::OnVersion([[maybe_unused]] Response::Version data) {
@@ -259,33 +260,14 @@ void Client::OnPadData(Response::PadData data, std::size_t client) {
259 std::lock_guard guard(clients[client].status.update_mutex); 260 std::lock_guard guard(clients[client].status.update_mutex);
260 clients[client].status.motion_status = clients[client].motion.GetMotion(); 261 clients[client].status.motion_status = clients[client].motion.GetMotion();
261 262
262 // TODO: add a setting for "click" touch. Click touch refers to a device that differentiates 263 for (std::size_t id = 0; id < data.touch.size(); ++id) {
263 // between a simple "tap" and a hard press that causes the touch screen to click. 264 UpdateTouchInput(data.touch[id], client, id);
264 const bool is_active = data.touch_1.is_active != 0;
265
266 float x = 0;
267 float y = 0;
268
269 if (is_active && clients[client].status.touch_calibration) {
270 const u16 min_x = clients[client].status.touch_calibration->min_x;
271 const u16 max_x = clients[client].status.touch_calibration->max_x;
272 const u16 min_y = clients[client].status.touch_calibration->min_y;
273 const u16 max_y = clients[client].status.touch_calibration->max_y;
274
275 x = static_cast<float>(std::clamp(static_cast<u16>(data.touch_1.x), min_x, max_x) -
276 min_x) /
277 static_cast<float>(max_x - min_x);
278 y = static_cast<float>(std::clamp(static_cast<u16>(data.touch_1.y), min_y, max_y) -
279 min_y) /
280 static_cast<float>(max_y - min_y);
281 } 265 }
282 266
283 clients[client].status.touch_status = {x, y, is_active};
284
285 if (configuring) { 267 if (configuring) {
286 const Common::Vec3f gyroscope = clients[client].motion.GetGyroscope(); 268 const Common::Vec3f gyroscope = clients[client].motion.GetGyroscope();
287 const Common::Vec3f accelerometer = clients[client].motion.GetAcceleration(); 269 const Common::Vec3f accelerometer = clients[client].motion.GetAcceleration();
288 UpdateYuzuSettings(client, accelerometer, gyroscope, is_active); 270 UpdateYuzuSettings(client, accelerometer, gyroscope);
289 } 271 }
290 } 272 }
291} 273}
@@ -320,21 +302,17 @@ void Client::Reset() {
320} 302}
321 303
322void Client::UpdateYuzuSettings(std::size_t client, const Common::Vec3<float>& acc, 304void Client::UpdateYuzuSettings(std::size_t client, const Common::Vec3<float>& acc,
323 const Common::Vec3<float>& gyro, bool touch) { 305 const Common::Vec3<float>& gyro) {
324 if (gyro.Length() > 0.2f) { 306 if (gyro.Length() > 0.2f) {
325 LOG_DEBUG(Input, "UDP Controller {}: gyro=({}, {}, {}), accel=({}, {}, {}), touch={}", 307 LOG_DEBUG(Input, "UDP Controller {}: gyro=({}, {}, {}), accel=({}, {}, {})", client,
326 client, gyro[0], gyro[1], gyro[2], acc[0], acc[1], acc[2], touch); 308 gyro[0], gyro[1], gyro[2], acc[0], acc[1], acc[2]);
327 } 309 }
328 UDPPadStatus pad{ 310 UDPPadStatus pad{
329 .host = clients[client].host, 311 .host = clients[client].host,
330 .port = clients[client].port, 312 .port = clients[client].port,
331 .pad_index = clients[client].pad_index, 313 .pad_index = clients[client].pad_index,
332 }; 314 };
333 if (touch) { 315 for (std::size_t i = 0; i < 3; ++i) {
334 pad.touch = PadTouch::Click;
335 pad_queue.Push(pad);
336 }
337 for (size_t i = 0; i < 3; ++i) {
338 if (gyro[i] > 5.0f || gyro[i] < -5.0f) { 316 if (gyro[i] > 5.0f || gyro[i] < -5.0f) {
339 pad.motion = static_cast<PadMotion>(i); 317 pad.motion = static_cast<PadMotion>(i);
340 pad.motion_value = gyro[i]; 318 pad.motion_value = gyro[i];
@@ -348,6 +326,50 @@ void Client::UpdateYuzuSettings(std::size_t client, const Common::Vec3<float>& a
348 } 326 }
349} 327}
350 328
329std::optional<std::size_t> Client::GetUnusedFingerID() const {
330 std::size_t first_free_id = 0;
331 while (first_free_id < MAX_TOUCH_FINGERS) {
332 if (!std::get<2>(touch_status[first_free_id])) {
333 return first_free_id;
334 } else {
335 first_free_id++;
336 }
337 }
338 return std::nullopt;
339}
340
341void Client::UpdateTouchInput(Response::TouchPad& touch_pad, std::size_t client, std::size_t id) {
342 // TODO: Use custom calibration per device
343 const Common::ParamPackage touch_param(Settings::values.touch_device);
344 const u16 min_x = static_cast<u16>(touch_param.Get("min_x", 100));
345 const u16 min_y = static_cast<u16>(touch_param.Get("min_y", 50));
346 const u16 max_x = static_cast<u16>(touch_param.Get("max_x", 1800));
347 const u16 max_y = static_cast<u16>(touch_param.Get("max_y", 850));
348 const std::size_t touch_id = client * 2 + id;
349 if (touch_pad.is_active) {
350 if (finger_id[touch_id] == MAX_TOUCH_FINGERS) {
351 const auto first_free_id = GetUnusedFingerID();
352 if (!first_free_id) {
353 // Invalid finger id skip to next input
354 return;
355 }
356 finger_id[touch_id] = *first_free_id;
357 }
358 auto& [x, y, pressed] = touch_status[finger_id[touch_id]];
359 x = static_cast<float>(std::clamp(static_cast<u16>(touch_pad.x), min_x, max_x) - min_x) /
360 static_cast<float>(max_x - min_x);
361 y = static_cast<float>(std::clamp(static_cast<u16>(touch_pad.y), min_y, max_y) - min_y) /
362 static_cast<float>(max_y - min_y);
363 pressed = true;
364 return;
365 }
366
367 if (finger_id[touch_id] != MAX_TOUCH_FINGERS) {
368 touch_status[finger_id[touch_id]] = {};
369 finger_id[touch_id] = MAX_TOUCH_FINGERS;
370 }
371}
372
351void Client::BeginConfiguration() { 373void Client::BeginConfiguration() {
352 pad_queue.Clear(); 374 pad_queue.Clear();
353 configuring = true; 375 configuring = true;
@@ -360,7 +382,7 @@ void Client::EndConfiguration() {
360 382
361DeviceStatus& Client::GetPadState(const std::string& host, u16 port, std::size_t pad) { 383DeviceStatus& Client::GetPadState(const std::string& host, u16 port, std::size_t pad) {
362 const std::size_t client_number = GetClientNumber(host, port, pad); 384 const std::size_t client_number = GetClientNumber(host, port, pad);
363 if (client_number == max_udp_clients) { 385 if (client_number == MAX_UDP_CLIENTS) {
364 return clients[0].status; 386 return clients[0].status;
365 } 387 }
366 return clients[client_number].status; 388 return clients[client_number].status;
@@ -368,12 +390,20 @@ DeviceStatus& Client::GetPadState(const std::string& host, u16 port, std::size_t
368 390
369const DeviceStatus& Client::GetPadState(const std::string& host, u16 port, std::size_t pad) const { 391const DeviceStatus& Client::GetPadState(const std::string& host, u16 port, std::size_t pad) const {
370 const std::size_t client_number = GetClientNumber(host, port, pad); 392 const std::size_t client_number = GetClientNumber(host, port, pad);
371 if (client_number == max_udp_clients) { 393 if (client_number == MAX_UDP_CLIENTS) {
372 return clients[0].status; 394 return clients[0].status;
373 } 395 }
374 return clients[client_number].status; 396 return clients[client_number].status;
375} 397}
376 398
399Input::TouchStatus& Client::GetTouchState() {
400 return touch_status;
401}
402
403const Input::TouchStatus& Client::GetTouchState() const {
404 return touch_status;
405}
406
377Common::SPSCQueue<UDPPadStatus>& Client::GetPadQueue() { 407Common::SPSCQueue<UDPPadStatus>& Client::GetPadQueue() {
378 return pad_queue; 408 return pad_queue;
379} 409}
@@ -426,24 +456,24 @@ CalibrationConfigurationJob::CalibrationConfigurationJob(
426 current_status = Status::Ready; 456 current_status = Status::Ready;
427 status_callback(current_status); 457 status_callback(current_status);
428 } 458 }
429 if (data.touch_1.is_active == 0) { 459 if (data.touch[0].is_active == 0) {
430 return; 460 return;
431 } 461 }
432 LOG_DEBUG(Input, "Current touch: {} {}", data.touch_1.x, 462 LOG_DEBUG(Input, "Current touch: {} {}", data.touch[0].x,
433 data.touch_1.y); 463 data.touch[0].y);
434 min_x = std::min(min_x, static_cast<u16>(data.touch_1.x)); 464 min_x = std::min(min_x, static_cast<u16>(data.touch[0].x));
435 min_y = std::min(min_y, static_cast<u16>(data.touch_1.y)); 465 min_y = std::min(min_y, static_cast<u16>(data.touch[0].y));
436 if (current_status == Status::Ready) { 466 if (current_status == Status::Ready) {
437 // First touch - min data (min_x/min_y) 467 // First touch - min data (min_x/min_y)
438 current_status = Status::Stage1Completed; 468 current_status = Status::Stage1Completed;
439 status_callback(current_status); 469 status_callback(current_status);
440 } 470 }
441 if (data.touch_1.x - min_x > CALIBRATION_THRESHOLD && 471 if (data.touch[0].x - min_x > CALIBRATION_THRESHOLD &&
442 data.touch_1.y - min_y > CALIBRATION_THRESHOLD) { 472 data.touch[0].y - min_y > CALIBRATION_THRESHOLD) {
443 // Set the current position as max value and finishes 473 // Set the current position as max value and finishes
444 // configuration 474 // configuration
445 max_x = data.touch_1.x; 475 max_x = data.touch[0].x;
446 max_y = data.touch_1.y; 476 max_y = data.touch[0].y;
447 current_status = Status::Completed; 477 current_status = Status::Completed;
448 data_callback(min_x, min_y, max_x, max_y); 478 data_callback(min_x, min_y, max_x, max_y);
449 status_callback(current_status); 479 status_callback(current_status);
diff --git a/src/input_common/udp/client.h b/src/input_common/udp/client.h
index 00c8b09f5..822f9c550 100644
--- a/src/input_common/udp/client.h
+++ b/src/input_common/udp/client.h
@@ -28,6 +28,7 @@ class Socket;
28namespace Response { 28namespace Response {
29struct PadData; 29struct PadData;
30struct PortInfo; 30struct PortInfo;
31struct TouchPad;
31struct Version; 32struct Version;
32} // namespace Response 33} // namespace Response
33 34
@@ -50,7 +51,6 @@ struct UDPPadStatus {
50 std::string host{"127.0.0.1"}; 51 std::string host{"127.0.0.1"};
51 u16 port{26760}; 52 u16 port{26760};
52 std::size_t pad_index{}; 53 std::size_t pad_index{};
53 PadTouch touch{PadTouch::Undefined};
54 PadMotion motion{PadMotion::Undefined}; 54 PadMotion motion{PadMotion::Undefined};
55 f32 motion_value{0.0f}; 55 f32 motion_value{0.0f};
56}; 56};
@@ -93,6 +93,9 @@ public:
93 DeviceStatus& GetPadState(const std::string& host, u16 port, std::size_t pad); 93 DeviceStatus& GetPadState(const std::string& host, u16 port, std::size_t pad);
94 const DeviceStatus& GetPadState(const std::string& host, u16 port, std::size_t pad) const; 94 const DeviceStatus& GetPadState(const std::string& host, u16 port, std::size_t pad) const;
95 95
96 Input::TouchStatus& GetTouchState();
97 const Input::TouchStatus& GetTouchState() const;
98
96private: 99private:
97 struct ClientData { 100 struct ClientData {
98 std::string host{"127.0.0.1"}; 101 std::string host{"127.0.0.1"};
@@ -122,14 +125,25 @@ private:
122 void StartCommunication(std::size_t client, const std::string& host, u16 port, 125 void StartCommunication(std::size_t client, const std::string& host, u16 port,
123 std::size_t pad_index, u32 client_id); 126 std::size_t pad_index, u32 client_id);
124 void UpdateYuzuSettings(std::size_t client, const Common::Vec3<float>& acc, 127 void UpdateYuzuSettings(std::size_t client, const Common::Vec3<float>& acc,
125 const Common::Vec3<float>& gyro, bool touch); 128 const Common::Vec3<float>& gyro);
129
130 // Returns an unused finger id, if there is no fingers available std::nullopt will be
131 // returned
132 std::optional<std::size_t> GetUnusedFingerID() const;
133
134 // Merges and updates all touch inputs into the touch_status array
135 void UpdateTouchInput(Response::TouchPad& touch_pad, std::size_t client, std::size_t id);
126 136
127 bool configuring = false; 137 bool configuring = false;
128 138
129 // Allocate clients for 8 udp servers 139 // Allocate clients for 8 udp servers
130 const std::size_t max_udp_clients = 32; 140 static constexpr std::size_t MAX_UDP_CLIENTS = 4 * 8;
131 std::array<ClientData, 4 * 8> clients; 141 // Each client can have up 2 touch inputs
132 Common::SPSCQueue<UDPPadStatus> pad_queue; 142 static constexpr std::size_t MAX_TOUCH_FINGERS = MAX_UDP_CLIENTS * 2;
143 std::array<ClientData, MAX_UDP_CLIENTS> clients{};
144 Common::SPSCQueue<UDPPadStatus> pad_queue{};
145 Input::TouchStatus touch_status{};
146 std::array<std::size_t, MAX_TOUCH_FINGERS> finger_id{};
133}; 147};
134 148
135/// An async job allowing configuration of the touchpad calibration. 149/// An async job allowing configuration of the touchpad calibration.
diff --git a/src/input_common/udp/protocol.h b/src/input_common/udp/protocol.h
index fc1aea4b9..a3d276697 100644
--- a/src/input_common/udp/protocol.h
+++ b/src/input_common/udp/protocol.h
@@ -140,6 +140,14 @@ static_assert(sizeof(PortInfo) == 12, "UDP Response PortInfo struct has wrong si
140static_assert(std::is_trivially_copyable_v<PortInfo>, 140static_assert(std::is_trivially_copyable_v<PortInfo>,
141 "UDP Response PortInfo is not trivially copyable"); 141 "UDP Response PortInfo is not trivially copyable");
142 142
143struct TouchPad {
144 u8 is_active{};
145 u8 id{};
146 u16_le x{};
147 u16_le y{};
148};
149static_assert(sizeof(TouchPad) == 6, "UDP Response TouchPad struct has wrong size ");
150
143#pragma pack(push, 1) 151#pragma pack(push, 1)
144struct PadData { 152struct PadData {
145 PortInfo info{}; 153 PortInfo info{};
@@ -190,12 +198,7 @@ struct PadData {
190 u8 button_13{}; 198 u8 button_13{};
191 } analog_button; 199 } analog_button;
192 200
193 struct TouchPad { 201 std::array<TouchPad, 2> touch;
194 u8 is_active{};
195 u8 id{};
196 u16_le x{};
197 u16_le y{};
198 } touch_1, touch_2;
199 202
200 u64_le motion_timestamp; 203 u64_le motion_timestamp;
201 204
@@ -222,7 +225,6 @@ static_assert(sizeof(Message<PadData>) == MAX_PACKET_SIZE,
222 225
223static_assert(sizeof(PadData::AnalogButton) == 12, 226static_assert(sizeof(PadData::AnalogButton) == 12,
224 "UDP Response AnalogButton struct has wrong size "); 227 "UDP Response AnalogButton struct has wrong size ");
225static_assert(sizeof(PadData::TouchPad) == 6, "UDP Response TouchPad struct has wrong size ");
226static_assert(sizeof(PadData::Accelerometer) == 12, 228static_assert(sizeof(PadData::Accelerometer) == 12,
227 "UDP Response Accelerometer struct has wrong size "); 229 "UDP Response Accelerometer struct has wrong size ");
228static_assert(sizeof(PadData::Gyroscope) == 12, "UDP Response Gyroscope struct has wrong size "); 230static_assert(sizeof(PadData::Gyroscope) == 12, "UDP Response Gyroscope struct has wrong size ");
diff --git a/src/input_common/udp/udp.cpp b/src/input_common/udp/udp.cpp
index c5da27a38..b630281a0 100644
--- a/src/input_common/udp/udp.cpp
+++ b/src/input_common/udp/udp.cpp
@@ -78,8 +78,8 @@ public:
78 explicit UDPTouch(std::string ip_, u16 port_, u16 pad_, CemuhookUDP::Client* client_) 78 explicit UDPTouch(std::string ip_, u16 port_, u16 pad_, CemuhookUDP::Client* client_)
79 : ip(std::move(ip_)), port(port_), pad(pad_), client(client_) {} 79 : ip(std::move(ip_)), port(port_), pad(pad_), client(client_) {}
80 80
81 std::tuple<float, float, bool> GetStatus() const override { 81 Input::TouchStatus GetStatus() const override {
82 return client->GetPadState(ip, port, pad).touch_status; 82 return client->GetTouchState();
83 } 83 }
84 84
85private: 85private:
@@ -107,32 +107,4 @@ std::unique_ptr<Input::TouchDevice> UDPTouchFactory::Create(const Common::ParamP
107 return std::make_unique<UDPTouch>(std::move(ip), port, pad, client.get()); 107 return std::make_unique<UDPTouch>(std::move(ip), port, pad, client.get());
108} 108}
109 109
110void UDPTouchFactory::BeginConfiguration() {
111 polling = true;
112 client->BeginConfiguration();
113}
114
115void UDPTouchFactory::EndConfiguration() {
116 polling = false;
117 client->EndConfiguration();
118}
119
120Common::ParamPackage UDPTouchFactory::GetNextInput() {
121 Common::ParamPackage params;
122 CemuhookUDP::UDPPadStatus pad;
123 auto& queue = client->GetPadQueue();
124 while (queue.Pop(pad)) {
125 if (pad.touch == CemuhookUDP::PadTouch::Undefined) {
126 continue;
127 }
128 params.Set("engine", "cemuhookudp");
129 params.Set("ip", pad.host);
130 params.Set("port", static_cast<u16>(pad.port));
131 params.Set("pad_index", static_cast<u16>(pad.pad_index));
132 params.Set("touch", static_cast<u16>(pad.touch));
133 return params;
134 }
135 return params;
136}
137
138} // namespace InputCommon 110} // namespace InputCommon
diff --git a/src/tests/CMakeLists.txt b/src/tests/CMakeLists.txt
index 8a606b448..6a5c18945 100644
--- a/src/tests/CMakeLists.txt
+++ b/src/tests/CMakeLists.txt
@@ -1,11 +1,11 @@
1add_executable(tests 1add_executable(tests
2 common/bit_field.cpp 2 common/bit_field.cpp
3 common/bit_utils.cpp
4 common/fibers.cpp 3 common/fibers.cpp
5 common/param_package.cpp 4 common/param_package.cpp
6 common/ring_buffer.cpp 5 common/ring_buffer.cpp
7 core/core_timing.cpp 6 core/core_timing.cpp
8 tests.cpp 7 tests.cpp
8 video_core/buffer_base.cpp
9) 9)
10 10
11create_target_directory_groups(tests) 11create_target_directory_groups(tests)
diff --git a/src/tests/common/bit_utils.cpp b/src/tests/common/bit_utils.cpp
deleted file mode 100644
index 479b5995a..000000000
--- a/src/tests/common/bit_utils.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
1// Copyright 2017 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <catch2/catch.hpp>
6#include <math.h>
7#include "common/bit_util.h"
8
9namespace Common {
10
11TEST_CASE("BitUtils::CountTrailingZeroes", "[common]") {
12 REQUIRE(Common::CountTrailingZeroes32(0) == 32);
13 REQUIRE(Common::CountTrailingZeroes64(0) == 64);
14 REQUIRE(Common::CountTrailingZeroes32(9) == 0);
15 REQUIRE(Common::CountTrailingZeroes32(8) == 3);
16 REQUIRE(Common::CountTrailingZeroes32(0x801000) == 12);
17 REQUIRE(Common::CountTrailingZeroes64(9) == 0);
18 REQUIRE(Common::CountTrailingZeroes64(8) == 3);
19 REQUIRE(Common::CountTrailingZeroes64(0x801000) == 12);
20 REQUIRE(Common::CountTrailingZeroes64(0x801000000000UL) == 36);
21}
22
23} // namespace Common
diff --git a/src/tests/video_core/buffer_base.cpp b/src/tests/video_core/buffer_base.cpp
new file mode 100644
index 000000000..651633e9e
--- /dev/null
+++ b/src/tests/video_core/buffer_base.cpp
@@ -0,0 +1,473 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <stdexcept>
6#include <unordered_map>
7
8#include <catch2/catch.hpp>
9
10#include "common/alignment.h"
11#include "common/common_types.h"
12#include "video_core/buffer_cache/buffer_base.h"
13
14namespace {
15using VideoCommon::BufferBase;
16using Range = std::pair<u64, u64>;
17
18constexpr u64 PAGE = 4096;
19constexpr u64 WORD = 4096 * 64;
20
21constexpr VAddr c = 0x1328914000;
22
23class RasterizerInterface {
24public:
25 void UpdatePagesCachedCount(VAddr addr, u64 size, int delta) {
26 const u64 page_start{addr >> Core::Memory::PAGE_BITS};
27 const u64 page_end{(addr + size + Core::Memory::PAGE_SIZE - 1) >> Core::Memory::PAGE_BITS};
28 for (u64 page = page_start; page < page_end; ++page) {
29 int& value = page_table[page];
30 value += delta;
31 if (value < 0) {
32 throw std::logic_error{"negative page"};
33 }
34 if (value == 0) {
35 page_table.erase(page);
36 }
37 }
38 }
39
40 [[nodiscard]] int Count(VAddr addr) const noexcept {
41 const auto it = page_table.find(addr >> Core::Memory::PAGE_BITS);
42 return it == page_table.end() ? 0 : it->second;
43 }
44
45 [[nodiscard]] unsigned Count() const noexcept {
46 unsigned count = 0;
47 for (const auto [index, value] : page_table) {
48 count += value;
49 }
50 return count;
51 }
52
53private:
54 std::unordered_map<u64, int> page_table;
55};
56} // Anonymous namespace
57
58TEST_CASE("BufferBase: Small buffer", "[video_core]") {
59 RasterizerInterface rasterizer;
60 BufferBase buffer(rasterizer, c, WORD);
61 REQUIRE(rasterizer.Count() == 0);
62 buffer.UnmarkRegionAsCpuModified(c, WORD);
63 REQUIRE(rasterizer.Count() == WORD / PAGE);
64 REQUIRE(buffer.ModifiedCpuRegion(c, WORD) == Range{0, 0});
65
66 buffer.MarkRegionAsCpuModified(c + PAGE, 1);
67 REQUIRE(buffer.ModifiedCpuRegion(c, WORD) == Range{PAGE * 1, PAGE * 2});
68}
69
70TEST_CASE("BufferBase: Large buffer", "[video_core]") {
71 RasterizerInterface rasterizer;
72 BufferBase buffer(rasterizer, c, WORD * 32);
73 buffer.UnmarkRegionAsCpuModified(c, WORD * 32);
74 buffer.MarkRegionAsCpuModified(c + 4096, WORD * 4);
75 REQUIRE(buffer.ModifiedCpuRegion(c, WORD + PAGE * 2) == Range{PAGE, WORD + PAGE * 2});
76 REQUIRE(buffer.ModifiedCpuRegion(c + PAGE * 2, PAGE * 6) == Range{PAGE * 2, PAGE * 8});
77 REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{PAGE, WORD * 4 + PAGE});
78 REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 4, PAGE) == Range{WORD * 4, WORD * 4 + PAGE});
79 REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 3 + PAGE * 63, PAGE) ==
80 Range{WORD * 3 + PAGE * 63, WORD * 4});
81
82 buffer.MarkRegionAsCpuModified(c + WORD * 5 + PAGE * 6, PAGE);
83 buffer.MarkRegionAsCpuModified(c + WORD * 5 + PAGE * 8, PAGE);
84 REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 5, WORD) ==
85 Range{WORD * 5 + PAGE * 6, WORD * 5 + PAGE * 9});
86
87 buffer.UnmarkRegionAsCpuModified(c + WORD * 5 + PAGE * 8, PAGE);
88 REQUIRE(buffer.ModifiedCpuRegion(c + WORD * 5, WORD) ==
89 Range{WORD * 5 + PAGE * 6, WORD * 5 + PAGE * 7});
90
91 buffer.MarkRegionAsCpuModified(c + PAGE, WORD * 31 + PAGE * 63);
92 REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{PAGE, WORD * 32});
93
94 buffer.UnmarkRegionAsCpuModified(c + PAGE * 4, PAGE);
95 buffer.UnmarkRegionAsCpuModified(c + PAGE * 6, PAGE);
96
97 buffer.UnmarkRegionAsCpuModified(c, WORD * 32);
98 REQUIRE(buffer.ModifiedCpuRegion(c, WORD * 32) == Range{0, 0});
99}
100
101TEST_CASE("BufferBase: Rasterizer counting", "[video_core]") {
102 RasterizerInterface rasterizer;
103 BufferBase buffer(rasterizer, c, PAGE * 2);
104 REQUIRE(rasterizer.Count() == 0);
105 buffer.UnmarkRegionAsCpuModified(c, PAGE);
106 REQUIRE(rasterizer.Count() == 1);
107 buffer.MarkRegionAsCpuModified(c, PAGE * 2);
108 REQUIRE(rasterizer.Count() == 0);
109 buffer.UnmarkRegionAsCpuModified(c, PAGE);
110 buffer.UnmarkRegionAsCpuModified(c + PAGE, PAGE);
111 REQUIRE(rasterizer.Count() == 2);
112 buffer.MarkRegionAsCpuModified(c, PAGE * 2);
113 REQUIRE(rasterizer.Count() == 0);
114}
115
116TEST_CASE("BufferBase: Basic range", "[video_core]") {
117 RasterizerInterface rasterizer;
118 BufferBase buffer(rasterizer, c, WORD);
119 buffer.UnmarkRegionAsCpuModified(c, WORD);
120 buffer.MarkRegionAsCpuModified(c, PAGE);
121 int num = 0;
122 buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) {
123 REQUIRE(offset == 0U);
124 REQUIRE(size == PAGE);
125 ++num;
126 });
127 REQUIRE(num == 1U);
128}
129
130TEST_CASE("BufferBase: Border upload", "[video_core]") {
131 RasterizerInterface rasterizer;
132 BufferBase buffer(rasterizer, c, WORD * 2);
133 buffer.UnmarkRegionAsCpuModified(c, WORD * 2);
134 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
135 buffer.ForEachUploadRange(c, WORD * 2, [](u64 offset, u64 size) {
136 REQUIRE(offset == WORD - PAGE);
137 REQUIRE(size == PAGE * 2);
138 });
139}
140
141TEST_CASE("BufferBase: Border upload range", "[video_core]") {
142 RasterizerInterface rasterizer;
143 BufferBase buffer(rasterizer, c, WORD * 2);
144 buffer.UnmarkRegionAsCpuModified(c, WORD * 2);
145 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
146 buffer.ForEachUploadRange(c + WORD - PAGE, PAGE * 2, [](u64 offset, u64 size) {
147 REQUIRE(offset == WORD - PAGE);
148 REQUIRE(size == PAGE * 2);
149 });
150 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
151 buffer.ForEachUploadRange(c + WORD - PAGE, PAGE, [](u64 offset, u64 size) {
152 REQUIRE(offset == WORD - PAGE);
153 REQUIRE(size == PAGE);
154 });
155 buffer.ForEachUploadRange(c + WORD, PAGE, [](u64 offset, u64 size) {
156 REQUIRE(offset == WORD);
157 REQUIRE(size == PAGE);
158 });
159}
160
161TEST_CASE("BufferBase: Border upload partial range", "[video_core]") {
162 RasterizerInterface rasterizer;
163 BufferBase buffer(rasterizer, c, WORD * 2);
164 buffer.UnmarkRegionAsCpuModified(c, WORD * 2);
165 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
166 buffer.ForEachUploadRange(c + WORD - 1, 2, [](u64 offset, u64 size) {
167 REQUIRE(offset == WORD - PAGE);
168 REQUIRE(size == PAGE * 2);
169 });
170 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
171 buffer.ForEachUploadRange(c + WORD - 1, 1, [](u64 offset, u64 size) {
172 REQUIRE(offset == WORD - PAGE);
173 REQUIRE(size == PAGE);
174 });
175 buffer.ForEachUploadRange(c + WORD + 50, 1, [](u64 offset, u64 size) {
176 REQUIRE(offset == WORD);
177 REQUIRE(size == PAGE);
178 });
179}
180
181TEST_CASE("BufferBase: Partial word uploads", "[video_core]") {
182 RasterizerInterface rasterizer;
183 BufferBase buffer(rasterizer, c, 0x9d000);
184 int num = 0;
185 buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) {
186 REQUIRE(offset == 0U);
187 REQUIRE(size == WORD);
188 ++num;
189 });
190 REQUIRE(num == 1);
191 buffer.ForEachUploadRange(c + WORD, WORD, [&](u64 offset, u64 size) {
192 REQUIRE(offset == WORD);
193 REQUIRE(size == WORD);
194 ++num;
195 });
196 REQUIRE(num == 2);
197 buffer.ForEachUploadRange(c + 0x79000, 0x24000, [&](u64 offset, u64 size) {
198 REQUIRE(offset == WORD * 2);
199 REQUIRE(size == PAGE * 0x1d);
200 ++num;
201 });
202 REQUIRE(num == 3);
203}
204
205TEST_CASE("BufferBase: Partial page upload", "[video_core]") {
206 RasterizerInterface rasterizer;
207 BufferBase buffer(rasterizer, c, WORD);
208 buffer.UnmarkRegionAsCpuModified(c, WORD);
209 int num = 0;
210 buffer.MarkRegionAsCpuModified(c + PAGE * 2, PAGE);
211 buffer.MarkRegionAsCpuModified(c + PAGE * 9, PAGE);
212 buffer.ForEachUploadRange(c, PAGE * 3, [&](u64 offset, u64 size) {
213 REQUIRE(offset == PAGE * 2);
214 REQUIRE(size == PAGE);
215 ++num;
216 });
217 REQUIRE(num == 1);
218 buffer.ForEachUploadRange(c + PAGE * 7, PAGE * 3, [&](u64 offset, u64 size) {
219 REQUIRE(offset == PAGE * 9);
220 REQUIRE(size == PAGE);
221 ++num;
222 });
223 REQUIRE(num == 2);
224}
225
226TEST_CASE("BufferBase: Partial page upload with multiple words on the right") {
227 RasterizerInterface rasterizer;
228 BufferBase buffer(rasterizer, c, WORD * 8);
229 buffer.UnmarkRegionAsCpuModified(c, WORD * 8);
230 buffer.MarkRegionAsCpuModified(c + PAGE * 13, WORD * 7);
231 int num = 0;
232 buffer.ForEachUploadRange(c + PAGE * 10, WORD * 7, [&](u64 offset, u64 size) {
233 REQUIRE(offset == PAGE * 13);
234 REQUIRE(size == WORD * 7 - PAGE * 3);
235 ++num;
236 });
237 REQUIRE(num == 1);
238 buffer.ForEachUploadRange(c + PAGE, WORD * 8, [&](u64 offset, u64 size) {
239 REQUIRE(offset == WORD * 7 + PAGE * 10);
240 REQUIRE(size == PAGE * 3);
241 ++num;
242 });
243 REQUIRE(num == 2);
244}
245
246TEST_CASE("BufferBase: Partial page upload with multiple words on the left", "[video_core]") {
247 RasterizerInterface rasterizer;
248 BufferBase buffer(rasterizer, c, WORD * 8);
249 buffer.UnmarkRegionAsCpuModified(c, WORD * 8);
250 buffer.MarkRegionAsCpuModified(c + PAGE * 13, WORD * 7);
251 int num = 0;
252 buffer.ForEachUploadRange(c + PAGE * 16, WORD * 7, [&](u64 offset, u64 size) {
253 REQUIRE(offset == PAGE * 16);
254 REQUIRE(size == WORD * 7 - PAGE * 3);
255 ++num;
256 });
257 REQUIRE(num == 1);
258 buffer.ForEachUploadRange(c + PAGE, WORD, [&](u64 offset, u64 size) {
259 REQUIRE(offset == PAGE * 13);
260 REQUIRE(size == PAGE * 3);
261 ++num;
262 });
263 REQUIRE(num == 2);
264}
265
266TEST_CASE("BufferBase: Partial page upload with multiple words in the middle", "[video_core]") {
267 RasterizerInterface rasterizer;
268 BufferBase buffer(rasterizer, c, WORD * 8);
269 buffer.UnmarkRegionAsCpuModified(c, WORD * 8);
270 buffer.MarkRegionAsCpuModified(c + PAGE * 13, PAGE * 140);
271 int num = 0;
272 buffer.ForEachUploadRange(c + PAGE * 16, WORD, [&](u64 offset, u64 size) {
273 REQUIRE(offset == PAGE * 16);
274 REQUIRE(size == WORD);
275 ++num;
276 });
277 REQUIRE(num == 1);
278 buffer.ForEachUploadRange(c, WORD, [&](u64 offset, u64 size) {
279 REQUIRE(offset == PAGE * 13);
280 REQUIRE(size == PAGE * 3);
281 ++num;
282 });
283 REQUIRE(num == 2);
284 buffer.ForEachUploadRange(c, WORD * 8, [&](u64 offset, u64 size) {
285 REQUIRE(offset == WORD + PAGE * 16);
286 REQUIRE(size == PAGE * 73);
287 ++num;
288 });
289 REQUIRE(num == 3);
290}
291
292TEST_CASE("BufferBase: Empty right bits", "[video_core]") {
293 RasterizerInterface rasterizer;
294 BufferBase buffer(rasterizer, c, WORD * 2048);
295 buffer.UnmarkRegionAsCpuModified(c, WORD * 2048);
296 buffer.MarkRegionAsCpuModified(c + WORD - PAGE, PAGE * 2);
297 buffer.ForEachUploadRange(c, WORD * 2048, [](u64 offset, u64 size) {
298 REQUIRE(offset == WORD - PAGE);
299 REQUIRE(size == PAGE * 2);
300 });
301}
302
303TEST_CASE("BufferBase: Out of bound ranges 1", "[video_core]") {
304 RasterizerInterface rasterizer;
305 BufferBase buffer(rasterizer, c, WORD);
306 buffer.UnmarkRegionAsCpuModified(c, WORD);
307 buffer.MarkRegionAsCpuModified(c, PAGE);
308 int num = 0;
309 buffer.ForEachUploadRange(c - WORD, WORD, [&](u64 offset, u64 size) { ++num; });
310 buffer.ForEachUploadRange(c + WORD, WORD, [&](u64 offset, u64 size) { ++num; });
311 buffer.ForEachUploadRange(c - PAGE, PAGE, [&](u64 offset, u64 size) { ++num; });
312 REQUIRE(num == 0);
313 buffer.ForEachUploadRange(c - PAGE, PAGE * 2, [&](u64 offset, u64 size) { ++num; });
314 REQUIRE(num == 1);
315 buffer.MarkRegionAsCpuModified(c, WORD);
316 REQUIRE(rasterizer.Count() == 0);
317}
318
319TEST_CASE("BufferBase: Out of bound ranges 2", "[video_core]") {
320 RasterizerInterface rasterizer;
321 BufferBase buffer(rasterizer, c, 0x22000);
322 REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x22000, PAGE));
323 REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x28000, PAGE));
324 REQUIRE(rasterizer.Count() == 0);
325 REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c + 0x21100, PAGE - 0x100));
326 REQUIRE(rasterizer.Count() == 1);
327 REQUIRE_NOTHROW(buffer.UnmarkRegionAsCpuModified(c - 0x1000, PAGE * 2));
328 buffer.UnmarkRegionAsCpuModified(c - 0x3000, PAGE * 2);
329 buffer.UnmarkRegionAsCpuModified(c - 0x2000, PAGE * 2);
330 REQUIRE(rasterizer.Count() == 2);
331}
332
333TEST_CASE("BufferBase: Out of bound ranges 3", "[video_core]") {
334 RasterizerInterface rasterizer;
335 BufferBase buffer(rasterizer, c, 0x310720);
336 buffer.UnmarkRegionAsCpuModified(c, 0x310720);
337 REQUIRE(rasterizer.Count(c) == 1);
338 REQUIRE(rasterizer.Count(c + PAGE) == 1);
339 REQUIRE(rasterizer.Count(c + WORD) == 1);
340 REQUIRE(rasterizer.Count(c + WORD + PAGE) == 1);
341}
342
343TEST_CASE("BufferBase: Sparse regions 1", "[video_core]") {
344 RasterizerInterface rasterizer;
345 BufferBase buffer(rasterizer, c, WORD);
346 buffer.UnmarkRegionAsCpuModified(c, WORD);
347 buffer.MarkRegionAsCpuModified(c + PAGE * 1, PAGE);
348 buffer.MarkRegionAsCpuModified(c + PAGE * 3, PAGE * 4);
349 buffer.ForEachUploadRange(c, WORD, [i = 0](u64 offset, u64 size) mutable {
350 static constexpr std::array<u64, 2> offsets{PAGE, PAGE * 3};
351 static constexpr std::array<u64, 2> sizes{PAGE, PAGE * 4};
352 REQUIRE(offset == offsets.at(i));
353 REQUIRE(size == sizes.at(i));
354 ++i;
355 });
356}
357
358TEST_CASE("BufferBase: Sparse regions 2", "[video_core]") {
359 RasterizerInterface rasterizer;
360 BufferBase buffer(rasterizer, c, 0x22000);
361 buffer.UnmarkRegionAsCpuModified(c, 0x22000);
362 REQUIRE(rasterizer.Count() == 0x22);
363 buffer.MarkRegionAsCpuModified(c + PAGE * 0x1B, PAGE);
364 buffer.MarkRegionAsCpuModified(c + PAGE * 0x21, PAGE);
365 buffer.ForEachUploadRange(c, WORD, [i = 0](u64 offset, u64 size) mutable {
366 static constexpr std::array<u64, 2> offsets{PAGE * 0x1B, PAGE * 0x21};
367 static constexpr std::array<u64, 2> sizes{PAGE, PAGE};
368 REQUIRE(offset == offsets.at(i));
369 REQUIRE(size == sizes.at(i));
370 ++i;
371 });
372}
373
374TEST_CASE("BufferBase: Single page modified range", "[video_core]") {
375 RasterizerInterface rasterizer;
376 BufferBase buffer(rasterizer, c, PAGE);
377 REQUIRE(buffer.IsRegionCpuModified(c, PAGE));
378 buffer.UnmarkRegionAsCpuModified(c, PAGE);
379 REQUIRE(!buffer.IsRegionCpuModified(c, PAGE));
380}
381
382TEST_CASE("BufferBase: Two page modified range", "[video_core]") {
383 RasterizerInterface rasterizer;
384 BufferBase buffer(rasterizer, c, PAGE * 2);
385 REQUIRE(buffer.IsRegionCpuModified(c, PAGE));
386 REQUIRE(buffer.IsRegionCpuModified(c + PAGE, PAGE));
387 REQUIRE(buffer.IsRegionCpuModified(c, PAGE * 2));
388 buffer.UnmarkRegionAsCpuModified(c, PAGE);
389 REQUIRE(!buffer.IsRegionCpuModified(c, PAGE));
390}
391
392TEST_CASE("BufferBase: Multi word modified ranges", "[video_core]") {
393 for (int offset = 0; offset < 4; ++offset) {
394 const VAddr address = c + WORD * offset;
395 RasterizerInterface rasterizer;
396 BufferBase buffer(rasterizer, address, WORD * 4);
397 REQUIRE(buffer.IsRegionCpuModified(address, PAGE));
398 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 48, PAGE));
399 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 56, PAGE));
400
401 buffer.UnmarkRegionAsCpuModified(address + PAGE * 32, PAGE);
402 REQUIRE(buffer.IsRegionCpuModified(address + PAGE, WORD));
403 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 31, PAGE));
404 REQUIRE(!buffer.IsRegionCpuModified(address + PAGE * 32, PAGE));
405 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 33, PAGE));
406 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 31, PAGE * 2));
407 REQUIRE(buffer.IsRegionCpuModified(address + PAGE * 32, PAGE * 2));
408
409 buffer.UnmarkRegionAsCpuModified(address + PAGE * 33, PAGE);
410 REQUIRE(!buffer.IsRegionCpuModified(address + PAGE * 32, PAGE * 2));
411 }
412}
413
414TEST_CASE("BufferBase: Single page in large buffer", "[video_core]") {
415 RasterizerInterface rasterizer;
416 BufferBase buffer(rasterizer, c, WORD * 16);
417 buffer.UnmarkRegionAsCpuModified(c, WORD * 16);
418 REQUIRE(!buffer.IsRegionCpuModified(c, WORD * 16));
419
420 buffer.MarkRegionAsCpuModified(c + WORD * 12 + PAGE * 8, PAGE);
421 REQUIRE(buffer.IsRegionCpuModified(c, WORD * 16));
422 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 10, WORD * 2));
423 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 11, WORD * 2));
424 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12, WORD * 2));
425 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 4, PAGE * 8));
426 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 6, PAGE * 8));
427 REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 6, PAGE));
428 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 7, PAGE * 2));
429 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 12 + PAGE * 8, PAGE * 2));
430}
431
432TEST_CASE("BufferBase: Out of bounds region query") {
433 RasterizerInterface rasterizer;
434 BufferBase buffer(rasterizer, c, WORD * 16);
435 REQUIRE(!buffer.IsRegionCpuModified(c - PAGE, PAGE));
436 REQUIRE(!buffer.IsRegionCpuModified(c - PAGE * 2, PAGE));
437 REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 16, PAGE));
438 REQUIRE(buffer.IsRegionCpuModified(c + WORD * 16 - PAGE, WORD * 64));
439 REQUIRE(!buffer.IsRegionCpuModified(c + WORD * 16, WORD * 64));
440}
441
442TEST_CASE("BufferBase: Wrap word regions") {
443 RasterizerInterface rasterizer;
444 BufferBase buffer(rasterizer, c, WORD * 2);
445 buffer.UnmarkRegionAsCpuModified(c, WORD * 2);
446 buffer.MarkRegionAsCpuModified(c + PAGE * 63, PAGE * 2);
447 REQUIRE(buffer.IsRegionCpuModified(c, WORD * 2));
448 REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 62, PAGE));
449 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE));
450 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 64, PAGE));
451 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE * 2));
452 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 63, PAGE * 8));
453 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 60, PAGE * 8));
454
455 REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 127, WORD * 16));
456 buffer.MarkRegionAsCpuModified(c + PAGE * 127, PAGE);
457 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 127, WORD * 16));
458 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 127, PAGE));
459 REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 126, PAGE));
460 REQUIRE(buffer.IsRegionCpuModified(c + PAGE * 126, PAGE * 2));
461 REQUIRE(!buffer.IsRegionCpuModified(c + PAGE * 128, WORD * 16));
462}
463
464TEST_CASE("BufferBase: Unaligned page region query") {
465 RasterizerInterface rasterizer;
466 BufferBase buffer(rasterizer, c, WORD);
467 buffer.UnmarkRegionAsCpuModified(c, WORD);
468 buffer.MarkRegionAsCpuModified(c + 4000, 1000);
469 REQUIRE(buffer.IsRegionCpuModified(c, PAGE));
470 REQUIRE(buffer.IsRegionCpuModified(c + PAGE, PAGE));
471 REQUIRE(buffer.IsRegionCpuModified(c + 4000, 1000));
472 REQUIRE(buffer.IsRegionCpuModified(c + 4000, 1));
473}
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index f7b9d7f86..e01ea55ab 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -1,6 +1,7 @@
1add_subdirectory(host_shaders) 1add_subdirectory(host_shaders)
2 2
3add_library(video_core STATIC 3add_library(video_core STATIC
4 buffer_cache/buffer_base.h
4 buffer_cache/buffer_block.h 5 buffer_cache/buffer_block.h
5 buffer_cache/buffer_cache.h 6 buffer_cache/buffer_cache.h
6 buffer_cache/map_interval.cpp 7 buffer_cache/map_interval.cpp
@@ -135,8 +136,6 @@ add_library(video_core STATIC
135 renderer_vulkan/vk_graphics_pipeline.h 136 renderer_vulkan/vk_graphics_pipeline.h
136 renderer_vulkan/vk_master_semaphore.cpp 137 renderer_vulkan/vk_master_semaphore.cpp
137 renderer_vulkan/vk_master_semaphore.h 138 renderer_vulkan/vk_master_semaphore.h
138 renderer_vulkan/vk_memory_manager.cpp
139 renderer_vulkan/vk_memory_manager.h
140 renderer_vulkan/vk_pipeline_cache.cpp 139 renderer_vulkan/vk_pipeline_cache.cpp
141 renderer_vulkan/vk_pipeline_cache.h 140 renderer_vulkan/vk_pipeline_cache.h
142 renderer_vulkan/vk_query_cache.cpp 141 renderer_vulkan/vk_query_cache.cpp
@@ -259,6 +258,8 @@ add_library(video_core STATIC
259 vulkan_common/vulkan_instance.h 258 vulkan_common/vulkan_instance.h
260 vulkan_common/vulkan_library.cpp 259 vulkan_common/vulkan_library.cpp
261 vulkan_common/vulkan_library.h 260 vulkan_common/vulkan_library.h
261 vulkan_common/vulkan_memory_allocator.cpp
262 vulkan_common/vulkan_memory_allocator.h
262 vulkan_common/vulkan_surface.cpp 263 vulkan_common/vulkan_surface.cpp
263 vulkan_common/vulkan_surface.h 264 vulkan_common/vulkan_surface.h
264 vulkan_common/vulkan_wrapper.cpp 265 vulkan_common/vulkan_wrapper.cpp
@@ -312,9 +313,7 @@ else()
312 -Werror=pessimizing-move 313 -Werror=pessimizing-move
313 -Werror=redundant-move 314 -Werror=redundant-move
314 -Werror=shadow 315 -Werror=shadow
315 -Werror=switch
316 -Werror=type-limits 316 -Werror=type-limits
317 -Werror=unused-variable
318 317
319 $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess> 318 $<$<CXX_COMPILER_ID:GNU>:-Werror=class-memaccess>
320 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> 319 $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
new file mode 100644
index 000000000..ee8602ce9
--- /dev/null
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -0,0 +1,495 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <bit>
9#include <limits>
10#include <utility>
11
12#include "common/alignment.h"
13#include "common/common_funcs.h"
14#include "common/common_types.h"
15#include "common/div_ceil.h"
16#include "core/memory.h"
17
18namespace VideoCommon {
19
20enum class BufferFlagBits {
21 Picked = 1 << 0,
22};
23DECLARE_ENUM_FLAG_OPERATORS(BufferFlagBits)
24
25/// Tag for creating null buffers with no storage or size
26struct NullBufferParams {};
27
28/**
29 * Range tracking buffer container.
30 *
31 * It keeps track of the modified CPU and GPU ranges at CPU page granularity, notifying the given
32 * rasterizer about state changes in the tracking behavior of the buffer.
33 *
34 * The buffer size and address is forcefully aligned to CPU page boundaries.
35 */
36template <class RasterizerInterface>
37class BufferBase {
38 static constexpr u64 PAGES_PER_WORD = 64;
39 static constexpr u64 BYTES_PER_PAGE = Core::Memory::PAGE_SIZE;
40 static constexpr u64 BYTES_PER_WORD = PAGES_PER_WORD * BYTES_PER_PAGE;
41
42 /// Vector tracking modified pages tightly packed with small vector optimization
43 union WrittenWords {
44 /// Returns the pointer to the words state
45 [[nodiscard]] const u64* Pointer(bool is_short) const noexcept {
46 return is_short ? &stack : heap;
47 }
48
49 /// Returns the pointer to the words state
50 [[nodiscard]] u64* Pointer(bool is_short) noexcept {
51 return is_short ? &stack : heap;
52 }
53
54 u64 stack = 0; ///< Small buffers storage
55 u64* heap; ///< Not-small buffers pointer to the storage
56 };
57
58 struct GpuCpuWords {
59 explicit GpuCpuWords() = default;
60 explicit GpuCpuWords(u64 size_bytes_) : size_bytes{size_bytes_} {
61 if (IsShort()) {
62 cpu.stack = ~u64{0};
63 gpu.stack = 0;
64 } else {
65 // Share allocation between CPU and GPU pages and set their default values
66 const size_t num_words = NumWords();
67 u64* const alloc = new u64[num_words * 2];
68 cpu.heap = alloc;
69 gpu.heap = alloc + num_words;
70 std::fill_n(cpu.heap, num_words, ~u64{0});
71 std::fill_n(gpu.heap, num_words, 0);
72 }
73        // Clean up trailing bits
74 const u64 last_local_page =
75 Common::DivCeil(size_bytes % BYTES_PER_WORD, BYTES_PER_PAGE);
76 const u64 shift = (PAGES_PER_WORD - last_local_page) % PAGES_PER_WORD;
77 u64& last_word = cpu.Pointer(IsShort())[NumWords() - 1];
78 last_word = (last_word << shift) >> shift;
79 }
80
81 ~GpuCpuWords() {
82 Release();
83 }
84
85 GpuCpuWords& operator=(GpuCpuWords&& rhs) noexcept {
86 Release();
87 size_bytes = rhs.size_bytes;
88 cpu = rhs.cpu;
89 gpu = rhs.gpu;
90 rhs.cpu.heap = nullptr;
91 return *this;
92 }
93
94 GpuCpuWords(GpuCpuWords&& rhs) noexcept
95 : size_bytes{rhs.size_bytes}, cpu{rhs.cpu}, gpu{rhs.gpu} {
96 rhs.cpu.heap = nullptr;
97 }
98
99 GpuCpuWords& operator=(const GpuCpuWords&) = delete;
100 GpuCpuWords(const GpuCpuWords&) = delete;
101
102 /// Returns true when the buffer fits in the small vector optimization
103 [[nodiscard]] bool IsShort() const noexcept {
104 return size_bytes <= BYTES_PER_WORD;
105 }
106
107 /// Returns the number of words of the buffer
108 [[nodiscard]] size_t NumWords() const noexcept {
109 return Common::DivCeil(size_bytes, BYTES_PER_WORD);
110 }
111
112 /// Release buffer resources
113 void Release() {
114 if (!IsShort()) {
115            // The CPU written-words pointer is the base of the shared heap allocation
116 delete[] cpu.heap;
117 }
118 }
119
120 u64 size_bytes = 0;
121 WrittenWords cpu;
122 WrittenWords gpu;
123 };
124
125public:
126 explicit BufferBase(RasterizerInterface& rasterizer_, VAddr cpu_addr_, u64 size_bytes)
127 : rasterizer{&rasterizer_}, cpu_addr{Common::AlignDown(cpu_addr_, BYTES_PER_PAGE)},
128 words(Common::AlignUp(size_bytes + (cpu_addr_ - cpu_addr), BYTES_PER_PAGE)) {}
129
130 explicit BufferBase(NullBufferParams) {}
131
132 BufferBase& operator=(const BufferBase&) = delete;
133 BufferBase(const BufferBase&) = delete;
134
135 /// Returns the inclusive CPU modified range in a begin end pair
136 [[nodiscard]] std::pair<u64, u64> ModifiedCpuRegion(VAddr query_cpu_addr,
137 u64 query_size) const noexcept {
138 const u64 offset = query_cpu_addr - cpu_addr;
139 return ModifiedRegion<false>(offset, query_size);
140 }
141
142 /// Returns the inclusive GPU modified range in a begin end pair
143 [[nodiscard]] std::pair<u64, u64> ModifiedGpuRegion(VAddr query_cpu_addr,
144 u64 query_size) const noexcept {
145 const u64 offset = query_cpu_addr - cpu_addr;
146 return ModifiedRegion<true>(offset, query_size);
147 }
148
149 /// Returns true if a region has been modified from the CPU
150 [[nodiscard]] bool IsRegionCpuModified(VAddr query_cpu_addr, u64 query_size) const noexcept {
151 const u64 offset = query_cpu_addr - cpu_addr;
152 return IsRegionModified<false>(offset, query_size);
153 }
154
155 /// Returns true if a region has been modified from the GPU
156 [[nodiscard]] bool IsRegionGpuModified(VAddr query_cpu_addr, u64 query_size) const noexcept {
157 const u64 offset = query_cpu_addr - cpu_addr;
158 return IsRegionModified<true>(offset, query_size);
159 }
160
161 /// Mark region as CPU modified, notifying the rasterizer about this change
162 void MarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 size) {
163 ChangeRegionState<true, true>(words.cpu, dirty_cpu_addr, size);
164 }
165
166 /// Unmark region as CPU modified, notifying the rasterizer about this change
167 void UnmarkRegionAsCpuModified(VAddr dirty_cpu_addr, u64 size) {
168 ChangeRegionState<false, true>(words.cpu, dirty_cpu_addr, size);
169 }
170
171 /// Mark region as modified from the host GPU
172 void MarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 size) noexcept {
173 ChangeRegionState<true, false>(words.gpu, dirty_cpu_addr, size);
174 }
175
176 /// Unmark region as modified from the host GPU
177 void UnmarkRegionAsGpuModified(VAddr dirty_cpu_addr, u64 size) noexcept {
178 ChangeRegionState<false, false>(words.gpu, dirty_cpu_addr, size);
179 }
180
181 /// Call 'func' for each CPU modified range and unmark those pages as CPU modified
182 template <typename Func>
183 void ForEachUploadRange(VAddr query_cpu_range, u64 size, Func&& func) {
184 ForEachModifiedRange<false, true>(query_cpu_range, size, func);
185 }
186
187 /// Call 'func' for each GPU modified range and unmark those pages as GPU modified
188 template <typename Func>
189 void ForEachDownloadRange(VAddr query_cpu_range, u64 size, Func&& func) {
190 ForEachModifiedRange<true, false>(query_cpu_range, size, func);
191 }
192
193    /// Call 'func' for each GPU modified range in the whole buffer and unmark those pages as GPU modified
194 template <typename Func>
195 void ForEachDownloadRange(Func&& func) {
196 ForEachModifiedRange<true, false>(cpu_addr, SizeBytes(), func);
197 }
198
199 /// Mark buffer as picked
200 void Pick() noexcept {
201 flags |= BufferFlagBits::Picked;
202 }
203
204 /// Unmark buffer as picked
205 void Unpick() noexcept {
206 flags &= ~BufferFlagBits::Picked;
207 }
208
209 /// Returns true when vaddr -> vaddr+size is fully contained in the buffer
210 [[nodiscard]] bool IsInBounds(VAddr addr, u64 size) const noexcept {
211 return addr >= cpu_addr && addr + size <= cpu_addr + SizeBytes();
212 }
213
214 /// Returns true if the buffer has been marked as picked
215 [[nodiscard]] bool IsPicked() const noexcept {
216 return True(flags & BufferFlagBits::Picked);
217 }
218
219 /// Returns the base CPU address of the buffer
220 [[nodiscard]] VAddr CpuAddr() const noexcept {
221 return cpu_addr;
222 }
223
224 /// Returns the offset relative to the given CPU address
225 /// @pre IsInBounds returns true
226 [[nodiscard]] u32 Offset(VAddr other_cpu_addr) const noexcept {
227 return static_cast<u32>(other_cpu_addr - cpu_addr);
228 }
229
230 /// Returns the size in bytes of the buffer
231 [[nodiscard]] u64 SizeBytes() const noexcept {
232 return words.size_bytes;
233 }
234
235private:
236 /**
237 * Change the state of a range of pages
238 *
239 * @param written_words Pages to be marked or unmarked as modified
240 * @param dirty_addr Base address to mark or unmark as modified
241 * @param size Size in bytes to mark or unmark as modified
242 *
243 * @tparam enable True when the bits will be set to one, false for zero
244 * @tparam notify_rasterizer True when the rasterizer has to be notified about the changes
245 */
246 template <bool enable, bool notify_rasterizer>
247 void ChangeRegionState(WrittenWords& written_words, u64 dirty_addr,
248 s64 size) noexcept(!notify_rasterizer) {
249 const s64 difference = dirty_addr - cpu_addr;
250 const u64 offset = std::max<s64>(difference, 0);
251 size += std::min<s64>(difference, 0);
252 if (offset >= SizeBytes() || size < 0) {
253 return;
254 }
255 u64* const state_words = written_words.Pointer(IsShort());
256 const u64 offset_end = std::min(offset + size, SizeBytes());
257 const u64 begin_page_index = offset / BYTES_PER_PAGE;
258 const u64 begin_word_index = begin_page_index / PAGES_PER_WORD;
259 const u64 end_page_index = Common::DivCeil(offset_end, BYTES_PER_PAGE);
260 const u64 end_word_index = Common::DivCeil(end_page_index, PAGES_PER_WORD);
261 u64 page_index = begin_page_index % PAGES_PER_WORD;
262 u64 word_index = begin_word_index;
263 while (word_index < end_word_index) {
264 const u64 next_word_first_page = (word_index + 1) * PAGES_PER_WORD;
265 const u64 left_offset =
266 std::min(next_word_first_page - end_page_index, PAGES_PER_WORD) % PAGES_PER_WORD;
267 const u64 right_offset = page_index;
268 u64 bits = ~u64{0};
269 bits = (bits >> right_offset) << right_offset;
270 bits = (bits << left_offset) >> left_offset;
271 if constexpr (notify_rasterizer) {
272 NotifyRasterizer<!enable>(word_index, state_words[word_index], bits);
273 }
274 if constexpr (enable) {
275 state_words[word_index] |= bits;
276 } else {
277 state_words[word_index] &= ~bits;
278 }
279 page_index = 0;
280 ++word_index;
281 }
282 }
283
284 /**
285 * Notify rasterizer about changes in the CPU tracking state of a word in the buffer
286 *
287 * @param word_index Index to the word to notify to the rasterizer
288 * @param current_bits Current state of the word
289 * @param new_bits New state of the word
290 *
291 * @tparam add_to_rasterizer True when the rasterizer should start tracking the new pages
292 */
293 template <bool add_to_rasterizer>
294 void NotifyRasterizer(u64 word_index, u64 current_bits, u64 new_bits) {
295 u64 changed_bits = (add_to_rasterizer ? current_bits : ~current_bits) & new_bits;
296 VAddr addr = cpu_addr + word_index * BYTES_PER_WORD;
297 while (changed_bits != 0) {
298 const int empty_bits = std::countr_zero(changed_bits);
299 addr += empty_bits * BYTES_PER_PAGE;
300 changed_bits >>= empty_bits;
301
302 const u32 continuous_bits = std::countr_one(changed_bits);
303 const u64 size = continuous_bits * BYTES_PER_PAGE;
304 const VAddr begin_addr = addr;
305 addr += size;
306 changed_bits = continuous_bits < PAGES_PER_WORD ? (changed_bits >> continuous_bits) : 0;
307 rasterizer->UpdatePagesCachedCount(begin_addr, size, add_to_rasterizer ? 1 : -1);
308 }
309 }
310
311 /**
312 * Loop over each page in the given range, turn off those bits and notify the rasterizer if
313 * needed. Call the given function on each turned off range.
314 *
315 * @param query_cpu_range Base CPU address to loop over
316 * @param size Size in bytes of the CPU range to loop over
317 * @param func Function to call for each turned off region
318 *
319 * @tparam gpu True for host GPU pages, false for CPU pages
320 * @tparam notify_rasterizer True when the rasterizer should be notified about state changes
321 */
322 template <bool gpu, bool notify_rasterizer, typename Func>
323 void ForEachModifiedRange(VAddr query_cpu_range, s64 size, Func&& func) {
324 const s64 difference = query_cpu_range - cpu_addr;
325 const u64 query_begin = std::max<s64>(difference, 0);
326 size += std::min<s64>(difference, 0);
327 if (query_begin >= SizeBytes() || size < 0) {
328 return;
329 }
330 const u64* const cpu_words = words.cpu.Pointer(IsShort());
331 const u64 query_end = query_begin + std::min(static_cast<u64>(size), SizeBytes());
332 u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort());
333 u64* const words_begin = state_words + query_begin / BYTES_PER_WORD;
334 u64* const words_end = state_words + Common::DivCeil(query_end, BYTES_PER_WORD);
335
336 const auto modified = [](u64 word) { return word != 0; };
337 const auto first_modified_word = std::find_if(words_begin, words_end, modified);
338 if (first_modified_word == words_end) {
339 // Exit early when the buffer is not modified
340 return;
341 }
342 const auto last_modified_word = std::find_if_not(first_modified_word, words_end, modified);
343
344 const u64 word_index_begin = std::distance(state_words, first_modified_word);
345 const u64 word_index_end = std::distance(state_words, last_modified_word);
346
347 const unsigned local_page_begin = std::countr_zero(*first_modified_word);
348 const unsigned local_page_end = PAGES_PER_WORD - std::countl_zero(last_modified_word[-1]);
349 const u64 word_page_begin = word_index_begin * PAGES_PER_WORD;
350 const u64 word_page_end = (word_index_end - 1) * PAGES_PER_WORD;
351 const u64 query_page_begin = query_begin / BYTES_PER_PAGE;
352 const u64 query_page_end = Common::DivCeil(query_end, BYTES_PER_PAGE);
353 const u64 page_index_begin = std::max(word_page_begin + local_page_begin, query_page_begin);
354 const u64 page_index_end = std::min(word_page_end + local_page_end, query_page_end);
355 const u64 first_word_page_begin = page_index_begin % PAGES_PER_WORD;
356 const u64 last_word_page_end = (page_index_end - 1) % PAGES_PER_WORD + 1;
357
358 u64 page_begin = first_word_page_begin;
359 u64 current_base = 0;
360 u64 current_size = 0;
361 bool on_going = false;
362 for (u64 word_index = word_index_begin; word_index < word_index_end; ++word_index) {
363 const bool is_last_word = word_index + 1 == word_index_end;
364 const u64 page_end = is_last_word ? last_word_page_end : PAGES_PER_WORD;
365 const u64 right_offset = page_begin;
366 const u64 left_offset = PAGES_PER_WORD - page_end;
367 u64 bits = ~u64{0};
368 bits = (bits >> right_offset) << right_offset;
369 bits = (bits << left_offset) >> left_offset;
370
371 const u64 current_word = state_words[word_index] & bits;
372 state_words[word_index] &= ~bits;
373
374 // Exclude CPU modified pages when visiting GPU pages
375 const u64 word = current_word & ~(gpu ? cpu_words[word_index] : 0);
376 if constexpr (notify_rasterizer) {
377 NotifyRasterizer<true>(word_index, word, ~u64{0});
378 }
379 u64 page = page_begin;
380 page_begin = 0;
381
382 while (page < page_end) {
383 const int empty_bits = std::countr_zero(word >> page);
384 if (on_going && empty_bits != 0) {
385 InvokeModifiedRange(func, current_size, current_base);
386 current_size = 0;
387 on_going = false;
388 }
389 page += empty_bits;
390
391 const int continuous_bits = std::countr_one(word >> page);
392 if (!on_going && continuous_bits != 0) {
393 current_base = word_index * PAGES_PER_WORD + page;
394 on_going = true;
395 }
396 current_size += continuous_bits;
397 page += continuous_bits;
398 }
399 }
400 if (on_going && current_size > 0) {
401 InvokeModifiedRange(func, current_size, current_base);
402 }
403 }
404
405 template <typename Func>
406 void InvokeModifiedRange(Func&& func, u64 current_size, u64 current_base) {
407 const u64 current_size_bytes = current_size * BYTES_PER_PAGE;
408 const u64 offset_begin = current_base * BYTES_PER_PAGE;
409 const u64 offset_end = std::min(offset_begin + current_size_bytes, SizeBytes());
410 func(offset_begin, offset_end - offset_begin);
411 }
412
413 /**
414 * Returns true when a region has been modified
415 *
416 * @param offset Offset in bytes from the start of the buffer
417 * @param size Size in bytes of the region to query for modifications
418 */
419 template <bool gpu>
420 [[nodiscard]] bool IsRegionModified(u64 offset, u64 size) const noexcept {
421 const u64* const cpu_words = words.cpu.Pointer(IsShort());
422 const u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort());
423 const u64 num_query_words = size / BYTES_PER_WORD + 1;
424 const u64 word_begin = offset / BYTES_PER_WORD;
425 const u64 word_end = std::min(word_begin + num_query_words, NumWords());
426 const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE);
427 u64 page_index = (offset / BYTES_PER_PAGE) % PAGES_PER_WORD;
428 for (u64 word_index = word_begin; word_index < word_end; ++word_index, page_index = 0) {
429 const u64 word = state_words[word_index] & ~(gpu ? cpu_words[word_index] : 0);
430 if (word == 0) {
431 continue;
432 }
433 const u64 page_end = std::min((word_index + 1) * PAGES_PER_WORD, page_limit);
434 const u64 local_page_end = page_end % PAGES_PER_WORD;
435 const u64 page_end_shift = (PAGES_PER_WORD - local_page_end) % PAGES_PER_WORD;
436 if (((word >> page_index) << page_index) << page_end_shift != 0) {
437 return true;
438 }
439 }
440 return false;
441 }
442
443 /**
444 * Returns a begin end pair with the inclusive modified region
445 *
446 * @param offset Offset in bytes from the start of the buffer
447 * @param size Size in bytes of the region to query for modifications
448 *
449 * @tparam gpu True to query GPU modified pages, false for CPU pages
450 */
451 template <bool gpu>
452 [[nodiscard]] std::pair<u64, u64> ModifiedRegion(u64 offset, u64 size) const noexcept {
453 const u64* const cpu_words = words.cpu.Pointer(IsShort());
454 const u64* const state_words = (gpu ? words.gpu : words.cpu).Pointer(IsShort());
455 const u64 num_query_words = size / BYTES_PER_WORD + 1;
456 const u64 word_begin = offset / BYTES_PER_WORD;
457 const u64 word_end = std::min(word_begin + num_query_words, NumWords());
458 const u64 page_base = offset / BYTES_PER_PAGE;
459 const u64 page_limit = Common::DivCeil(offset + size, BYTES_PER_PAGE);
460 u64 begin = std::numeric_limits<u64>::max();
461 u64 end = 0;
462 for (u64 word_index = word_begin; word_index < word_end; ++word_index) {
463 const u64 word = state_words[word_index] & ~(gpu ? cpu_words[word_index] : 0);
464 if (word == 0) {
465 continue;
466 }
467 const u64 local_page_begin = std::countr_zero(word);
468 const u64 local_page_end = PAGES_PER_WORD - std::countl_zero(word);
469 const u64 page_index = word_index * PAGES_PER_WORD;
470 const u64 page_begin = std::max(page_index + local_page_begin, page_base);
471 const u64 page_end = std::min(page_index + local_page_end, page_limit);
472 begin = std::min(begin, page_begin);
473 end = std::max(end, page_end);
474 }
475 static constexpr std::pair<u64, u64> EMPTY{0, 0};
476 return begin < end ? std::make_pair(begin * BYTES_PER_PAGE, end * BYTES_PER_PAGE) : EMPTY;
477 }
478
479 /// Returns the number of words of the buffer
480 [[nodiscard]] size_t NumWords() const noexcept {
481 return words.NumWords();
482 }
483
484 /// Returns true when the buffer fits in the small vector optimization
485 [[nodiscard]] bool IsShort() const noexcept {
486 return words.IsShort();
487 }
488
489 RasterizerInterface* rasterizer = nullptr;
490 VAddr cpu_addr = 0;
491 GpuCpuWords words;
492 BufferFlagBits flags{};
493};
494
495} // namespace VideoCommon
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 94679d5d1..33b3c060b 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -18,10 +18,10 @@
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19// 19//
20 20
21#include <bit>
21#include "command_classes/host1x.h" 22#include "command_classes/host1x.h"
22#include "command_classes/nvdec.h" 23#include "command_classes/nvdec.h"
23#include "command_classes/vic.h" 24#include "command_classes/vic.h"
24#include "common/bit_util.h"
25#include "video_core/cdma_pusher.h" 25#include "video_core/cdma_pusher.h"
26#include "video_core/command_classes/nvdec_common.h" 26#include "video_core/command_classes/nvdec_common.h"
27#include "video_core/engines/maxwell_3d.h" 27#include "video_core/engines/maxwell_3d.h"
@@ -56,7 +56,7 @@ void CDmaPusher::Step() {
56 56
57 for (const u32 value : values) { 57 for (const u32 value : values) {
58 if (mask != 0) { 58 if (mask != 0) {
59 const u32 lbs = Common::CountTrailingZeroes32(mask); 59 const auto lbs = static_cast<u32>(std::countr_zero(mask));
60 mask &= ~(1U << lbs); 60 mask &= ~(1U << lbs);
61 ExecuteCommand(static_cast<u32>(offset + lbs), value); 61 ExecuteCommand(static_cast<u32>(offset + lbs), value);
62 continue; 62 continue;
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index 8ca70b6dd..e5f212c1a 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -126,7 +126,7 @@ private:
126 126
127 s32 count{}; 127 s32 count{};
128 s32 offset{}; 128 s32 offset{};
129 s32 mask{}; 129 u32 mask{};
130 bool incrementing{}; 130 bool incrementing{};
131 131
132 // Queue of command lists to be processed 132 // Queue of command lists to be processed
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/command_classes/codecs/h264.cpp
index 65bbeac78..fea6aed98 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/command_classes/codecs/h264.cpp
@@ -19,7 +19,7 @@
19// 19//
20 20
21#include <array> 21#include <array>
22#include "common/bit_util.h" 22#include <bit>
23#include "video_core/command_classes/codecs/h264.h" 23#include "video_core/command_classes/codecs/h264.h"
24#include "video_core/gpu.h" 24#include "video_core/gpu.h"
25#include "video_core/memory_manager.h" 25#include "video_core/memory_manager.h"
@@ -266,7 +266,7 @@ void H264BitWriter::WriteExpGolombCodedInt(s32 value) {
266} 266}
267 267
268void H264BitWriter::WriteExpGolombCodedUInt(u32 value) { 268void H264BitWriter::WriteExpGolombCodedUInt(u32 value) {
269 const s32 size = 32 - Common::CountLeadingZeroes32(static_cast<s32>(value + 1)); 269 const s32 size = 32 - std::countl_zero(value + 1);
270 WriteBits(1, size); 270 WriteBits(1, size);
271 271
272 value -= (1U << (size - 1)) - 1; 272 value -= (1U << (size - 1)) - 1;
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index 81522988e..0de3280a2 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -171,30 +171,30 @@ public:
171 static constexpr std::size_t NUM_REGS = 0x258; 171 static constexpr std::size_t NUM_REGS = 0x258;
172 struct { 172 struct {
173 u32 object; 173 u32 object;
174 INSERT_UNION_PADDING_WORDS(0x3F); 174 INSERT_PADDING_WORDS_NOINIT(0x3F);
175 u32 no_operation; 175 u32 no_operation;
176 NotifyType notify; 176 NotifyType notify;
177 INSERT_UNION_PADDING_WORDS(0x2); 177 INSERT_PADDING_WORDS_NOINIT(0x2);
178 u32 wait_for_idle; 178 u32 wait_for_idle;
179 INSERT_UNION_PADDING_WORDS(0xB); 179 INSERT_PADDING_WORDS_NOINIT(0xB);
180 u32 pm_trigger; 180 u32 pm_trigger;
181 INSERT_UNION_PADDING_WORDS(0xF); 181 INSERT_PADDING_WORDS_NOINIT(0xF);
182 u32 context_dma_notify; 182 u32 context_dma_notify;
183 u32 dst_context_dma; 183 u32 dst_context_dma;
184 u32 src_context_dma; 184 u32 src_context_dma;
185 u32 semaphore_context_dma; 185 u32 semaphore_context_dma;
186 INSERT_UNION_PADDING_WORDS(0x1C); 186 INSERT_PADDING_WORDS_NOINIT(0x1C);
187 Surface dst; 187 Surface dst;
188 CpuIndexWrap pixels_from_cpu_index_wrap; 188 CpuIndexWrap pixels_from_cpu_index_wrap;
189 u32 kind2d_check_enable; 189 u32 kind2d_check_enable;
190 Surface src; 190 Surface src;
191 SectorPromotion pixels_from_memory_sector_promotion; 191 SectorPromotion pixels_from_memory_sector_promotion;
192 INSERT_UNION_PADDING_WORDS(0x1); 192 INSERT_PADDING_WORDS_NOINIT(0x1);
193 NumTpcs num_tpcs; 193 NumTpcs num_tpcs;
194 u32 render_enable_addr_upper; 194 u32 render_enable_addr_upper;
195 u32 render_enable_addr_lower; 195 u32 render_enable_addr_lower;
196 RenderEnableMode render_enable_mode; 196 RenderEnableMode render_enable_mode;
197 INSERT_UNION_PADDING_WORDS(0x4); 197 INSERT_PADDING_WORDS_NOINIT(0x4);
198 u32 clip_x0; 198 u32 clip_x0;
199 u32 clip_y0; 199 u32 clip_y0;
200 u32 clip_width; 200 u32 clip_width;
@@ -212,7 +212,7 @@ public:
212 BitField<8, 6, u32> y; 212 BitField<8, 6, u32> y;
213 } pattern_offset; 213 } pattern_offset;
214 BitField<0, 2, PatternSelect> pattern_select; 214 BitField<0, 2, PatternSelect> pattern_select;
215 INSERT_UNION_PADDING_WORDS(0xC); 215 INSERT_PADDING_WORDS_NOINIT(0xC);
216 struct { 216 struct {
217 BitField<0, 3, MonochromePatternColorFormat> color_format; 217 BitField<0, 3, MonochromePatternColorFormat> color_format;
218 BitField<0, 1, MonochromePatternFormat> format; 218 BitField<0, 1, MonochromePatternFormat> format;
@@ -227,15 +227,15 @@ public:
227 std::array<u32, 0x20> X1R5G5B5; 227 std::array<u32, 0x20> X1R5G5B5;
228 std::array<u32, 0x10> Y8; 228 std::array<u32, 0x10> Y8;
229 } color_pattern; 229 } color_pattern;
230 INSERT_UNION_PADDING_WORDS(0x10); 230 INSERT_PADDING_WORDS_NOINIT(0x10);
231 struct { 231 struct {
232 u32 prim_mode; 232 u32 prim_mode;
233 u32 prim_color_format; 233 u32 prim_color_format;
234 u32 prim_color; 234 u32 prim_color;
235 u32 line_tie_break_bits; 235 u32 line_tie_break_bits;
236 INSERT_UNION_PADDING_WORDS(0x14); 236 INSERT_PADDING_WORDS_NOINIT(0x14);
237 u32 prim_point_xy; 237 u32 prim_point_xy;
238 INSERT_UNION_PADDING_WORDS(0x7); 238 INSERT_PADDING_WORDS_NOINIT(0x7);
239 std::array<Point, 0x40> prim_point; 239 std::array<Point, 0x40> prim_point;
240 } render_solid; 240 } render_solid;
241 struct { 241 struct {
@@ -247,7 +247,7 @@ public:
247 u32 color0; 247 u32 color0;
248 u32 color1; 248 u32 color1;
249 u32 mono_opacity; 249 u32 mono_opacity;
250 INSERT_UNION_PADDING_WORDS(0x6); 250 INSERT_PADDING_WORDS_NOINIT(0x6);
251 u32 src_width; 251 u32 src_width;
252 u32 src_height; 252 u32 src_height;
253 u32 dx_du_frac; 253 u32 dx_du_frac;
@@ -260,9 +260,9 @@ public:
260 u32 dst_y0_int; 260 u32 dst_y0_int;
261 u32 data; 261 u32 data;
262 } pixels_from_cpu; 262 } pixels_from_cpu;
263 INSERT_UNION_PADDING_WORDS(0x3); 263 INSERT_PADDING_WORDS_NOINIT(0x3);
264 u32 big_endian_control; 264 u32 big_endian_control;
265 INSERT_UNION_PADDING_WORDS(0x3); 265 INSERT_PADDING_WORDS_NOINIT(0x3);
266 struct { 266 struct {
267 BitField<0, 3, u32> block_shape; 267 BitField<0, 3, u32> block_shape;
268 BitField<0, 5, u32> corral_size; 268 BitField<0, 5, u32> corral_size;
@@ -271,7 +271,7 @@ public:
271 BitField<0, 1, Origin> origin; 271 BitField<0, 1, Origin> origin;
272 BitField<4, 1, Filter> filter; 272 BitField<4, 1, Filter> filter;
273 } sample_mode; 273 } sample_mode;
274 INSERT_UNION_PADDING_WORDS(0x8); 274 INSERT_PADDING_WORDS_NOINIT(0x8);
275 s32 dst_x0; 275 s32 dst_x0;
276 s32 dst_y0; 276 s32 dst_y0;
277 s32 dst_width; 277 s32 dst_width;
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index 51a041202..9f0a7b76d 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -55,7 +55,7 @@ public:
55 55
56 union { 56 union {
57 struct { 57 struct {
58 INSERT_UNION_PADDING_WORDS(0x60); 58 INSERT_PADDING_WORDS_NOINIT(0x60);
59 59
60 Upload::Registers upload; 60 Upload::Registers upload;
61 61
@@ -67,7 +67,7 @@ public:
67 67
68 u32 data_upload; 68 u32 data_upload;
69 69
70 INSERT_UNION_PADDING_WORDS(0x3F); 70 INSERT_PADDING_WORDS_NOINIT(0x3F);
71 71
72 struct { 72 struct {
73 u32 address; 73 u32 address;
@@ -76,11 +76,11 @@ public:
76 } 76 }
77 } launch_desc_loc; 77 } launch_desc_loc;
78 78
79 INSERT_UNION_PADDING_WORDS(0x1); 79 INSERT_PADDING_WORDS_NOINIT(0x1);
80 80
81 u32 launch; 81 u32 launch;
82 82
83 INSERT_UNION_PADDING_WORDS(0x4A7); 83 INSERT_PADDING_WORDS_NOINIT(0x4A7);
84 84
85 struct { 85 struct {
86 u32 address_high; 86 u32 address_high;
@@ -92,7 +92,7 @@ public:
92 } 92 }
93 } tsc; 93 } tsc;
94 94
95 INSERT_UNION_PADDING_WORDS(0x3); 95 INSERT_PADDING_WORDS_NOINIT(0x3);
96 96
97 struct { 97 struct {
98 u32 address_high; 98 u32 address_high;
@@ -104,7 +104,7 @@ public:
104 } 104 }
105 } tic; 105 } tic;
106 106
107 INSERT_UNION_PADDING_WORDS(0x22); 107 INSERT_PADDING_WORDS_NOINIT(0x22);
108 108
109 struct { 109 struct {
110 u32 address_high; 110 u32 address_high;
@@ -115,11 +115,11 @@ public:
115 } 115 }
116 } code_loc; 116 } code_loc;
117 117
118 INSERT_UNION_PADDING_WORDS(0x3FE); 118 INSERT_PADDING_WORDS_NOINIT(0x3FE);
119 119
120 u32 tex_cb_index; 120 u32 tex_cb_index;
121 121
122 INSERT_UNION_PADDING_WORDS(0x374); 122 INSERT_PADDING_WORDS_NOINIT(0x374);
123 }; 123 };
124 std::array<u32, NUM_REGS> reg_array; 124 std::array<u32, NUM_REGS> reg_array;
125 }; 125 };
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 62483589e..19808a5c6 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -50,7 +50,7 @@ public:
50 50
51 union { 51 union {
52 struct { 52 struct {
53 INSERT_UNION_PADDING_WORDS(0x60); 53 INSERT_PADDING_WORDS_NOINIT(0x60);
54 54
55 Upload::Registers upload; 55 Upload::Registers upload;
56 56
@@ -62,7 +62,7 @@ public:
62 62
63 u32 data; 63 u32 data;
64 64
65 INSERT_UNION_PADDING_WORDS(0x11); 65 INSERT_PADDING_WORDS_NOINIT(0x11);
66 }; 66 };
67 std::array<u32, NUM_REGS> reg_array; 67 std::array<u32, NUM_REGS> reg_array;
68 }; 68 };
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index bf9e07c9b..326b32228 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -536,7 +536,7 @@ public:
536 Equation equation_a; 536 Equation equation_a;
537 Factor factor_source_a; 537 Factor factor_source_a;
538 Factor factor_dest_a; 538 Factor factor_dest_a;
539 INSERT_UNION_PADDING_WORDS(1); 539 INSERT_PADDING_WORDS_NOINIT(1);
540 }; 540 };
541 541
542 enum class TessellationPrimitive : u32 { 542 enum class TessellationPrimitive : u32 {
@@ -608,7 +608,7 @@ public:
608 }; 608 };
609 u32 layer_stride; 609 u32 layer_stride;
610 u32 base_layer; 610 u32 base_layer;
611 INSERT_UNION_PADDING_WORDS(7); 611 INSERT_PADDING_WORDS_NOINIT(7);
612 612
613 GPUVAddr Address() const { 613 GPUVAddr Address() const {
614 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | 614 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
@@ -640,7 +640,7 @@ public:
640 BitField<8, 3, ViewportSwizzle> z; 640 BitField<8, 3, ViewportSwizzle> z;
641 BitField<12, 3, ViewportSwizzle> w; 641 BitField<12, 3, ViewportSwizzle> w;
642 } swizzle; 642 } swizzle;
643 INSERT_UNION_PADDING_WORDS(1); 643 INSERT_PADDING_WORDS_NOINIT(1);
644 644
645 Common::Rectangle<f32> GetRect() const { 645 Common::Rectangle<f32> GetRect() const {
646 return { 646 return {
@@ -700,7 +700,7 @@ public:
700 u32 address_low; 700 u32 address_low;
701 s32 buffer_size; 701 s32 buffer_size;
702 s32 buffer_offset; 702 s32 buffer_offset;
703 INSERT_UNION_PADDING_WORDS(3); 703 INSERT_PADDING_WORDS_NOINIT(3);
704 704
705 GPUVAddr Address() const { 705 GPUVAddr Address() const {
706 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) | 706 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
@@ -713,7 +713,7 @@ public:
713 u32 stream; 713 u32 stream;
714 u32 varying_count; 714 u32 varying_count;
715 u32 stride; 715 u32 stride;
716 INSERT_UNION_PADDING_WORDS(1); 716 INSERT_PADDING_WORDS_NOINIT(1);
717 }; 717 };
718 static_assert(sizeof(TransformFeedbackLayout) == 16); 718 static_assert(sizeof(TransformFeedbackLayout) == 16);
719 719
@@ -731,7 +731,7 @@ public:
731 731
732 union { 732 union {
733 struct { 733 struct {
734 INSERT_UNION_PADDING_WORDS(0x44); 734 INSERT_PADDING_WORDS_NOINIT(0x44);
735 735
736 u32 wait_for_idle; 736 u32 wait_for_idle;
737 737
@@ -744,7 +744,7 @@ public:
744 744
745 ShadowRamControl shadow_ram_control; 745 ShadowRamControl shadow_ram_control;
746 746
747 INSERT_UNION_PADDING_WORDS(0x16); 747 INSERT_PADDING_WORDS_NOINIT(0x16);
748 748
749 Upload::Registers upload; 749 Upload::Registers upload;
750 struct { 750 struct {
@@ -755,11 +755,11 @@ public:
755 755
756 u32 data_upload; 756 u32 data_upload;
757 757
758 INSERT_UNION_PADDING_WORDS(0x16); 758 INSERT_PADDING_WORDS_NOINIT(0x16);
759 759
760 u32 force_early_fragment_tests; 760 u32 force_early_fragment_tests;
761 761
762 INSERT_UNION_PADDING_WORDS(0x2D); 762 INSERT_PADDING_WORDS_NOINIT(0x2D);
763 763
764 struct { 764 struct {
765 union { 765 union {
@@ -769,7 +769,7 @@ public:
769 }; 769 };
770 } sync_info; 770 } sync_info;
771 771
772 INSERT_UNION_PADDING_WORDS(0x15); 772 INSERT_PADDING_WORDS_NOINIT(0x15);
773 773
774 union { 774 union {
775 BitField<0, 2, TessellationPrimitive> prim; 775 BitField<0, 2, TessellationPrimitive> prim;
@@ -781,21 +781,21 @@ public:
781 std::array<f32, 4> tess_level_outer; 781 std::array<f32, 4> tess_level_outer;
782 std::array<f32, 2> tess_level_inner; 782 std::array<f32, 2> tess_level_inner;
783 783
784 INSERT_UNION_PADDING_WORDS(0x10); 784 INSERT_PADDING_WORDS_NOINIT(0x10);
785 785
786 u32 rasterize_enable; 786 u32 rasterize_enable;
787 787
788 std::array<TransformFeedbackBinding, NumTransformFeedbackBuffers> tfb_bindings; 788 std::array<TransformFeedbackBinding, NumTransformFeedbackBuffers> tfb_bindings;
789 789
790 INSERT_UNION_PADDING_WORDS(0xC0); 790 INSERT_PADDING_WORDS_NOINIT(0xC0);
791 791
792 std::array<TransformFeedbackLayout, NumTransformFeedbackBuffers> tfb_layouts; 792 std::array<TransformFeedbackLayout, NumTransformFeedbackBuffers> tfb_layouts;
793 793
794 INSERT_UNION_PADDING_WORDS(0x1); 794 INSERT_PADDING_WORDS_NOINIT(0x1);
795 795
796 u32 tfb_enabled; 796 u32 tfb_enabled;
797 797
798 INSERT_UNION_PADDING_WORDS(0x2E); 798 INSERT_PADDING_WORDS_NOINIT(0x2E);
799 799
800 std::array<RenderTargetConfig, NumRenderTargets> rt; 800 std::array<RenderTargetConfig, NumRenderTargets> rt;
801 801
@@ -803,7 +803,7 @@ public:
803 803
804 std::array<ViewPort, NumViewports> viewports; 804 std::array<ViewPort, NumViewports> viewports;
805 805
806 INSERT_UNION_PADDING_WORDS(0x1D); 806 INSERT_PADDING_WORDS_NOINIT(0x1D);
807 807
808 struct { 808 struct {
809 u32 first; 809 u32 first;
@@ -815,16 +815,16 @@ public:
815 float clear_color[4]; 815 float clear_color[4];
816 float clear_depth; 816 float clear_depth;
817 817
818 INSERT_UNION_PADDING_WORDS(0x3); 818 INSERT_PADDING_WORDS_NOINIT(0x3);
819 819
820 s32 clear_stencil; 820 s32 clear_stencil;
821 821
822 INSERT_UNION_PADDING_WORDS(0x2); 822 INSERT_PADDING_WORDS_NOINIT(0x2);
823 823
824 PolygonMode polygon_mode_front; 824 PolygonMode polygon_mode_front;
825 PolygonMode polygon_mode_back; 825 PolygonMode polygon_mode_back;
826 826
827 INSERT_UNION_PADDING_WORDS(0x3); 827 INSERT_PADDING_WORDS_NOINIT(0x3);
828 828
829 u32 polygon_offset_point_enable; 829 u32 polygon_offset_point_enable;
830 u32 polygon_offset_line_enable; 830 u32 polygon_offset_line_enable;
@@ -832,47 +832,47 @@ public:
832 832
833 u32 patch_vertices; 833 u32 patch_vertices;
834 834
835 INSERT_UNION_PADDING_WORDS(0x4); 835 INSERT_PADDING_WORDS_NOINIT(0x4);
836 836
837 u32 fragment_barrier; 837 u32 fragment_barrier;
838 838
839 INSERT_UNION_PADDING_WORDS(0x7); 839 INSERT_PADDING_WORDS_NOINIT(0x7);
840 840
841 std::array<ScissorTest, NumViewports> scissor_test; 841 std::array<ScissorTest, NumViewports> scissor_test;
842 842
843 INSERT_UNION_PADDING_WORDS(0x15); 843 INSERT_PADDING_WORDS_NOINIT(0x15);
844 844
845 s32 stencil_back_func_ref; 845 s32 stencil_back_func_ref;
846 u32 stencil_back_mask; 846 u32 stencil_back_mask;
847 u32 stencil_back_func_mask; 847 u32 stencil_back_func_mask;
848 848
849 INSERT_UNION_PADDING_WORDS(0x5); 849 INSERT_PADDING_WORDS_NOINIT(0x5);
850 850
851 u32 invalidate_texture_data_cache; 851 u32 invalidate_texture_data_cache;
852 852
853 INSERT_UNION_PADDING_WORDS(0x1); 853 INSERT_PADDING_WORDS_NOINIT(0x1);
854 854
855 u32 tiled_cache_barrier; 855 u32 tiled_cache_barrier;
856 856
857 INSERT_UNION_PADDING_WORDS(0x4); 857 INSERT_PADDING_WORDS_NOINIT(0x4);
858 858
859 u32 color_mask_common; 859 u32 color_mask_common;
860 860
861 INSERT_UNION_PADDING_WORDS(0x2); 861 INSERT_PADDING_WORDS_NOINIT(0x2);
862 862
863 f32 depth_bounds[2]; 863 f32 depth_bounds[2];
864 864
865 INSERT_UNION_PADDING_WORDS(0x2); 865 INSERT_PADDING_WORDS_NOINIT(0x2);
866 866
867 u32 rt_separate_frag_data; 867 u32 rt_separate_frag_data;
868 868
869 INSERT_UNION_PADDING_WORDS(0x1); 869 INSERT_PADDING_WORDS_NOINIT(0x1);
870 870
871 u32 multisample_raster_enable; 871 u32 multisample_raster_enable;
872 u32 multisample_raster_samples; 872 u32 multisample_raster_samples;
873 std::array<u32, 4> multisample_sample_mask; 873 std::array<u32, 4> multisample_sample_mask;
874 874
875 INSERT_UNION_PADDING_WORDS(0x5); 875 INSERT_PADDING_WORDS_NOINIT(0x5);
876 876
877 struct { 877 struct {
878 u32 address_high; 878 u32 address_high;
@@ -898,7 +898,7 @@ public:
898 }; 898 };
899 } render_area; 899 } render_area;
900 900
901 INSERT_UNION_PADDING_WORDS(0x3F); 901 INSERT_PADDING_WORDS_NOINIT(0x3F);
902 902
903 union { 903 union {
904 BitField<0, 4, u32> stencil; 904 BitField<0, 4, u32> stencil;
@@ -907,24 +907,24 @@ public:
907 BitField<12, 4, u32> viewport; 907 BitField<12, 4, u32> viewport;
908 } clear_flags; 908 } clear_flags;
909 909
910 INSERT_UNION_PADDING_WORDS(0x10); 910 INSERT_PADDING_WORDS_NOINIT(0x10);
911 911
912 u32 fill_rectangle; 912 u32 fill_rectangle;
913 913
914 INSERT_UNION_PADDING_WORDS(0x8); 914 INSERT_PADDING_WORDS_NOINIT(0x8);
915 915
916 std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format; 916 std::array<VertexAttribute, NumVertexAttributes> vertex_attrib_format;
917 917
918 std::array<MsaaSampleLocation, 4> multisample_sample_locations; 918 std::array<MsaaSampleLocation, 4> multisample_sample_locations;
919 919
920 INSERT_UNION_PADDING_WORDS(0x2); 920 INSERT_PADDING_WORDS_NOINIT(0x2);
921 921
922 union { 922 union {
923 BitField<0, 1, u32> enable; 923 BitField<0, 1, u32> enable;
924 BitField<4, 3, u32> target; 924 BitField<4, 3, u32> target;
925 } multisample_coverage_to_color; 925 } multisample_coverage_to_color;
926 926
927 INSERT_UNION_PADDING_WORDS(0x8); 927 INSERT_PADDING_WORDS_NOINIT(0x8);
928 928
929 struct { 929 struct {
930 union { 930 union {
@@ -947,7 +947,7 @@ public:
947 } 947 }
948 } rt_control; 948 } rt_control;
949 949
950 INSERT_UNION_PADDING_WORDS(0x2); 950 INSERT_PADDING_WORDS_NOINIT(0x2);
951 951
952 u32 zeta_width; 952 u32 zeta_width;
953 u32 zeta_height; 953 u32 zeta_height;
@@ -958,11 +958,11 @@ public:
958 958
959 SamplerIndex sampler_index; 959 SamplerIndex sampler_index;
960 960
961 INSERT_UNION_PADDING_WORDS(0x25); 961 INSERT_PADDING_WORDS_NOINIT(0x25);
962 962
963 u32 depth_test_enable; 963 u32 depth_test_enable;
964 964
965 INSERT_UNION_PADDING_WORDS(0x5); 965 INSERT_PADDING_WORDS_NOINIT(0x5);
966 966
967 u32 independent_blend_enable; 967 u32 independent_blend_enable;
968 968
@@ -970,7 +970,7 @@ public:
970 970
971 u32 alpha_test_enabled; 971 u32 alpha_test_enabled;
972 972
973 INSERT_UNION_PADDING_WORDS(0x6); 973 INSERT_PADDING_WORDS_NOINIT(0x6);
974 974
975 u32 d3d_cull_mode; 975 u32 d3d_cull_mode;
976 976
@@ -985,7 +985,7 @@ public:
985 float a; 985 float a;
986 } blend_color; 986 } blend_color;
987 987
988 INSERT_UNION_PADDING_WORDS(0x4); 988 INSERT_PADDING_WORDS_NOINIT(0x4);
989 989
990 struct { 990 struct {
991 u32 separate_alpha; 991 u32 separate_alpha;
@@ -994,7 +994,7 @@ public:
994 Blend::Factor factor_dest_rgb; 994 Blend::Factor factor_dest_rgb;
995 Blend::Equation equation_a; 995 Blend::Equation equation_a;
996 Blend::Factor factor_source_a; 996 Blend::Factor factor_source_a;
997 INSERT_UNION_PADDING_WORDS(1); 997 INSERT_PADDING_WORDS_NOINIT(1);
998 Blend::Factor factor_dest_a; 998 Blend::Factor factor_dest_a;
999 999
1000 u32 enable_common; 1000 u32 enable_common;
@@ -1010,7 +1010,7 @@ public:
1010 u32 stencil_front_func_mask; 1010 u32 stencil_front_func_mask;
1011 u32 stencil_front_mask; 1011 u32 stencil_front_mask;
1012 1012
1013 INSERT_UNION_PADDING_WORDS(0x2); 1013 INSERT_PADDING_WORDS_NOINIT(0x2);
1014 1014
1015 u32 frag_color_clamp; 1015 u32 frag_color_clamp;
1016 1016
@@ -1022,17 +1022,17 @@ public:
1022 float line_width_smooth; 1022 float line_width_smooth;
1023 float line_width_aliased; 1023 float line_width_aliased;
1024 1024
1025 INSERT_UNION_PADDING_WORDS(0x1B); 1025 INSERT_PADDING_WORDS_NOINIT(0x1B);
1026 1026
1027 u32 invalidate_sampler_cache_no_wfi; 1027 u32 invalidate_sampler_cache_no_wfi;
1028 u32 invalidate_texture_header_cache_no_wfi; 1028 u32 invalidate_texture_header_cache_no_wfi;
1029 1029
1030 INSERT_UNION_PADDING_WORDS(0x2); 1030 INSERT_PADDING_WORDS_NOINIT(0x2);
1031 1031
1032 u32 vb_element_base; 1032 u32 vb_element_base;
1033 u32 vb_base_instance; 1033 u32 vb_base_instance;
1034 1034
1035 INSERT_UNION_PADDING_WORDS(0x35); 1035 INSERT_PADDING_WORDS_NOINIT(0x35);
1036 1036
1037 u32 clip_distance_enabled; 1037 u32 clip_distance_enabled;
1038 1038
@@ -1040,11 +1040,11 @@ public:
1040 1040
1041 float point_size; 1041 float point_size;
1042 1042
1043 INSERT_UNION_PADDING_WORDS(0x1); 1043 INSERT_PADDING_WORDS_NOINIT(0x1);
1044 1044
1045 u32 point_sprite_enable; 1045 u32 point_sprite_enable;
1046 1046
1047 INSERT_UNION_PADDING_WORDS(0x3); 1047 INSERT_PADDING_WORDS_NOINIT(0x3);
1048 1048
1049 CounterReset counter_reset; 1049 CounterReset counter_reset;
1050 1050
@@ -1057,7 +1057,7 @@ public:
1057 BitField<4, 1, u32> alpha_to_one; 1057 BitField<4, 1, u32> alpha_to_one;
1058 } multisample_control; 1058 } multisample_control;
1059 1059
1060 INSERT_UNION_PADDING_WORDS(0x4); 1060 INSERT_PADDING_WORDS_NOINIT(0x4);
1061 1061
1062 struct { 1062 struct {
1063 u32 address_high; 1063 u32 address_high;
@@ -1081,7 +1081,7 @@ public:
1081 } 1081 }
1082 } tsc; 1082 } tsc;
1083 1083
1084 INSERT_UNION_PADDING_WORDS(0x1); 1084 INSERT_PADDING_WORDS_NOINIT(0x1);
1085 1085
1086 float polygon_offset_factor; 1086 float polygon_offset_factor;
1087 1087
@@ -1098,7 +1098,7 @@ public:
1098 } 1098 }
1099 } tic; 1099 } tic;
1100 1100
1101 INSERT_UNION_PADDING_WORDS(0x5); 1101 INSERT_PADDING_WORDS_NOINIT(0x5);
1102 1102
1103 u32 stencil_two_side_enable; 1103 u32 stencil_two_side_enable;
1104 StencilOp stencil_back_op_fail; 1104 StencilOp stencil_back_op_fail;
@@ -1106,17 +1106,17 @@ public:
1106 StencilOp stencil_back_op_zpass; 1106 StencilOp stencil_back_op_zpass;
1107 ComparisonOp stencil_back_func_func; 1107 ComparisonOp stencil_back_func_func;
1108 1108
1109 INSERT_UNION_PADDING_WORDS(0x4); 1109 INSERT_PADDING_WORDS_NOINIT(0x4);
1110 1110
1111 u32 framebuffer_srgb; 1111 u32 framebuffer_srgb;
1112 1112
1113 float polygon_offset_units; 1113 float polygon_offset_units;
1114 1114
1115 INSERT_UNION_PADDING_WORDS(0x4); 1115 INSERT_PADDING_WORDS_NOINIT(0x4);
1116 1116
1117 Tegra::Texture::MsaaMode multisample_mode; 1117 Tegra::Texture::MsaaMode multisample_mode;
1118 1118
1119 INSERT_UNION_PADDING_WORDS(0xC); 1119 INSERT_PADDING_WORDS_NOINIT(0xC);
1120 1120
1121 union { 1121 union {
1122 BitField<2, 1, u32> coord_origin; 1122 BitField<2, 1, u32> coord_origin;
@@ -1132,7 +1132,7 @@ public:
1132 (static_cast<GPUVAddr>(code_address_high) << 32) | code_address_low); 1132 (static_cast<GPUVAddr>(code_address_high) << 32) | code_address_low);
1133 } 1133 }
1134 } code_address; 1134 } code_address;
1135 INSERT_UNION_PADDING_WORDS(1); 1135 INSERT_PADDING_WORDS_NOINIT(1);
1136 1136
1137 struct { 1137 struct {
1138 u32 vertex_end_gl; 1138 u32 vertex_end_gl;
@@ -1144,14 +1144,14 @@ public:
1144 }; 1144 };
1145 } draw; 1145 } draw;
1146 1146
1147 INSERT_UNION_PADDING_WORDS(0xA); 1147 INSERT_PADDING_WORDS_NOINIT(0xA);
1148 1148
1149 struct { 1149 struct {
1150 u32 enabled; 1150 u32 enabled;
1151 u32 index; 1151 u32 index;
1152 } primitive_restart; 1152 } primitive_restart;
1153 1153
1154 INSERT_UNION_PADDING_WORDS(0x5F); 1154 INSERT_PADDING_WORDS_NOINIT(0x5F);
1155 1155
1156 struct { 1156 struct {
1157 u32 start_addr_high; 1157 u32 start_addr_high;
@@ -1192,9 +1192,9 @@ public:
1192 } 1192 }
1193 } index_array; 1193 } index_array;
1194 1194
1195 INSERT_UNION_PADDING_WORDS(0x7); 1195 INSERT_PADDING_WORDS_NOINIT(0x7);
1196 1196
1197 INSERT_UNION_PADDING_WORDS(0x1F); 1197 INSERT_PADDING_WORDS_NOINIT(0x1F);
1198 1198
1199 float polygon_offset_clamp; 1199 float polygon_offset_clamp;
1200 1200
@@ -1208,14 +1208,14 @@ public:
1208 } 1208 }
1209 } instanced_arrays; 1209 } instanced_arrays;
1210 1210
1211 INSERT_UNION_PADDING_WORDS(0x4); 1211 INSERT_PADDING_WORDS_NOINIT(0x4);
1212 1212
1213 union { 1213 union {
1214 BitField<0, 1, u32> enable; 1214 BitField<0, 1, u32> enable;
1215 BitField<4, 8, u32> unk4; 1215 BitField<4, 8, u32> unk4;
1216 } vp_point_size; 1216 } vp_point_size;
1217 1217
1218 INSERT_UNION_PADDING_WORDS(1); 1218 INSERT_PADDING_WORDS_NOINIT(1);
1219 1219
1220 u32 cull_test_enabled; 1220 u32 cull_test_enabled;
1221 FrontFace front_face; 1221 FrontFace front_face;
@@ -1223,11 +1223,11 @@ public:
1223 1223
1224 u32 pixel_center_integer; 1224 u32 pixel_center_integer;
1225 1225
1226 INSERT_UNION_PADDING_WORDS(0x1); 1226 INSERT_PADDING_WORDS_NOINIT(0x1);
1227 1227
1228 u32 viewport_transform_enabled; 1228 u32 viewport_transform_enabled;
1229 1229
1230 INSERT_UNION_PADDING_WORDS(0x3); 1230 INSERT_PADDING_WORDS_NOINIT(0x3);
1231 1231
1232 union { 1232 union {
1233 BitField<0, 1, u32> depth_range_0_1; 1233 BitField<0, 1, u32> depth_range_0_1;
@@ -1236,18 +1236,18 @@ public:
1236 BitField<11, 1, u32> depth_clamp_disabled; 1236 BitField<11, 1, u32> depth_clamp_disabled;
1237 } view_volume_clip_control; 1237 } view_volume_clip_control;
1238 1238
1239 INSERT_UNION_PADDING_WORDS(0x1F); 1239 INSERT_PADDING_WORDS_NOINIT(0x1F);
1240 1240
1241 u32 depth_bounds_enable; 1241 u32 depth_bounds_enable;
1242 1242
1243 INSERT_UNION_PADDING_WORDS(1); 1243 INSERT_PADDING_WORDS_NOINIT(1);
1244 1244
1245 struct { 1245 struct {
1246 u32 enable; 1246 u32 enable;
1247 LogicOperation operation; 1247 LogicOperation operation;
1248 } logic_op; 1248 } logic_op;
1249 1249
1250 INSERT_UNION_PADDING_WORDS(0x1); 1250 INSERT_PADDING_WORDS_NOINIT(0x1);
1251 1251
1252 union { 1252 union {
1253 u32 raw; 1253 u32 raw;
@@ -1260,9 +1260,9 @@ public:
1260 BitField<6, 4, u32> RT; 1260 BitField<6, 4, u32> RT;
1261 BitField<10, 11, u32> layer; 1261 BitField<10, 11, u32> layer;
1262 } clear_buffers; 1262 } clear_buffers;
1263 INSERT_UNION_PADDING_WORDS(0xB); 1263 INSERT_PADDING_WORDS_NOINIT(0xB);
1264 std::array<ColorMask, NumRenderTargets> color_mask; 1264 std::array<ColorMask, NumRenderTargets> color_mask;
1265 INSERT_UNION_PADDING_WORDS(0x38); 1265 INSERT_PADDING_WORDS_NOINIT(0x38);
1266 1266
1267 struct { 1267 struct {
1268 u32 query_address_high; 1268 u32 query_address_high;
@@ -1284,7 +1284,7 @@ public:
1284 } 1284 }
1285 } query; 1285 } query;
1286 1286
1287 INSERT_UNION_PADDING_WORDS(0x3C); 1287 INSERT_PADDING_WORDS_NOINIT(0x3C);
1288 1288
1289 struct { 1289 struct {
1290 union { 1290 union {
@@ -1325,10 +1325,10 @@ public:
1325 BitField<4, 4, ShaderProgram> program; 1325 BitField<4, 4, ShaderProgram> program;
1326 }; 1326 };
1327 u32 offset; 1327 u32 offset;
1328 INSERT_UNION_PADDING_WORDS(14); 1328 INSERT_PADDING_WORDS_NOINIT(14);
1329 } shader_config[MaxShaderProgram]; 1329 } shader_config[MaxShaderProgram];
1330 1330
1331 INSERT_UNION_PADDING_WORDS(0x60); 1331 INSERT_PADDING_WORDS_NOINIT(0x60);
1332 1332
1333 u32 firmware[0x20]; 1333 u32 firmware[0x20];
1334 1334
@@ -1345,7 +1345,7 @@ public:
1345 } 1345 }
1346 } const_buffer; 1346 } const_buffer;
1347 1347
1348 INSERT_UNION_PADDING_WORDS(0x10); 1348 INSERT_PADDING_WORDS_NOINIT(0x10);
1349 1349
1350 struct { 1350 struct {
1351 union { 1351 union {
@@ -1353,18 +1353,18 @@ public:
1353 BitField<0, 1, u32> valid; 1353 BitField<0, 1, u32> valid;
1354 BitField<4, 5, u32> index; 1354 BitField<4, 5, u32> index;
1355 }; 1355 };
1356 INSERT_UNION_PADDING_WORDS(7); 1356 INSERT_PADDING_WORDS_NOINIT(7);
1357 } cb_bind[MaxShaderStage]; 1357 } cb_bind[MaxShaderStage];
1358 1358
1359 INSERT_UNION_PADDING_WORDS(0x56); 1359 INSERT_PADDING_WORDS_NOINIT(0x56);
1360 1360
1361 u32 tex_cb_index; 1361 u32 tex_cb_index;
1362 1362
1363 INSERT_UNION_PADDING_WORDS(0x7D); 1363 INSERT_PADDING_WORDS_NOINIT(0x7D);
1364 1364
1365 std::array<std::array<u8, 128>, NumTransformFeedbackBuffers> tfb_varying_locs; 1365 std::array<std::array<u8, 128>, NumTransformFeedbackBuffers> tfb_varying_locs;
1366 1366
1367 INSERT_UNION_PADDING_WORDS(0x298); 1367 INSERT_PADDING_WORDS_NOINIT(0x298);
1368 1368
1369 struct { 1369 struct {
1370 /// Compressed address of a buffer that holds information about bound SSBOs. 1370 /// Compressed address of a buffer that holds information about bound SSBOs.
@@ -1376,14 +1376,14 @@ public:
1376 } 1376 }
1377 } ssbo_info; 1377 } ssbo_info;
1378 1378
1379 INSERT_UNION_PADDING_WORDS(0x11); 1379 INSERT_PADDING_WORDS_NOINIT(0x11);
1380 1380
1381 struct { 1381 struct {
1382 u32 address[MaxShaderStage]; 1382 u32 address[MaxShaderStage];
1383 u32 size[MaxShaderStage]; 1383 u32 size[MaxShaderStage];
1384 } tex_info_buffers; 1384 } tex_info_buffers;
1385 1385
1386 INSERT_UNION_PADDING_WORDS(0xCC); 1386 INSERT_PADDING_WORDS_NOINIT(0xCC);
1387 }; 1387 };
1388 std::array<u32, NUM_REGS> reg_array; 1388 std::array<u32, NUM_REGS> reg_array;
1389 }; 1389 };
diff --git a/src/video_core/engines/shader_header.h b/src/video_core/engines/shader_header.h
index ceec05459..e0d7b89c5 100644
--- a/src/video_core/engines/shader_header.h
+++ b/src/video_core/engines/shader_header.h
@@ -68,10 +68,10 @@ struct Header {
68 68
69 union { 69 union {
70 struct { 70 struct {
71 INSERT_UNION_PADDING_BYTES(3); // ImapSystemValuesA 71 INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
72 INSERT_UNION_PADDING_BYTES(1); // ImapSystemValuesB 72 INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB
73 INSERT_UNION_PADDING_BYTES(16); // ImapGenericVector[32] 73 INSERT_PADDING_BYTES_NOINIT(16); // ImapGenericVector[32]
74 INSERT_UNION_PADDING_BYTES(2); // ImapColor 74 INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
75 union { 75 union {
76 BitField<0, 8, u16> clip_distances; 76 BitField<0, 8, u16> clip_distances;
77 BitField<8, 1, u16> point_sprite_s; 77 BitField<8, 1, u16> point_sprite_s;
@@ -82,20 +82,20 @@ struct Header {
82 BitField<14, 1, u16> instance_id; 82 BitField<14, 1, u16> instance_id;
83 BitField<15, 1, u16> vertex_id; 83 BitField<15, 1, u16> vertex_id;
84 }; 84 };
85 INSERT_UNION_PADDING_BYTES(5); // ImapFixedFncTexture[10] 85 INSERT_PADDING_BYTES_NOINIT(5); // ImapFixedFncTexture[10]
86 INSERT_UNION_PADDING_BYTES(1); // ImapReserved 86 INSERT_PADDING_BYTES_NOINIT(1); // ImapReserved
87 INSERT_UNION_PADDING_BYTES(3); // OmapSystemValuesA 87 INSERT_PADDING_BYTES_NOINIT(3); // OmapSystemValuesA
88 INSERT_UNION_PADDING_BYTES(1); // OmapSystemValuesB 88 INSERT_PADDING_BYTES_NOINIT(1); // OmapSystemValuesB
89 INSERT_UNION_PADDING_BYTES(16); // OmapGenericVector[32] 89 INSERT_PADDING_BYTES_NOINIT(16); // OmapGenericVector[32]
90 INSERT_UNION_PADDING_BYTES(2); // OmapColor 90 INSERT_PADDING_BYTES_NOINIT(2); // OmapColor
91 INSERT_UNION_PADDING_BYTES(2); // OmapSystemValuesC 91 INSERT_PADDING_BYTES_NOINIT(2); // OmapSystemValuesC
92 INSERT_UNION_PADDING_BYTES(5); // OmapFixedFncTexture[10] 92 INSERT_PADDING_BYTES_NOINIT(5); // OmapFixedFncTexture[10]
93 INSERT_UNION_PADDING_BYTES(1); // OmapReserved 93 INSERT_PADDING_BYTES_NOINIT(1); // OmapReserved
94 } vtg; 94 } vtg;
95 95
96 struct { 96 struct {
97 INSERT_UNION_PADDING_BYTES(3); // ImapSystemValuesA 97 INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
98 INSERT_UNION_PADDING_BYTES(1); // ImapSystemValuesB 98 INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB
99 99
100 union { 100 union {
101 BitField<0, 2, PixelImap> x; 101 BitField<0, 2, PixelImap> x;
@@ -105,10 +105,10 @@ struct Header {
105 u8 raw; 105 u8 raw;
106 } imap_generic_vector[32]; 106 } imap_generic_vector[32];
107 107
108 INSERT_UNION_PADDING_BYTES(2); // ImapColor 108 INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
109 INSERT_UNION_PADDING_BYTES(2); // ImapSystemValuesC 109 INSERT_PADDING_BYTES_NOINIT(2); // ImapSystemValuesC
110 INSERT_UNION_PADDING_BYTES(10); // ImapFixedFncTexture[10] 110 INSERT_PADDING_BYTES_NOINIT(10); // ImapFixedFncTexture[10]
111 INSERT_UNION_PADDING_BYTES(2); // ImapReserved 111 INSERT_PADDING_BYTES_NOINIT(2); // ImapReserved
112 112
113 struct { 113 struct {
114 u32 target; 114 u32 target;
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index d81e38680..b4ce6b154 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -270,7 +270,7 @@ public:
270 270
271 union { 271 union {
272 struct { 272 struct {
273 INSERT_UNION_PADDING_WORDS(0x4); 273 INSERT_PADDING_WORDS_NOINIT(0x4);
274 struct { 274 struct {
275 u32 address_high; 275 u32 address_high;
276 u32 address_low; 276 u32 address_low;
@@ -283,18 +283,18 @@ public:
283 283
284 u32 semaphore_sequence; 284 u32 semaphore_sequence;
285 u32 semaphore_trigger; 285 u32 semaphore_trigger;
286 INSERT_UNION_PADDING_WORDS(0xC); 286 INSERT_PADDING_WORDS_NOINIT(0xC);
287 287
288 // The pusher and the puller share the reference counter, the pusher only has read 288 // The pusher and the puller share the reference counter, the pusher only has read
289 // access 289 // access
290 u32 reference_count; 290 u32 reference_count;
291 INSERT_UNION_PADDING_WORDS(0x5); 291 INSERT_PADDING_WORDS_NOINIT(0x5);
292 292
293 u32 semaphore_acquire; 293 u32 semaphore_acquire;
294 u32 semaphore_release; 294 u32 semaphore_release;
295 u32 fence_value; 295 u32 fence_value;
296 FenceAction fence_action; 296 FenceAction fence_action;
297 INSERT_UNION_PADDING_WORDS(0xE2); 297 INSERT_PADDING_WORDS_NOINIT(0xE2);
298 298
299 // Puller state 299 // Puller state
300 u32 acquire_mode; 300 u32 acquire_mode;
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index d7437e185..61796e33a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -23,7 +23,6 @@
23#include "video_core/renderer_vulkan/renderer_vulkan.h" 23#include "video_core/renderer_vulkan/renderer_vulkan.h"
24#include "video_core/renderer_vulkan/vk_blit_screen.h" 24#include "video_core/renderer_vulkan/vk_blit_screen.h"
25#include "video_core/renderer_vulkan/vk_master_semaphore.h" 25#include "video_core/renderer_vulkan/vk_master_semaphore.h"
26#include "video_core/renderer_vulkan/vk_memory_manager.h"
27#include "video_core/renderer_vulkan/vk_rasterizer.h" 26#include "video_core/renderer_vulkan/vk_rasterizer.h"
28#include "video_core/renderer_vulkan/vk_scheduler.h" 27#include "video_core/renderer_vulkan/vk_scheduler.h"
29#include "video_core/renderer_vulkan/vk_state_tracker.h" 28#include "video_core/renderer_vulkan/vk_state_tracker.h"
@@ -32,6 +31,7 @@
32#include "video_core/vulkan_common/vulkan_device.h" 31#include "video_core/vulkan_common/vulkan_device.h"
33#include "video_core/vulkan_common/vulkan_instance.h" 32#include "video_core/vulkan_common/vulkan_instance.h"
34#include "video_core/vulkan_common/vulkan_library.h" 33#include "video_core/vulkan_common/vulkan_library.h"
34#include "video_core/vulkan_common/vulkan_memory_allocator.h"
35#include "video_core/vulkan_common/vulkan_surface.h" 35#include "video_core/vulkan_common/vulkan_surface.h"
36#include "video_core/vulkan_common/vulkan_wrapper.h" 36#include "video_core/vulkan_common/vulkan_wrapper.h"
37 37
@@ -137,7 +137,7 @@ bool RendererVulkan::Init() try {
137 InitializeDevice(); 137 InitializeDevice();
138 Report(); 138 Report();
139 139
140 memory_manager = std::make_unique<VKMemoryManager>(*device); 140 memory_allocator = std::make_unique<MemoryAllocator>(*device);
141 141
142 state_tracker = std::make_unique<StateTracker>(gpu); 142 state_tracker = std::make_unique<StateTracker>(gpu);
143 143
@@ -149,11 +149,11 @@ bool RendererVulkan::Init() try {
149 149
150 rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(), 150 rasterizer = std::make_unique<RasterizerVulkan>(render_window, gpu, gpu.MemoryManager(),
151 cpu_memory, screen_info, *device, 151 cpu_memory, screen_info, *device,
152 *memory_manager, *state_tracker, *scheduler); 152 *memory_allocator, *state_tracker, *scheduler);
153 153
154 blit_screen = 154 blit_screen =
155 std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device, 155 std::make_unique<VKBlitScreen>(cpu_memory, render_window, *rasterizer, *device,
156 *memory_manager, *swapchain, *scheduler, screen_info); 156 *memory_allocator, *swapchain, *scheduler, screen_info);
157 return true; 157 return true;
158 158
159} catch (const vk::Exception& exception) { 159} catch (const vk::Exception& exception) {
@@ -172,7 +172,7 @@ void RendererVulkan::ShutDown() {
172 blit_screen.reset(); 172 blit_screen.reset();
173 scheduler.reset(); 173 scheduler.reset();
174 swapchain.reset(); 174 swapchain.reset();
175 memory_manager.reset(); 175 memory_allocator.reset();
176 device.reset(); 176 device.reset();
177} 177}
178 178
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 5575ffc54..daf55b9b4 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -29,8 +29,8 @@ namespace Vulkan {
29 29
30class Device; 30class Device;
31class StateTracker; 31class StateTracker;
32class MemoryAllocator;
32class VKBlitScreen; 33class VKBlitScreen;
33class VKMemoryManager;
34class VKSwapchain; 34class VKSwapchain;
35class VKScheduler; 35class VKScheduler;
36 36
@@ -75,7 +75,7 @@ private:
75 75
76 vk::DebugUtilsMessenger debug_callback; 76 vk::DebugUtilsMessenger debug_callback;
77 std::unique_ptr<Device> device; 77 std::unique_ptr<Device> device;
78 std::unique_ptr<VKMemoryManager> memory_manager; 78 std::unique_ptr<MemoryAllocator> memory_allocator;
79 std::unique_ptr<StateTracker> state_tracker; 79 std::unique_ptr<StateTracker> state_tracker;
80 std::unique_ptr<VKScheduler> scheduler; 80 std::unique_ptr<VKScheduler> scheduler;
81 std::unique_ptr<VKSwapchain> swapchain; 81 std::unique_ptr<VKSwapchain> swapchain;
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 5e184eb42..3e3b895e0 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -22,13 +22,13 @@
22#include "video_core/renderer_vulkan/renderer_vulkan.h" 22#include "video_core/renderer_vulkan/renderer_vulkan.h"
23#include "video_core/renderer_vulkan/vk_blit_screen.h" 23#include "video_core/renderer_vulkan/vk_blit_screen.h"
24#include "video_core/renderer_vulkan/vk_master_semaphore.h" 24#include "video_core/renderer_vulkan/vk_master_semaphore.h"
25#include "video_core/renderer_vulkan/vk_memory_manager.h"
26#include "video_core/renderer_vulkan/vk_scheduler.h" 25#include "video_core/renderer_vulkan/vk_scheduler.h"
27#include "video_core/renderer_vulkan/vk_shader_util.h" 26#include "video_core/renderer_vulkan/vk_shader_util.h"
28#include "video_core/renderer_vulkan/vk_swapchain.h" 27#include "video_core/renderer_vulkan/vk_swapchain.h"
29#include "video_core/surface.h" 28#include "video_core/surface.h"
30#include "video_core/textures/decoders.h" 29#include "video_core/textures/decoders.h"
31#include "video_core/vulkan_common/vulkan_device.h" 30#include "video_core/vulkan_common/vulkan_device.h"
31#include "video_core/vulkan_common/vulkan_memory_allocator.h"
32#include "video_core/vulkan_common/vulkan_wrapper.h" 32#include "video_core/vulkan_common/vulkan_wrapper.h"
33 33
34namespace Vulkan { 34namespace Vulkan {
@@ -115,10 +115,10 @@ struct VKBlitScreen::BufferData {
115VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_, 115VKBlitScreen::VKBlitScreen(Core::Memory::Memory& cpu_memory_,
116 Core::Frontend::EmuWindow& render_window_, 116 Core::Frontend::EmuWindow& render_window_,
117 VideoCore::RasterizerInterface& rasterizer_, const Device& device_, 117 VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
118 VKMemoryManager& memory_manager_, VKSwapchain& swapchain_, 118 MemoryAllocator& memory_allocator_, VKSwapchain& swapchain_,
119 VKScheduler& scheduler_, const VKScreenInfo& screen_info_) 119 VKScheduler& scheduler_, const VKScreenInfo& screen_info_)
120 : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_}, 120 : cpu_memory{cpu_memory_}, render_window{render_window_}, rasterizer{rasterizer_},
121 device{device_}, memory_manager{memory_manager_}, swapchain{swapchain_}, 121 device{device_}, memory_allocator{memory_allocator_}, swapchain{swapchain_},
122 scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} { 122 scheduler{scheduler_}, image_count{swapchain.GetImageCount()}, screen_info{screen_info_} {
123 resource_ticks.resize(image_count); 123 resource_ticks.resize(image_count);
124 124
@@ -150,8 +150,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
150 SetUniformData(data, framebuffer); 150 SetUniformData(data, framebuffer);
151 SetVertexData(data, framebuffer); 151 SetVertexData(data, framebuffer);
152 152
153 auto map = buffer_commit->Map(); 153 const std::span<u8> map = buffer_commit.Map();
154 std::memcpy(map.Address(), &data, sizeof(data)); 154 std::memcpy(map.data(), &data, sizeof(data));
155 155
156 if (!use_accelerated) { 156 if (!use_accelerated) {
157 const u64 image_offset = GetRawImageOffset(framebuffer, image_index); 157 const u64 image_offset = GetRawImageOffset(framebuffer, image_index);
@@ -165,8 +165,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
165 constexpr u32 block_height_log2 = 4; 165 constexpr u32 block_height_log2 = 4;
166 const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); 166 const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer);
167 Tegra::Texture::UnswizzleTexture( 167 Tegra::Texture::UnswizzleTexture(
168 std::span(map.Address() + image_offset, size_bytes), std::span(host_ptr, size_bytes), 168 map.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), bytes_per_pixel,
169 bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); 169 framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
170 170
171 const VkBufferImageCopy copy{ 171 const VkBufferImageCopy copy{
172 .bufferOffset = image_offset, 172 .bufferOffset = image_offset,
@@ -224,8 +224,6 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
224 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier); 224 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
225 }); 225 });
226 } 226 }
227 map.Release();
228
229 scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index], 227 scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
230 descriptor_set = descriptor_sets[image_index], buffer = *buffer, 228 descriptor_set = descriptor_sets[image_index], buffer = *buffer,
231 size = swapchain.GetSize(), pipeline = *pipeline, 229 size = swapchain.GetSize(), pipeline = *pipeline,
@@ -642,7 +640,7 @@ void VKBlitScreen::ReleaseRawImages() {
642 raw_images.clear(); 640 raw_images.clear();
643 raw_buffer_commits.clear(); 641 raw_buffer_commits.clear();
644 buffer.reset(); 642 buffer.reset();
645 buffer_commit.reset(); 643 buffer_commit = MemoryCommit{};
646} 644}
647 645
648void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) { 646void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
@@ -659,7 +657,7 @@ void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuff
659 }; 657 };
660 658
661 buffer = device.GetLogical().CreateBuffer(ci); 659 buffer = device.GetLogical().CreateBuffer(ci);
662 buffer_commit = memory_manager.Commit(buffer, true); 660 buffer_commit = memory_allocator.Commit(buffer, MemoryUsage::Upload);
663} 661}
664 662
665void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) { 663void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) {
@@ -690,7 +688,7 @@ void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer)
690 .pQueueFamilyIndices = nullptr, 688 .pQueueFamilyIndices = nullptr,
691 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED, 689 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
692 }); 690 });
693 raw_buffer_commits[i] = memory_manager.Commit(raw_images[i], false); 691 raw_buffer_commits[i] = memory_allocator.Commit(raw_images[i], MemoryUsage::DeviceLocal);
694 raw_image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{ 692 raw_image_views[i] = device.GetLogical().CreateImageView(VkImageViewCreateInfo{
695 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, 693 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
696 .pNext = nullptr, 694 .pNext = nullptr,
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 69ed61770..b52576957 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -6,7 +6,7 @@
6 6
7#include <memory> 7#include <memory>
8 8
9#include "video_core/renderer_vulkan/vk_memory_manager.h" 9#include "video_core/vulkan_common/vulkan_memory_allocator.h"
10#include "video_core/vulkan_common/vulkan_wrapper.h" 10#include "video_core/vulkan_common/vulkan_wrapper.h"
11 11
12namespace Core { 12namespace Core {
@@ -43,7 +43,7 @@ public:
43 explicit VKBlitScreen(Core::Memory::Memory& cpu_memory, 43 explicit VKBlitScreen(Core::Memory::Memory& cpu_memory,
44 Core::Frontend::EmuWindow& render_window, 44 Core::Frontend::EmuWindow& render_window,
45 VideoCore::RasterizerInterface& rasterizer, const Device& device, 45 VideoCore::RasterizerInterface& rasterizer, const Device& device,
46 VKMemoryManager& memory_manager, VKSwapchain& swapchain, 46 MemoryAllocator& memory_allocator, VKSwapchain& swapchain,
47 VKScheduler& scheduler, const VKScreenInfo& screen_info); 47 VKScheduler& scheduler, const VKScreenInfo& screen_info);
48 ~VKBlitScreen(); 48 ~VKBlitScreen();
49 49
@@ -86,7 +86,7 @@ private:
86 Core::Frontend::EmuWindow& render_window; 86 Core::Frontend::EmuWindow& render_window;
87 VideoCore::RasterizerInterface& rasterizer; 87 VideoCore::RasterizerInterface& rasterizer;
88 const Device& device; 88 const Device& device;
89 VKMemoryManager& memory_manager; 89 MemoryAllocator& memory_allocator;
90 VKSwapchain& swapchain; 90 VKSwapchain& swapchain;
91 VKScheduler& scheduler; 91 VKScheduler& scheduler;
92 const std::size_t image_count; 92 const std::size_t image_count;
@@ -104,14 +104,14 @@ private:
104 vk::Sampler sampler; 104 vk::Sampler sampler;
105 105
106 vk::Buffer buffer; 106 vk::Buffer buffer;
107 VKMemoryCommit buffer_commit; 107 MemoryCommit buffer_commit;
108 108
109 std::vector<u64> resource_ticks; 109 std::vector<u64> resource_ticks;
110 110
111 std::vector<vk::Semaphore> semaphores; 111 std::vector<vk::Semaphore> semaphores;
112 std::vector<vk::Image> raw_images; 112 std::vector<vk::Image> raw_images;
113 std::vector<vk::ImageView> raw_image_views; 113 std::vector<vk::ImageView> raw_image_views;
114 std::vector<VKMemoryCommit> raw_buffer_commits; 114 std::vector<MemoryCommit> raw_buffer_commits;
115 u32 raw_width = 0; 115 u32 raw_width = 0;
116 u32 raw_height = 0; 116 u32 raw_height = 0;
117}; 117};
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 4d517c547..d8ad40a0f 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -34,17 +34,13 @@ constexpr VkAccessFlags UPLOAD_ACCESS_BARRIERS =
34constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS = 34constexpr VkAccessFlags TRANSFORM_FEEDBACK_WRITE_ACCESS =
35 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT; 35 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT;
36 36
37std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const Device& device, VKScheduler& scheduler) {
38 return std::make_unique<VKStreamBuffer>(device, scheduler);
39}
40
41} // Anonymous namespace 37} // Anonymous namespace
42 38
43Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKScheduler& scheduler_, 39Buffer::Buffer(const Device& device_, MemoryAllocator& memory_allocator, VKScheduler& scheduler_,
44 VKStagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_) 40 StagingBufferPool& staging_pool_, VAddr cpu_addr_, std::size_t size_)
45 : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{ 41 : BufferBlock{cpu_addr_, size_}, device{device_}, scheduler{scheduler_}, staging_pool{
46 staging_pool_} { 42 staging_pool_} {
47 const VkBufferCreateInfo ci{ 43 buffer = device.GetLogical().CreateBuffer(VkBufferCreateInfo{
48 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, 44 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
49 .pNext = nullptr, 45 .pNext = nullptr,
50 .flags = 0, 46 .flags = 0,
@@ -53,22 +49,20 @@ Buffer::Buffer(const Device& device_, VKMemoryManager& memory_manager, VKSchedul
53 .sharingMode = VK_SHARING_MODE_EXCLUSIVE, 49 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
54 .queueFamilyIndexCount = 0, 50 .queueFamilyIndexCount = 0,
55 .pQueueFamilyIndices = nullptr, 51 .pQueueFamilyIndices = nullptr,
56 }; 52 });
57 53 commit = memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
58 buffer.handle = device.GetLogical().CreateBuffer(ci);
59 buffer.commit = memory_manager.Commit(buffer.handle, false);
60} 54}
61 55
62Buffer::~Buffer() = default; 56Buffer::~Buffer() = default;
63 57
64void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) { 58void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
65 const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); 59 const auto& staging = staging_pool.Request(data_size, MemoryUsage::Upload);
66 std::memcpy(staging.commit->Map(data_size), data, data_size); 60 std::memcpy(staging.mapped_span.data(), data, data_size);
67 61
68 scheduler.RequestOutsideRenderPassOperationContext(); 62 scheduler.RequestOutsideRenderPassOperationContext();
69 63
70 const VkBuffer handle = Handle(); 64 const VkBuffer handle = Handle();
71 scheduler.Record([staging = *staging.handle, handle, offset, data_size, 65 scheduler.Record([staging = staging.buffer, handle, offset, data_size,
72 &device = device](vk::CommandBuffer cmdbuf) { 66 &device = device](vk::CommandBuffer cmdbuf) {
73 const VkBufferMemoryBarrier read_barrier{ 67 const VkBufferMemoryBarrier read_barrier{
74 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, 68 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
@@ -104,12 +98,12 @@ void Buffer::Upload(std::size_t offset, std::size_t data_size, const u8* data) {
104} 98}
105 99
106void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) { 100void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
107 const auto& staging = staging_pool.GetUnusedBuffer(data_size, true); 101 auto staging = staging_pool.Request(data_size, MemoryUsage::Download);
108 scheduler.RequestOutsideRenderPassOperationContext(); 102 scheduler.RequestOutsideRenderPassOperationContext();
109 103
110 const VkBuffer handle = Handle(); 104 const VkBuffer handle = Handle();
111 scheduler.Record( 105 scheduler.Record(
112 [staging = *staging.handle, handle, offset, data_size](vk::CommandBuffer cmdbuf) { 106 [staging = staging.buffer, handle, offset, data_size](vk::CommandBuffer cmdbuf) {
113 const VkBufferMemoryBarrier barrier{ 107 const VkBufferMemoryBarrier barrier{
114 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, 108 .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
115 .pNext = nullptr, 109 .pNext = nullptr,
@@ -130,7 +124,7 @@ void Buffer::Download(std::size_t offset, std::size_t data_size, u8* data) {
130 }); 124 });
131 scheduler.Finish(); 125 scheduler.Finish();
132 126
133 std::memcpy(data, staging.commit->Map(data_size), data_size); 127 std::memcpy(data, staging.mapped_span.data(), data_size);
134} 128}
135 129
136void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset, 130void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst_offset,
@@ -168,29 +162,29 @@ void Buffer::CopyFrom(const Buffer& src, std::size_t src_offset, std::size_t dst
168 162
169VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_, 163VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer_,
170 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, 164 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
171 const Device& device_, VKMemoryManager& memory_manager_, 165 const Device& device_, MemoryAllocator& memory_allocator_,
172 VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_, 166 VKScheduler& scheduler_, VKStreamBuffer& stream_buffer_,
173 VKStagingBufferPool& staging_pool_) 167 StagingBufferPool& staging_pool_)
174 : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_, 168 : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer_, gpu_memory_,
175 cpu_memory_, stream_buffer_}, 169 cpu_memory_, stream_buffer_},
176 device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_}, staging_pool{ 170 device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_},
177 staging_pool_} {} 171 staging_pool{staging_pool_} {}
178 172
179VKBufferCache::~VKBufferCache() = default; 173VKBufferCache::~VKBufferCache() = default;
180 174
181std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) { 175std::shared_ptr<Buffer> VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
182 return std::make_shared<Buffer>(device, memory_manager, scheduler, staging_pool, cpu_addr, 176 return std::make_shared<Buffer>(device, memory_allocator, scheduler, staging_pool, cpu_addr,
183 size); 177 size);
184} 178}
185 179
186VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) { 180VKBufferCache::BufferInfo VKBufferCache::GetEmptyBuffer(std::size_t size) {
187 size = std::max(size, std::size_t(4)); 181 size = std::max(size, std::size_t(4));
188 const auto& empty = staging_pool.GetUnusedBuffer(size, false); 182 const auto& empty = staging_pool.Request(size, MemoryUsage::DeviceLocal);
189 scheduler.RequestOutsideRenderPassOperationContext(); 183 scheduler.RequestOutsideRenderPassOperationContext();
190 scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) { 184 scheduler.Record([size, buffer = empty.buffer](vk::CommandBuffer cmdbuf) {
191 cmdbuf.FillBuffer(buffer, 0, size, 0); 185 cmdbuf.FillBuffer(buffer, 0, size, 0);
192 }); 186 });
193 return {*empty.handle, 0, 0}; 187 return {empty.buffer, 0, 0};
194} 188}
195 189
196} // namespace Vulkan 190} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 1c39aed34..41d577510 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -8,21 +8,20 @@
8 8
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "video_core/buffer_cache/buffer_cache.h" 10#include "video_core/buffer_cache/buffer_cache.h"
11#include "video_core/renderer_vulkan/vk_memory_manager.h"
12#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 11#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
13#include "video_core/renderer_vulkan/vk_stream_buffer.h" 12#include "video_core/renderer_vulkan/vk_stream_buffer.h"
13#include "video_core/vulkan_common/vulkan_memory_allocator.h"
14#include "video_core/vulkan_common/vulkan_wrapper.h" 14#include "video_core/vulkan_common/vulkan_wrapper.h"
15 15
16namespace Vulkan { 16namespace Vulkan {
17 17
18class Device; 18class Device;
19class VKMemoryManager;
20class VKScheduler; 19class VKScheduler;
21 20
22class Buffer final : public VideoCommon::BufferBlock { 21class Buffer final : public VideoCommon::BufferBlock {
23public: 22public:
24 explicit Buffer(const Device& device, VKMemoryManager& memory_manager, VKScheduler& scheduler, 23 explicit Buffer(const Device& device, MemoryAllocator& memory_allocator, VKScheduler& scheduler,
25 VKStagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_); 24 StagingBufferPool& staging_pool, VAddr cpu_addr_, std::size_t size_);
26 ~Buffer(); 25 ~Buffer();
27 26
28 void Upload(std::size_t offset, std::size_t data_size, const u8* data); 27 void Upload(std::size_t offset, std::size_t data_size, const u8* data);
@@ -33,7 +32,7 @@ public:
33 std::size_t copy_size); 32 std::size_t copy_size);
34 33
35 VkBuffer Handle() const { 34 VkBuffer Handle() const {
36 return *buffer.handle; 35 return *buffer;
37 } 36 }
38 37
39 u64 Address() const { 38 u64 Address() const {
@@ -43,18 +42,19 @@ public:
43private: 42private:
44 const Device& device; 43 const Device& device;
45 VKScheduler& scheduler; 44 VKScheduler& scheduler;
46 VKStagingBufferPool& staging_pool; 45 StagingBufferPool& staging_pool;
47 46
48 VKBuffer buffer; 47 vk::Buffer buffer;
48 MemoryCommit commit;
49}; 49};
50 50
51class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> { 51class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
52public: 52public:
53 explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, 53 explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer,
54 Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory, 54 Tegra::MemoryManager& gpu_memory, Core::Memory::Memory& cpu_memory,
55 const Device& device, VKMemoryManager& memory_manager, 55 const Device& device, MemoryAllocator& memory_allocator,
56 VKScheduler& scheduler, VKStreamBuffer& stream_buffer, 56 VKScheduler& scheduler, VKStreamBuffer& stream_buffer,
57 VKStagingBufferPool& staging_pool); 57 StagingBufferPool& staging_pool);
58 ~VKBufferCache(); 58 ~VKBufferCache();
59 59
60 BufferInfo GetEmptyBuffer(std::size_t size) override; 60 BufferInfo GetEmptyBuffer(std::size_t size) override;
@@ -64,9 +64,9 @@ protected:
64 64
65private: 65private:
66 const Device& device; 66 const Device& device;
67 VKMemoryManager& memory_manager; 67 MemoryAllocator& memory_allocator;
68 VKScheduler& scheduler; 68 VKScheduler& scheduler;
69 VKStagingBufferPool& staging_pool; 69 StagingBufferPool& staging_pool;
70}; 70};
71 71
72} // namespace Vulkan 72} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 02a6d54b7..5eb6a54be 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -164,7 +164,7 @@ VkDescriptorSet VKComputePass::CommitDescriptorSet(
164 164
165QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_, 165QuadArrayPass::QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
166 VKDescriptorPool& descriptor_pool_, 166 VKDescriptorPool& descriptor_pool_,
167 VKStagingBufferPool& staging_buffer_pool_, 167 StagingBufferPool& staging_buffer_pool_,
168 VKUpdateDescriptorQueue& update_descriptor_queue_) 168 VKUpdateDescriptorQueue& update_descriptor_queue_)
169 : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(), 169 : VKComputePass(device_, descriptor_pool_, BuildQuadArrayPassDescriptorSetLayoutBinding(),
170 BuildQuadArrayPassDescriptorUpdateTemplateEntry(), 170 BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
@@ -177,18 +177,18 @@ QuadArrayPass::~QuadArrayPass() = default;
177std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { 177std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
178 const u32 num_triangle_vertices = (num_vertices / 4) * 6; 178 const u32 num_triangle_vertices = (num_vertices / 4) * 6;
179 const std::size_t staging_size = num_triangle_vertices * sizeof(u32); 179 const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
180 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); 180 const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
181 181
182 update_descriptor_queue.Acquire(); 182 update_descriptor_queue.Acquire();
183 update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); 183 update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
184 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); 184 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
185 185
186 scheduler.RequestOutsideRenderPassOperationContext(); 186 scheduler.RequestOutsideRenderPassOperationContext();
187 187
188 ASSERT(num_vertices % 4 == 0); 188 ASSERT(num_vertices % 4 == 0);
189 const u32 num_quads = num_vertices / 4; 189 const u32 num_quads = num_vertices / 4;
190 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads, 190 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer,
191 first, set](vk::CommandBuffer cmdbuf) { 191 num_quads, first, set](vk::CommandBuffer cmdbuf) {
192 constexpr u32 dispatch_size = 1024; 192 constexpr u32 dispatch_size = 1024;
193 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); 193 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
194 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {}); 194 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
@@ -208,11 +208,11 @@ std::pair<VkBuffer, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32
208 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 208 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
209 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {}); 209 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
210 }); 210 });
211 return {*buffer.handle, 0}; 211 return {staging_ref.buffer, 0};
212} 212}
213 213
214Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_, 214Uint8Pass::Uint8Pass(const Device& device, VKScheduler& scheduler_,
215 VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool_, 215 VKDescriptorPool& descriptor_pool, StagingBufferPool& staging_buffer_pool_,
216 VKUpdateDescriptorQueue& update_descriptor_queue_) 216 VKUpdateDescriptorQueue& update_descriptor_queue_)
217 : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(), 217 : VKComputePass(device, descriptor_pool, BuildInputOutputDescriptorSetBindings(),
218 BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV), 218 BuildInputOutputDescriptorUpdateTemplate(), {}, VULKAN_UINT8_COMP_SPV),
@@ -224,15 +224,15 @@ Uint8Pass::~Uint8Pass() = default;
224std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer, 224std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
225 u64 src_offset) { 225 u64 src_offset) {
226 const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16)); 226 const u32 staging_size = static_cast<u32>(num_vertices * sizeof(u16));
227 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); 227 const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
228 228
229 update_descriptor_queue.Acquire(); 229 update_descriptor_queue.Acquire();
230 update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices); 230 update_descriptor_queue.AddBuffer(src_buffer, src_offset, num_vertices);
231 update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); 231 update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
232 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); 232 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
233 233
234 scheduler.RequestOutsideRenderPassOperationContext(); 234 scheduler.RequestOutsideRenderPassOperationContext();
235 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, 235 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
236 num_vertices](vk::CommandBuffer cmdbuf) { 236 num_vertices](vk::CommandBuffer cmdbuf) {
237 constexpr u32 dispatch_size = 1024; 237 constexpr u32 dispatch_size = 1024;
238 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); 238 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
@@ -252,12 +252,12 @@ std::pair<VkBuffer, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buff
252 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 252 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
253 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); 253 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
254 }); 254 });
255 return {*buffer.handle, 0}; 255 return {staging_ref.buffer, 0};
256} 256}
257 257
258QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, 258QuadIndexedPass::QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
259 VKDescriptorPool& descriptor_pool_, 259 VKDescriptorPool& descriptor_pool_,
260 VKStagingBufferPool& staging_buffer_pool_, 260 StagingBufferPool& staging_buffer_pool_,
261 VKUpdateDescriptorQueue& update_descriptor_queue_) 261 VKUpdateDescriptorQueue& update_descriptor_queue_)
262 : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(), 262 : VKComputePass(device_, descriptor_pool_, BuildInputOutputDescriptorSetBindings(),
263 BuildInputOutputDescriptorUpdateTemplate(), 263 BuildInputOutputDescriptorUpdateTemplate(),
@@ -286,15 +286,15 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
286 const u32 num_tri_vertices = (num_vertices / 4) * 6; 286 const u32 num_tri_vertices = (num_vertices / 4) * 6;
287 287
288 const std::size_t staging_size = num_tri_vertices * sizeof(u32); 288 const std::size_t staging_size = num_tri_vertices * sizeof(u32);
289 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); 289 const auto staging_ref = staging_buffer_pool.Request(staging_size, MemoryUsage::DeviceLocal);
290 290
291 update_descriptor_queue.Acquire(); 291 update_descriptor_queue.Acquire();
292 update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size); 292 update_descriptor_queue.AddBuffer(src_buffer, src_offset, input_size);
293 update_descriptor_queue.AddBuffer(*buffer.handle, 0, staging_size); 293 update_descriptor_queue.AddBuffer(staging_ref.buffer, 0, staging_size);
294 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue); 294 const VkDescriptorSet set = CommitDescriptorSet(update_descriptor_queue);
295 295
296 scheduler.RequestOutsideRenderPassOperationContext(); 296 scheduler.RequestOutsideRenderPassOperationContext();
297 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, 297 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = staging_ref.buffer, set,
298 num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) { 298 num_tri_vertices, base_vertex, index_shift](vk::CommandBuffer cmdbuf) {
299 static constexpr u32 dispatch_size = 1024; 299 static constexpr u32 dispatch_size = 1024;
300 const std::array push_constants = {base_vertex, index_shift}; 300 const std::array push_constants = {base_vertex, index_shift};
@@ -317,7 +317,7 @@ std::pair<VkBuffer, u64> QuadIndexedPass::Assemble(
317 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 317 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
318 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {}); 318 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
319 }); 319 });
320 return {*buffer.handle, 0}; 320 return {staging_ref.buffer, 0};
321} 321}
322 322
323} // namespace Vulkan 323} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 7ddb09afb..f5c6f5f17 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -16,8 +16,8 @@
16namespace Vulkan { 16namespace Vulkan {
17 17
18class Device; 18class Device;
19class StagingBufferPool;
19class VKScheduler; 20class VKScheduler;
20class VKStagingBufferPool;
21class VKUpdateDescriptorQueue; 21class VKUpdateDescriptorQueue;
22 22
23class VKComputePass { 23class VKComputePass {
@@ -45,7 +45,7 @@ class QuadArrayPass final : public VKComputePass {
45public: 45public:
46 explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_, 46 explicit QuadArrayPass(const Device& device_, VKScheduler& scheduler_,
47 VKDescriptorPool& descriptor_pool_, 47 VKDescriptorPool& descriptor_pool_,
48 VKStagingBufferPool& staging_buffer_pool_, 48 StagingBufferPool& staging_buffer_pool_,
49 VKUpdateDescriptorQueue& update_descriptor_queue_); 49 VKUpdateDescriptorQueue& update_descriptor_queue_);
50 ~QuadArrayPass(); 50 ~QuadArrayPass();
51 51
@@ -53,15 +53,14 @@ public:
53 53
54private: 54private:
55 VKScheduler& scheduler; 55 VKScheduler& scheduler;
56 VKStagingBufferPool& staging_buffer_pool; 56 StagingBufferPool& staging_buffer_pool;
57 VKUpdateDescriptorQueue& update_descriptor_queue; 57 VKUpdateDescriptorQueue& update_descriptor_queue;
58}; 58};
59 59
60class Uint8Pass final : public VKComputePass { 60class Uint8Pass final : public VKComputePass {
61public: 61public:
62 explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_, 62 explicit Uint8Pass(const Device& device_, VKScheduler& scheduler_,
63 VKDescriptorPool& descriptor_pool_, 63 VKDescriptorPool& descriptor_pool_, StagingBufferPool& staging_buffer_pool_,
64 VKStagingBufferPool& staging_buffer_pool_,
65 VKUpdateDescriptorQueue& update_descriptor_queue_); 64 VKUpdateDescriptorQueue& update_descriptor_queue_);
66 ~Uint8Pass(); 65 ~Uint8Pass();
67 66
@@ -69,7 +68,7 @@ public:
69 68
70private: 69private:
71 VKScheduler& scheduler; 70 VKScheduler& scheduler;
72 VKStagingBufferPool& staging_buffer_pool; 71 StagingBufferPool& staging_buffer_pool;
73 VKUpdateDescriptorQueue& update_descriptor_queue; 72 VKUpdateDescriptorQueue& update_descriptor_queue;
74}; 73};
75 74
@@ -77,7 +76,7 @@ class QuadIndexedPass final : public VKComputePass {
77public: 76public:
78 explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_, 77 explicit QuadIndexedPass(const Device& device_, VKScheduler& scheduler_,
79 VKDescriptorPool& descriptor_pool_, 78 VKDescriptorPool& descriptor_pool_,
80 VKStagingBufferPool& staging_buffer_pool_, 79 StagingBufferPool& staging_buffer_pool_,
81 VKUpdateDescriptorQueue& update_descriptor_queue_); 80 VKUpdateDescriptorQueue& update_descriptor_queue_);
82 ~QuadIndexedPass(); 81 ~QuadIndexedPass();
83 82
@@ -87,7 +86,7 @@ public:
87 86
88private: 87private:
89 VKScheduler& scheduler; 88 VKScheduler& scheduler;
90 VKStagingBufferPool& staging_buffer_pool; 89 StagingBufferPool& staging_buffer_pool;
91 VKUpdateDescriptorQueue& update_descriptor_queue; 90 VKUpdateDescriptorQueue& update_descriptor_queue;
92}; 91};
93 92
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index 4c5bc0aa1..6cd00884d 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -3,7 +3,6 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <memory> 5#include <memory>
6#include <thread>
7 6
8#include "video_core/renderer_vulkan/vk_buffer_cache.h" 7#include "video_core/renderer_vulkan/vk_buffer_cache.h"
9#include "video_core/renderer_vulkan/vk_fence_manager.h" 8#include "video_core/renderer_vulkan/vk_fence_manager.h"
@@ -14,13 +13,11 @@
14 13
15namespace Vulkan { 14namespace Vulkan {
16 15
17InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_, 16InnerFence::InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_)
18 bool is_stubbed_) 17 : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}
19 : FenceBase{payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
20 18
21InnerFence::InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_, 19InnerFence::InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
22 u32 payload_, bool is_stubbed_) 20 : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
23 : FenceBase{address_, payload_, is_stubbed_}, device{device_}, scheduler{scheduler_} {}
24 21
25InnerFence::~InnerFence() = default; 22InnerFence::~InnerFence() = default;
26 23
@@ -28,63 +25,38 @@ void InnerFence::Queue() {
28 if (is_stubbed) { 25 if (is_stubbed) {
29 return; 26 return;
30 } 27 }
31 ASSERT(!event); 28 // Get the current tick so we can wait for it
32 29 wait_tick = scheduler.CurrentTick();
33 event = device.GetLogical().CreateEvent(); 30 scheduler.Flush();
34 ticks = scheduler.CurrentTick();
35
36 scheduler.RequestOutsideRenderPassOperationContext();
37 scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
38 cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
39 });
40} 31}
41 32
42bool InnerFence::IsSignaled() const { 33bool InnerFence::IsSignaled() const {
43 if (is_stubbed) { 34 if (is_stubbed) {
44 return true; 35 return true;
45 } 36 }
46 ASSERT(event); 37 return scheduler.IsFree(wait_tick);
47 return IsEventSignalled();
48} 38}
49 39
50void InnerFence::Wait() { 40void InnerFence::Wait() {
51 if (is_stubbed) { 41 if (is_stubbed) {
52 return; 42 return;
53 } 43 }
54 ASSERT(event); 44 scheduler.Wait(wait_tick);
55
56 if (ticks >= scheduler.CurrentTick()) {
57 scheduler.Flush();
58 }
59 while (!IsEventSignalled()) {
60 std::this_thread::yield();
61 }
62}
63
64bool InnerFence::IsEventSignalled() const {
65 switch (const VkResult result = event.GetStatus()) {
66 case VK_EVENT_SET:
67 return true;
68 case VK_EVENT_RESET:
69 return false;
70 default:
71 throw vk::Exception(result);
72 }
73} 45}
74 46
75VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, 47VKFenceManager::VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
76 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_, 48 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
77 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_, 49 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
78 const Device& device_, VKScheduler& scheduler_) 50 VKScheduler& scheduler_)
79 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_}, 51 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
80 device{device_}, scheduler{scheduler_} {} 52 scheduler{scheduler_} {}
81 53
82Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) { 54Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
83 return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed); 55 return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
84} 56}
85 57
86Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) { 58Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
87 return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed); 59 return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
88} 60}
89 61
90void VKFenceManager::QueueFence(Fence& fence) { 62void VKFenceManager::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 6b51e4587..9c5e5aa8f 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -28,10 +28,8 @@ class VKScheduler;
28 28
29class InnerFence : public VideoCommon::FenceBase { 29class InnerFence : public VideoCommon::FenceBase {
30public: 30public:
31 explicit InnerFence(const Device& device_, VKScheduler& scheduler_, u32 payload_, 31 explicit InnerFence(VKScheduler& scheduler_, u32 payload_, bool is_stubbed_);
32 bool is_stubbed_); 32 explicit InnerFence(VKScheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
33 explicit InnerFence(const Device& device_, VKScheduler& scheduler_, GPUVAddr address_,
34 u32 payload_, bool is_stubbed_);
35 ~InnerFence(); 33 ~InnerFence();
36 34
37 void Queue(); 35 void Queue();
@@ -41,12 +39,8 @@ public:
41 void Wait(); 39 void Wait();
42 40
43private: 41private:
44 bool IsEventSignalled() const;
45
46 const Device& device;
47 VKScheduler& scheduler; 42 VKScheduler& scheduler;
48 vk::Event event; 43 u64 wait_tick = 0;
49 u64 ticks = 0;
50}; 44};
51using Fence = std::shared_ptr<InnerFence>; 45using Fence = std::shared_ptr<InnerFence>;
52 46
@@ -58,7 +52,7 @@ public:
58 explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, 52 explicit VKFenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
59 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_, 53 Tegra::MemoryManager& memory_manager_, TextureCache& texture_cache_,
60 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_, 54 VKBufferCache& buffer_cache_, VKQueryCache& query_cache_,
61 const Device& device_, VKScheduler& scheduler_); 55 VKScheduler& scheduler_);
62 56
63protected: 57protected:
64 Fence CreateFence(u32 value, bool is_stubbed) override; 58 Fence CreateFence(u32 value, bool is_stubbed) override;
@@ -68,7 +62,6 @@ protected:
68 void WaitFence(Fence& fence) override; 62 void WaitFence(Fence& fence) override;
69 63
70private: 64private:
71 const Device& device;
72 VKScheduler& scheduler; 65 VKScheduler& scheduler;
73}; 66};
74 67
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
deleted file mode 100644
index a6abd0eee..000000000
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ /dev/null
@@ -1,230 +0,0 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <optional>
7#include <tuple>
8#include <vector>
9
10#include "common/alignment.h"
11#include "common/assert.h"
12#include "common/common_types.h"
13#include "common/logging/log.h"
14#include "video_core/renderer_vulkan/vk_memory_manager.h"
15#include "video_core/vulkan_common/vulkan_device.h"
16#include "video_core/vulkan_common/vulkan_wrapper.h"
17
18namespace Vulkan {
19
20namespace {
21
22u64 GetAllocationChunkSize(u64 required_size) {
23 static constexpr u64 sizes[] = {16ULL << 20, 32ULL << 20, 64ULL << 20, 128ULL << 20};
24 auto it = std::lower_bound(std::begin(sizes), std::end(sizes), required_size);
25 return it != std::end(sizes) ? *it : Common::AlignUp(required_size, 256ULL << 20);
26}
27
28} // Anonymous namespace
29
30class VKMemoryAllocation final {
31public:
32 explicit VKMemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
33 VkMemoryPropertyFlags properties_, u64 allocation_size_, u32 type_)
34 : device{device_}, memory{std::move(memory_)}, properties{properties_},
35 allocation_size{allocation_size_}, shifted_type{ShiftType(type_)} {}
36
37 VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
38 auto found = TryFindFreeSection(free_iterator, allocation_size,
39 static_cast<u64>(commit_size), static_cast<u64>(alignment));
40 if (!found) {
41 found = TryFindFreeSection(0, free_iterator, static_cast<u64>(commit_size),
42 static_cast<u64>(alignment));
43 if (!found) {
44 // Signal out of memory, it'll try to do more allocations.
45 return nullptr;
46 }
47 }
48 auto commit = std::make_unique<VKMemoryCommitImpl>(device, this, memory, *found,
49 *found + commit_size);
50 commits.push_back(commit.get());
51
52 // Last commit's address is highly probable to be free.
53 free_iterator = *found + commit_size;
54
55 return commit;
56 }
57
58 void Free(const VKMemoryCommitImpl* commit) {
59 ASSERT(commit);
60
61 const auto it = std::find(std::begin(commits), std::end(commits), commit);
62 if (it == commits.end()) {
63 UNREACHABLE_MSG("Freeing unallocated commit!");
64 return;
65 }
66 commits.erase(it);
67 }
68
69 /// Returns whether this allocation is compatible with the arguments.
70 bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
71 return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
72 }
73
74private:
75 static constexpr u32 ShiftType(u32 type) {
76 return 1U << type;
77 }
78
79 /// A memory allocator, it may return a free region between "start" and "end" with the solicited
80 /// requirements.
81 std::optional<u64> TryFindFreeSection(u64 start, u64 end, u64 size, u64 alignment) const {
82 u64 iterator = Common::AlignUp(start, alignment);
83 while (iterator + size <= end) {
84 const u64 try_left = iterator;
85 const u64 try_right = try_left + size;
86
87 bool overlap = false;
88 for (const auto& commit : commits) {
89 const auto [commit_left, commit_right] = commit->interval;
90 if (try_left < commit_right && commit_left < try_right) {
91 // There's an overlap, continue the search where the overlapping commit ends.
92 iterator = Common::AlignUp(commit_right, alignment);
93 overlap = true;
94 break;
95 }
96 }
97 if (!overlap) {
98 // A free address has been found.
99 return try_left;
100 }
101 }
102
103 // No free regions where found, return an empty optional.
104 return std::nullopt;
105 }
106
107 const Device& device; ///< Vulkan device.
108 const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
109 const VkMemoryPropertyFlags properties; ///< Vulkan properties.
110 const u64 allocation_size; ///< Size of this allocation.
111 const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
112
113 /// Hints where the next free region is likely going to be.
114 u64 free_iterator{};
115
116 /// Stores all commits done from this allocation.
117 std::vector<const VKMemoryCommitImpl*> commits;
118};
119
120VKMemoryManager::VKMemoryManager(const Device& device_)
121 : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}
122
123VKMemoryManager::~VKMemoryManager() = default;
124
125VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
126 bool host_visible) {
127 const u64 chunk_size = GetAllocationChunkSize(requirements.size);
128
129 // When a host visible commit is asked, search for host visible and coherent, otherwise search
130 // for a fast device local type.
131 const VkMemoryPropertyFlags wanted_properties =
132 host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
133 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
134
135 if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
136 return commit;
137 }
138
139 // Commit has failed, allocate more memory.
140 if (!AllocMemory(wanted_properties, requirements.memoryTypeBits, chunk_size)) {
141 // TODO(Rodrigo): Handle these situations in some way like flushing to guest memory.
142 // Allocation has failed, panic.
143 UNREACHABLE_MSG("Ran out of VRAM!");
144 return {};
145 }
146
147 // Commit again, this time it won't fail since there's a fresh allocation above. If it does,
148 // there's a bug.
149 auto commit = TryAllocCommit(requirements, wanted_properties);
150 ASSERT(commit);
151 return commit;
152}
153
154VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
155 auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
156 buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
157 return commit;
158}
159
160VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
161 auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
162 image.BindMemory(commit->GetMemory(), commit->GetOffset());
163 return commit;
164}
165
166bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
167 u64 size) {
168 const u32 type = [&] {
169 for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
170 const auto flags = properties.memoryTypes[type_index].propertyFlags;
171 if ((type_mask & (1U << type_index)) && (flags & wanted_properties)) {
172 // The type matches in type and in the wanted properties.
173 return type_index;
174 }
175 }
176 UNREACHABLE_MSG("Couldn't find a compatible memory type!");
177 return 0U;
178 }();
179
180 // Try to allocate found type.
181 vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
182 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
183 .pNext = nullptr,
184 .allocationSize = size,
185 .memoryTypeIndex = type,
186 });
187 if (!memory) {
188 LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
189 return false;
190 }
191
192 allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
193 wanted_properties, size, type));
194 return true;
195}
196
197VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
198 VkMemoryPropertyFlags wanted_properties) {
199 for (auto& allocation : allocations) {
200 if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
201 continue;
202 }
203 if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
204 return commit;
205 }
206 }
207 return {};
208}
209
210VKMemoryCommitImpl::VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
211 const vk::DeviceMemory& memory_, u64 begin_, u64 end_)
212 : device{device_}, memory{memory_}, interval{begin_, end_}, allocation{allocation_} {}
213
214VKMemoryCommitImpl::~VKMemoryCommitImpl() {
215 allocation->Free(this);
216}
217
218MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
219 return MemoryMap(this, std::span<u8>(memory.Map(interval.first + offset_, size), size));
220}
221
222void VKMemoryCommitImpl::Unmap() const {
223 memory.Unmap();
224}
225
226MemoryMap VKMemoryCommitImpl::Map() const {
227 return Map(interval.second - interval.first);
228}
229
230} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
deleted file mode 100644
index 2452bca4e..000000000
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ /dev/null
@@ -1,132 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <span>
9#include <utility>
10#include <vector>
11#include "common/common_types.h"
12#include "video_core/vulkan_common/vulkan_wrapper.h"
13
14namespace Vulkan {
15
16class Device;
17class MemoryMap;
18class VKMemoryAllocation;
19class VKMemoryCommitImpl;
20
21using VKMemoryCommit = std::unique_ptr<VKMemoryCommitImpl>;
22
23class VKMemoryManager final {
24public:
25 explicit VKMemoryManager(const Device& device_);
26 VKMemoryManager(const VKMemoryManager&) = delete;
27 ~VKMemoryManager();
28
29 /**
30 * Commits a memory with the specified requeriments.
31 * @param requirements Requirements returned from a Vulkan call.
32 * @param host_visible Signals the allocator that it *must* use host visible and coherent
33 * memory. When passing false, it will try to allocate device local memory.
34 * @returns A memory commit.
35 */
36 VKMemoryCommit Commit(const VkMemoryRequirements& requirements, bool host_visible);
37
38 /// Commits memory required by the buffer and binds it.
39 VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
40
41 /// Commits memory required by the image and binds it.
42 VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
43
44private:
45 /// Allocates a chunk of memory.
46 bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
47
48 /// Tries to allocate a memory commit.
49 VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
50 VkMemoryPropertyFlags wanted_properties);
51
52 const Device& device; ///< Device handler.
53 const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
54 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
55};
56
57class VKMemoryCommitImpl final {
58 friend VKMemoryAllocation;
59 friend MemoryMap;
60
61public:
62 explicit VKMemoryCommitImpl(const Device& device_, VKMemoryAllocation* allocation_,
63 const vk::DeviceMemory& memory_, u64 begin_, u64 end_);
64 ~VKMemoryCommitImpl();
65
66 /// Maps a memory region and returns a pointer to it.
67 /// It's illegal to have more than one memory map at the same time.
68 MemoryMap Map(u64 size, u64 offset = 0) const;
69
70 /// Maps the whole commit and returns a pointer to it.
71 /// It's illegal to have more than one memory map at the same time.
72 MemoryMap Map() const;
73
74 /// Returns the Vulkan memory handler.
75 VkDeviceMemory GetMemory() const {
76 return *memory;
77 }
78
79 /// Returns the start position of the commit relative to the allocation.
80 VkDeviceSize GetOffset() const {
81 return static_cast<VkDeviceSize>(interval.first);
82 }
83
84private:
85 /// Unmaps memory.
86 void Unmap() const;
87
88 const Device& device; ///< Vulkan device.
89 const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
90 std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
91 VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
92};
93
94/// Holds ownership of a memory map.
95class MemoryMap final {
96public:
97 explicit MemoryMap(const VKMemoryCommitImpl* commit_, std::span<u8> span_)
98 : commit{commit_}, span{span_} {}
99
100 ~MemoryMap() {
101 if (commit) {
102 commit->Unmap();
103 }
104 }
105
106 /// Prematurely releases the memory map.
107 void Release() {
108 commit->Unmap();
109 commit = nullptr;
110 }
111
112 /// Returns a span to the memory map.
113 [[nodiscard]] std::span<u8> Span() const noexcept {
114 return span;
115 }
116
117 /// Returns the address of the memory map.
118 [[nodiscard]] u8* Address() const noexcept {
119 return span.data();
120 }
121
122 /// Returns the address of the memory map;
123 [[nodiscard]] operator u8*() const noexcept {
124 return span.data();
125 }
126
127private:
128 const VKMemoryCommitImpl* commit{}; ///< Mapped memory commit.
129 std::span<u8> span; ///< Address to the mapped memory.
130};
131
132} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 93fbea510..f0a111829 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -409,27 +409,26 @@ void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
409RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 409RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
410 Tegra::MemoryManager& gpu_memory_, 410 Tegra::MemoryManager& gpu_memory_,
411 Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_, 411 Core::Memory::Memory& cpu_memory_, VKScreenInfo& screen_info_,
412 const Device& device_, VKMemoryManager& memory_manager_, 412 const Device& device_, MemoryAllocator& memory_allocator_,
413 StateTracker& state_tracker_, VKScheduler& scheduler_) 413 StateTracker& state_tracker_, VKScheduler& scheduler_)
414 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, 414 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_},
415 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, 415 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()},
416 screen_info{screen_info_}, device{device_}, memory_manager{memory_manager_}, 416 screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
417 state_tracker{state_tracker_}, scheduler{scheduler_}, stream_buffer(device, scheduler), 417 state_tracker{state_tracker_}, scheduler{scheduler_}, stream_buffer(device, scheduler),
418 staging_pool(device, memory_manager, scheduler), descriptor_pool(device, scheduler), 418 staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
419 update_descriptor_queue(device, scheduler), 419 update_descriptor_queue(device, scheduler),
420 blit_image(device, scheduler, state_tracker, descriptor_pool), 420 blit_image(device, scheduler, state_tracker, descriptor_pool),
421 quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 421 quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
422 quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 422 quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
423 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 423 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
424 texture_cache_runtime{device, scheduler, memory_manager, staging_pool, blit_image}, 424 texture_cache_runtime{device, scheduler, memory_allocator, staging_pool, blit_image},
425 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), 425 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory),
426 pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler, 426 pipeline_cache(*this, gpu, maxwell3d, kepler_compute, gpu_memory, device, scheduler,
427 descriptor_pool, update_descriptor_queue), 427 descriptor_pool, update_descriptor_queue),
428 buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_manager, scheduler, stream_buffer, 428 buffer_cache(*this, gpu_memory, cpu_memory_, device, memory_allocator, scheduler,
429 staging_pool), 429 stream_buffer, staging_pool),
430 query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, 430 query_cache{*this, maxwell3d, gpu_memory, device, scheduler},
431 fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, device, 431 fence_manager(*this, gpu, gpu_memory, texture_cache, buffer_cache, query_cache, scheduler),
432 scheduler),
433 wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) { 432 wfi_event(device.GetLogical().CreateEvent()), async_shaders(emu_window_) {
434 scheduler.SetQueryCache(query_cache); 433 scheduler.SetQueryCache(query_cache);
435 if (device.UseAsynchronousShaders()) { 434 if (device.UseAsynchronousShaders()) {
@@ -1446,7 +1445,7 @@ VkBuffer RasterizerVulkan::DefaultBuffer() {
1446 .queueFamilyIndexCount = 0, 1445 .queueFamilyIndexCount = 0,
1447 .pQueueFamilyIndices = nullptr, 1446 .pQueueFamilyIndices = nullptr,
1448 }); 1447 });
1449 default_buffer_commit = memory_manager.Commit(default_buffer, false); 1448 default_buffer_commit = memory_allocator.Commit(default_buffer, MemoryUsage::DeviceLocal);
1450 1449
1451 scheduler.RequestOutsideRenderPassOperationContext(); 1450 scheduler.RequestOutsideRenderPassOperationContext();
1452 scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) { 1451 scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 4695718e9..8e261b9bd 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -21,7 +21,6 @@
21#include "video_core/renderer_vulkan/vk_compute_pass.h" 21#include "video_core/renderer_vulkan/vk_compute_pass.h"
22#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 22#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
23#include "video_core/renderer_vulkan/vk_fence_manager.h" 23#include "video_core/renderer_vulkan/vk_fence_manager.h"
24#include "video_core/renderer_vulkan/vk_memory_manager.h"
25#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 24#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
26#include "video_core/renderer_vulkan/vk_query_cache.h" 25#include "video_core/renderer_vulkan/vk_query_cache.h"
27#include "video_core/renderer_vulkan/vk_scheduler.h" 26#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -30,6 +29,7 @@
30#include "video_core/renderer_vulkan/vk_texture_cache.h" 29#include "video_core/renderer_vulkan/vk_texture_cache.h"
31#include "video_core/renderer_vulkan/vk_update_descriptor.h" 30#include "video_core/renderer_vulkan/vk_update_descriptor.h"
32#include "video_core/shader/async_shaders.h" 31#include "video_core/shader/async_shaders.h"
32#include "video_core/vulkan_common/vulkan_memory_allocator.h"
33#include "video_core/vulkan_common/vulkan_wrapper.h" 33#include "video_core/vulkan_common/vulkan_wrapper.h"
34 34
35namespace Core { 35namespace Core {
@@ -56,7 +56,7 @@ public:
56 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 56 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
57 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, 57 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
58 VKScreenInfo& screen_info_, const Device& device_, 58 VKScreenInfo& screen_info_, const Device& device_,
59 VKMemoryManager& memory_manager_, StateTracker& state_tracker_, 59 MemoryAllocator& memory_allocator_, StateTracker& state_tracker_,
60 VKScheduler& scheduler_); 60 VKScheduler& scheduler_);
61 ~RasterizerVulkan() override; 61 ~RasterizerVulkan() override;
62 62
@@ -213,12 +213,12 @@ private:
213 213
214 VKScreenInfo& screen_info; 214 VKScreenInfo& screen_info;
215 const Device& device; 215 const Device& device;
216 VKMemoryManager& memory_manager; 216 MemoryAllocator& memory_allocator;
217 StateTracker& state_tracker; 217 StateTracker& state_tracker;
218 VKScheduler& scheduler; 218 VKScheduler& scheduler;
219 219
220 VKStreamBuffer stream_buffer; 220 VKStreamBuffer stream_buffer;
221 VKStagingBufferPool staging_pool; 221 StagingBufferPool staging_pool;
222 VKDescriptorPool descriptor_pool; 222 VKDescriptorPool descriptor_pool;
223 VKUpdateDescriptorQueue update_descriptor_queue; 223 VKUpdateDescriptorQueue update_descriptor_queue;
224 BlitImageHelper blit_image; 224 BlitImageHelper blit_image;
@@ -234,7 +234,7 @@ private:
234 VKFenceManager fence_manager; 234 VKFenceManager fence_manager;
235 235
236 vk::Buffer default_buffer; 236 vk::Buffer default_buffer;
237 VKMemoryCommit default_buffer_commit; 237 MemoryCommit default_buffer_commit;
238 vk::Event wfi_event; 238 vk::Event wfi_event;
239 VideoCommon::Shader::AsyncShaders async_shaders; 239 VideoCommon::Shader::AsyncShaders async_shaders;
240 240
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 89cbe01ad..61d52b961 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -1334,7 +1334,10 @@ private:
1334 } 1334 }
1335 1335
1336 if (const auto comment = std::get_if<CommentNode>(&*node)) { 1336 if (const auto comment = std::get_if<CommentNode>(&*node)) {
1337 Name(OpUndef(t_void), comment->GetText()); 1337 if (device.HasDebuggingToolAttached()) {
1338 // We should insert comments with OpString instead of using named variables
1339 Name(OpUndef(t_int), comment->GetText());
1340 }
1338 return {}; 1341 return {};
1339 } 1342 }
1340 1343
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 1e0b8b922..97fd41cc1 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -3,10 +3,12 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm> 5#include <algorithm>
6#include <unordered_map>
7#include <utility> 6#include <utility>
8#include <vector> 7#include <vector>
9 8
9#include <fmt/format.h>
10
11#include "common/assert.h"
10#include "common/bit_util.h" 12#include "common/bit_util.h"
11#include "common/common_types.h" 13#include "common/common_types.h"
12#include "video_core/renderer_vulkan/vk_scheduler.h" 14#include "video_core/renderer_vulkan/vk_scheduler.h"
@@ -16,45 +18,51 @@
16 18
17namespace Vulkan { 19namespace Vulkan {
18 20
19VKStagingBufferPool::StagingBuffer::StagingBuffer(std::unique_ptr<VKBuffer> buffer_) 21StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
20 : buffer{std::move(buffer_)} {} 22 VKScheduler& scheduler_)
21 23 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {}
22VKStagingBufferPool::VKStagingBufferPool(const Device& device_, VKMemoryManager& memory_manager_,
23 VKScheduler& scheduler_)
24 : device{device_}, memory_manager{memory_manager_}, scheduler{scheduler_} {}
25 24
26VKStagingBufferPool::~VKStagingBufferPool() = default; 25StagingBufferPool::~StagingBufferPool() = default;
27 26
28VKBuffer& VKStagingBufferPool::GetUnusedBuffer(std::size_t size, bool host_visible) { 27StagingBufferRef StagingBufferPool::Request(size_t size, MemoryUsage usage) {
29 if (const auto buffer = TryGetReservedBuffer(size, host_visible)) { 28 if (const std::optional<StagingBufferRef> ref = TryGetReservedBuffer(size, usage)) {
30 return *buffer; 29 return *ref;
31 } 30 }
32 return CreateStagingBuffer(size, host_visible); 31 return CreateStagingBuffer(size, usage);
33} 32}
34 33
35void VKStagingBufferPool::TickFrame() { 34void StagingBufferPool::TickFrame() {
36 current_delete_level = (current_delete_level + 1) % NumLevels; 35 current_delete_level = (current_delete_level + 1) % NUM_LEVELS;
37 36
38 ReleaseCache(true); 37 ReleaseCache(MemoryUsage::DeviceLocal);
39 ReleaseCache(false); 38 ReleaseCache(MemoryUsage::Upload);
39 ReleaseCache(MemoryUsage::Download);
40} 40}
41 41
42VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { 42std::optional<StagingBufferRef> StagingBufferPool::TryGetReservedBuffer(size_t size,
43 for (StagingBuffer& entry : GetCache(host_visible)[Common::Log2Ceil64(size)].entries) { 43 MemoryUsage usage) {
44 if (!scheduler.IsFree(entry.tick)) { 44 StagingBuffers& cache_level = GetCache(usage)[Common::Log2Ceil64(size)];
45 continue; 45
46 const auto is_free = [this](const StagingBuffer& entry) {
47 return scheduler.IsFree(entry.tick);
48 };
49 auto& entries = cache_level.entries;
50 const auto hint_it = entries.begin() + cache_level.iterate_index;
51 auto it = std::find_if(entries.begin() + cache_level.iterate_index, entries.end(), is_free);
52 if (it == entries.end()) {
53 it = std::find_if(entries.begin(), hint_it, is_free);
54 if (it == hint_it) {
55 return std::nullopt;
46 } 56 }
47 entry.tick = scheduler.CurrentTick();
48 return &*entry.buffer;
49 } 57 }
50 return nullptr; 58 cache_level.iterate_index = std::distance(entries.begin(), it) + 1;
59 it->tick = scheduler.CurrentTick();
60 return it->Ref();
51} 61}
52 62
53VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { 63StagingBufferRef StagingBufferPool::CreateStagingBuffer(size_t size, MemoryUsage usage) {
54 const u32 log2 = Common::Log2Ceil64(size); 64 const u32 log2 = Common::Log2Ceil64(size);
55 65 vk::Buffer buffer = device.GetLogical().CreateBuffer({
56 auto buffer = std::make_unique<VKBuffer>();
57 buffer->handle = device.GetLogical().CreateBuffer({
58 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, 66 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
59 .pNext = nullptr, 67 .pNext = nullptr,
60 .flags = 0, 68 .flags = 0,
@@ -66,49 +74,63 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
66 .queueFamilyIndexCount = 0, 74 .queueFamilyIndexCount = 0,
67 .pQueueFamilyIndices = nullptr, 75 .pQueueFamilyIndices = nullptr,
68 }); 76 });
69 buffer->commit = memory_manager.Commit(buffer->handle, host_visible); 77 if (device.HasDebuggingToolAttached()) {
70 78 ++buffer_index;
71 std::vector<StagingBuffer>& entries = GetCache(host_visible)[log2].entries; 79 buffer.SetObjectNameEXT(fmt::format("Staging Buffer {}", buffer_index).c_str());
72 StagingBuffer& entry = entries.emplace_back(std::move(buffer)); 80 }
73 entry.tick = scheduler.CurrentTick(); 81 MemoryCommit commit = memory_allocator.Commit(buffer, usage);
74 return *entry.buffer; 82 const std::span<u8> mapped_span = IsHostVisible(usage) ? commit.Map() : std::span<u8>{};
75} 83
76 84 StagingBuffer& entry = GetCache(usage)[log2].entries.emplace_back(StagingBuffer{
77VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { 85 .buffer = std::move(buffer),
78 return host_visible ? host_staging_buffers : device_staging_buffers; 86 .commit = std::move(commit),
87 .mapped_span = mapped_span,
88 .tick = scheduler.CurrentTick(),
89 });
90 return entry.Ref();
79} 91}
80 92
81void VKStagingBufferPool::ReleaseCache(bool host_visible) { 93StagingBufferPool::StagingBuffersCache& StagingBufferPool::GetCache(MemoryUsage usage) {
82 auto& cache = GetCache(host_visible); 94 switch (usage) {
83 const u64 size = ReleaseLevel(cache, current_delete_level); 95 case MemoryUsage::DeviceLocal:
84 if (size == 0) { 96 return device_local_cache;
85 return; 97 case MemoryUsage::Upload:
98 return upload_cache;
99 case MemoryUsage::Download:
100 return download_cache;
101 default:
102 UNREACHABLE_MSG("Invalid memory usage={}", usage);
103 return upload_cache;
86 } 104 }
87} 105}
88 106
89u64 VKStagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, std::size_t log2) { 107void StagingBufferPool::ReleaseCache(MemoryUsage usage) {
90 static constexpr std::size_t deletions_per_tick = 16; 108 ReleaseLevel(GetCache(usage), current_delete_level);
109}
91 110
111void StagingBufferPool::ReleaseLevel(StagingBuffersCache& cache, size_t log2) {
112 constexpr size_t deletions_per_tick = 16;
92 auto& staging = cache[log2]; 113 auto& staging = cache[log2];
93 auto& entries = staging.entries; 114 auto& entries = staging.entries;
94 const std::size_t old_size = entries.size(); 115 const size_t old_size = entries.size();
95 116
96 const auto is_deleteable = [this](const StagingBuffer& entry) { 117 const auto is_deleteable = [this](const StagingBuffer& entry) {
97 return scheduler.IsFree(entry.tick); 118 return scheduler.IsFree(entry.tick);
98 }; 119 };
99 const std::size_t begin_offset = staging.delete_index; 120 const size_t begin_offset = staging.delete_index;
100 const std::size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size); 121 const size_t end_offset = std::min(begin_offset + deletions_per_tick, old_size);
101 const auto begin = std::begin(entries) + begin_offset; 122 const auto begin = entries.begin() + begin_offset;
102 const auto end = std::begin(entries) + end_offset; 123 const auto end = entries.begin() + end_offset;
103 entries.erase(std::remove_if(begin, end, is_deleteable), end); 124 entries.erase(std::remove_if(begin, end, is_deleteable), end);
104 125
105 const std::size_t new_size = entries.size(); 126 const size_t new_size = entries.size();
106 staging.delete_index += deletions_per_tick; 127 staging.delete_index += deletions_per_tick;
107 if (staging.delete_index >= new_size) { 128 if (staging.delete_index >= new_size) {
108 staging.delete_index = 0; 129 staging.delete_index = 0;
109 } 130 }
110 131 if (staging.iterate_index > new_size) {
111 return (1ULL << log2) * (old_size - new_size); 132 staging.iterate_index = 0;
133 }
112} 134}
113 135
114} // namespace Vulkan 136} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 90dadcbbe..d42918a47 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -9,7 +9,7 @@
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11 11
12#include "video_core/renderer_vulkan/vk_memory_manager.h" 12#include "video_core/vulkan_common/vulkan_memory_allocator.h"
13#include "video_core/vulkan_common/vulkan_wrapper.h" 13#include "video_core/vulkan_common/vulkan_wrapper.h"
14 14
15namespace Vulkan { 15namespace Vulkan {
@@ -17,55 +17,65 @@ namespace Vulkan {
17class Device; 17class Device;
18class VKScheduler; 18class VKScheduler;
19 19
20struct VKBuffer final { 20struct StagingBufferRef {
21 vk::Buffer handle; 21 VkBuffer buffer;
22 VKMemoryCommit commit; 22 std::span<u8> mapped_span;
23}; 23};
24 24
25class VKStagingBufferPool final { 25class StagingBufferPool {
26public: 26public:
27 explicit VKStagingBufferPool(const Device& device, VKMemoryManager& memory_manager, 27 explicit StagingBufferPool(const Device& device, MemoryAllocator& memory_allocator,
28 VKScheduler& scheduler); 28 VKScheduler& scheduler);
29 ~VKStagingBufferPool(); 29 ~StagingBufferPool();
30 30
31 VKBuffer& GetUnusedBuffer(std::size_t size, bool host_visible); 31 StagingBufferRef Request(size_t size, MemoryUsage usage);
32 32
33 void TickFrame(); 33 void TickFrame();
34 34
35private: 35private:
36 struct StagingBuffer final { 36 struct StagingBuffer {
37 explicit StagingBuffer(std::unique_ptr<VKBuffer> buffer); 37 vk::Buffer buffer;
38 38 MemoryCommit commit;
39 std::unique_ptr<VKBuffer> buffer; 39 std::span<u8> mapped_span;
40 u64 tick = 0; 40 u64 tick = 0;
41
42 StagingBufferRef Ref() const noexcept {
43 return {
44 .buffer = *buffer,
45 .mapped_span = mapped_span,
46 };
47 }
41 }; 48 };
42 49
43 struct StagingBuffers final { 50 struct StagingBuffers {
44 std::vector<StagingBuffer> entries; 51 std::vector<StagingBuffer> entries;
45 std::size_t delete_index = 0; 52 size_t delete_index = 0;
53 size_t iterate_index = 0;
46 }; 54 };
47 55
48 static constexpr std::size_t NumLevels = sizeof(std::size_t) * CHAR_BIT; 56 static constexpr size_t NUM_LEVELS = sizeof(size_t) * CHAR_BIT;
49 using StagingBuffersCache = std::array<StagingBuffers, NumLevels>; 57 using StagingBuffersCache = std::array<StagingBuffers, NUM_LEVELS>;
50 58
51 VKBuffer* TryGetReservedBuffer(std::size_t size, bool host_visible); 59 std::optional<StagingBufferRef> TryGetReservedBuffer(size_t size, MemoryUsage usage);
52 60
53 VKBuffer& CreateStagingBuffer(std::size_t size, bool host_visible); 61 StagingBufferRef CreateStagingBuffer(size_t size, MemoryUsage usage);
54 62
55 StagingBuffersCache& GetCache(bool host_visible); 63 StagingBuffersCache& GetCache(MemoryUsage usage);
56 64
57 void ReleaseCache(bool host_visible); 65 void ReleaseCache(MemoryUsage usage);
58 66
59 u64 ReleaseLevel(StagingBuffersCache& cache, std::size_t log2); 67 void ReleaseLevel(StagingBuffersCache& cache, size_t log2);
60 68
61 const Device& device; 69 const Device& device;
62 VKMemoryManager& memory_manager; 70 MemoryAllocator& memory_allocator;
63 VKScheduler& scheduler; 71 VKScheduler& scheduler;
64 72
65 StagingBuffersCache host_staging_buffers; 73 StagingBuffersCache device_local_cache;
66 StagingBuffersCache device_staging_buffers; 74 StagingBuffersCache upload_cache;
75 StagingBuffersCache download_cache;
67 76
68 std::size_t current_delete_level = 0; 77 size_t current_delete_level = 0;
78 u64 buffer_index = 0;
69}; 79};
70 80
71} // namespace Vulkan 81} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index bd11de012..ab14922d7 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,12 +10,12 @@
10#include "video_core/engines/fermi_2d.h" 10#include "video_core/engines/fermi_2d.h"
11#include "video_core/renderer_vulkan/blit_image.h" 11#include "video_core/renderer_vulkan/blit_image.h"
12#include "video_core/renderer_vulkan/maxwell_to_vk.h" 12#include "video_core/renderer_vulkan/maxwell_to_vk.h"
13#include "video_core/renderer_vulkan/vk_memory_manager.h"
14#include "video_core/renderer_vulkan/vk_rasterizer.h" 13#include "video_core/renderer_vulkan/vk_rasterizer.h"
15#include "video_core/renderer_vulkan/vk_scheduler.h" 14#include "video_core/renderer_vulkan/vk_scheduler.h"
16#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 15#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
17#include "video_core/renderer_vulkan/vk_texture_cache.h" 16#include "video_core/renderer_vulkan/vk_texture_cache.h"
18#include "video_core/vulkan_common/vulkan_device.h" 17#include "video_core/vulkan_common/vulkan_device.h"
18#include "video_core/vulkan_common/vulkan_memory_allocator.h"
19#include "video_core/vulkan_common/vulkan_wrapper.h" 19#include "video_core/vulkan_common/vulkan_wrapper.h"
20 20
21namespace Vulkan { 21namespace Vulkan {
@@ -554,10 +554,18 @@ void TextureCacheRuntime::Finish() {
554} 554}
555 555
556ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) { 556ImageBufferMap TextureCacheRuntime::MapUploadBuffer(size_t size) {
557 const auto& buffer = staging_buffer_pool.GetUnusedBuffer(size, true); 557 const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Upload);
558 return ImageBufferMap{ 558 return {
559 .handle = *buffer.handle, 559 .handle = staging_ref.buffer,
560 .map = buffer.commit->Map(size), 560 .span = staging_ref.mapped_span,
561 };
562}
563
564ImageBufferMap TextureCacheRuntime::MapDownloadBuffer(size_t size) {
565 const auto staging_ref = staging_buffer_pool.Request(size, MemoryUsage::Download);
566 return {
567 .handle = staging_ref.buffer,
568 .span = staging_ref.mapped_span,
561 }; 569 };
562} 570}
563 571
@@ -788,9 +796,9 @@ Image::Image(TextureCacheRuntime& runtime, const ImageInfo& info_, GPUVAddr gpu_
788 image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)), 796 image(MakeImage(runtime.device, info)), buffer(MakeBuffer(runtime.device, info)),
789 aspect_mask(ImageAspectMask(info.format)) { 797 aspect_mask(ImageAspectMask(info.format)) {
790 if (image) { 798 if (image) {
791 commit = runtime.memory_manager.Commit(image, false); 799 commit = runtime.memory_allocator.Commit(image, MemoryUsage::DeviceLocal);
792 } else { 800 } else {
793 commit = runtime.memory_manager.Commit(buffer, false); 801 commit = runtime.memory_allocator.Commit(buffer, MemoryUsage::DeviceLocal);
794 } 802 }
795 if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) { 803 if (IsPixelFormatASTC(info.format) && !runtime.device.IsOptimalAstcSupported()) {
796 flags |= VideoCommon::ImageFlagBits::Converted; 804 flags |= VideoCommon::ImageFlagBits::Converted;
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 92a7aad8b..a55d405d1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,8 +7,8 @@
7#include <compare> 7#include <compare>
8#include <span> 8#include <span>
9 9
10#include "video_core/renderer_vulkan/vk_memory_manager.h"
11#include "video_core/texture_cache/texture_cache.h" 10#include "video_core/texture_cache/texture_cache.h"
11#include "video_core/vulkan_common/vulkan_memory_allocator.h"
12#include "video_core/vulkan_common/vulkan_wrapper.h" 12#include "video_core/vulkan_common/vulkan_wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
@@ -19,14 +19,13 @@ using VideoCommon::Offset2D;
19using VideoCommon::RenderTargets; 19using VideoCommon::RenderTargets;
20using VideoCore::Surface::PixelFormat; 20using VideoCore::Surface::PixelFormat;
21 21
22class VKScheduler;
23class VKStagingBufferPool;
24
25class BlitImageHelper; 22class BlitImageHelper;
26class Device; 23class Device;
27class Image; 24class Image;
28class ImageView; 25class ImageView;
29class Framebuffer; 26class Framebuffer;
27class StagingBufferPool;
28class VKScheduler;
30 29
31struct RenderPassKey { 30struct RenderPassKey {
32 constexpr auto operator<=>(const RenderPassKey&) const noexcept = default; 31 constexpr auto operator<=>(const RenderPassKey&) const noexcept = default;
@@ -60,18 +59,18 @@ struct ImageBufferMap {
60 } 59 }
61 60
62 [[nodiscard]] std::span<u8> Span() const noexcept { 61 [[nodiscard]] std::span<u8> Span() const noexcept {
63 return map.Span(); 62 return span;
64 } 63 }
65 64
66 VkBuffer handle; 65 VkBuffer handle;
67 MemoryMap map; 66 std::span<u8> span;
68}; 67};
69 68
70struct TextureCacheRuntime { 69struct TextureCacheRuntime {
71 const Device& device; 70 const Device& device;
72 VKScheduler& scheduler; 71 VKScheduler& scheduler;
73 VKMemoryManager& memory_manager; 72 MemoryAllocator& memory_allocator;
74 VKStagingBufferPool& staging_buffer_pool; 73 StagingBufferPool& staging_buffer_pool;
75 BlitImageHelper& blit_image_helper; 74 BlitImageHelper& blit_image_helper;
76 std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache; 75 std::unordered_map<RenderPassKey, vk::RenderPass> renderpass_cache;
77 76
@@ -79,10 +78,7 @@ struct TextureCacheRuntime {
79 78
80 [[nodiscard]] ImageBufferMap MapUploadBuffer(size_t size); 79 [[nodiscard]] ImageBufferMap MapUploadBuffer(size_t size);
81 80
82 [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size) { 81 [[nodiscard]] ImageBufferMap MapDownloadBuffer(size_t size);
83 // TODO: Have a special function for this
84 return MapUploadBuffer(size);
85 }
86 82
87 void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src, 83 void BlitImage(Framebuffer* dst_framebuffer, ImageView& dst, ImageView& src,
88 const std::array<Offset2D, 2>& dst_region, 84 const std::array<Offset2D, 2>& dst_region,
@@ -141,7 +137,7 @@ private:
141 VKScheduler* scheduler; 137 VKScheduler* scheduler;
142 vk::Image image; 138 vk::Image image;
143 vk::Buffer buffer; 139 vk::Buffer buffer;
144 VKMemoryCommit commit; 140 MemoryCommit commit;
145 VkImageAspectFlags aspect_mask = 0; 141 VkImageAspectFlags aspect_mask = 0;
146 bool initialized = false; 142 bool initialized = false;
147}; 143};
diff --git a/src/video_core/texture_cache/accelerated_swizzle.cpp b/src/video_core/texture_cache/accelerated_swizzle.cpp
index a4fc1184b..15585caeb 100644
--- a/src/video_core/texture_cache/accelerated_swizzle.cpp
+++ b/src/video_core/texture_cache/accelerated_swizzle.cpp
@@ -27,7 +27,7 @@ BlockLinearSwizzle2DParams MakeBlockLinearSwizzle2DParams(const SwizzleParameter
27 const Extent3D num_tiles = swizzle.num_tiles; 27 const Extent3D num_tiles = swizzle.num_tiles;
28 const u32 bytes_per_block = BytesPerBlock(info.format); 28 const u32 bytes_per_block = BytesPerBlock(info.format);
29 const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); 29 const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level);
30 const u32 stride = Common::AlignBits(num_tiles.width, stride_alignment) * bytes_per_block; 30 const u32 stride = Common::AlignUpLog2(num_tiles.width, stride_alignment) * bytes_per_block;
31 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); 31 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
32 return BlockLinearSwizzle2DParams{ 32 return BlockLinearSwizzle2DParams{
33 .origin{0, 0, 0}, 33 .origin{0, 0, 0},
@@ -47,7 +47,7 @@ BlockLinearSwizzle3DParams MakeBlockLinearSwizzle3DParams(const SwizzleParameter
47 const Extent3D num_tiles = swizzle.num_tiles; 47 const Extent3D num_tiles = swizzle.num_tiles;
48 const u32 bytes_per_block = BytesPerBlock(info.format); 48 const u32 bytes_per_block = BytesPerBlock(info.format);
49 const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level); 49 const u32 stride_alignment = CalculateLevelStrideAlignment(info, swizzle.level);
50 const u32 stride = Common::AlignBits(num_tiles.width, stride_alignment) * bytes_per_block; 50 const u32 stride = Common::AlignUpLog2(num_tiles.width, stride_alignment) * bytes_per_block;
51 51
52 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) >> GOB_SIZE_X_SHIFT; 52 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) >> GOB_SIZE_X_SHIFT;
53 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block.height + block.depth); 53 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block.height + block.depth);
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 279932778..b23424523 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -279,7 +279,7 @@ template <u32 GOB_EXTENT>
279 const bool is_small = IsSmallerThanGobSize(blocks, gob, info.block.depth); 279 const bool is_small = IsSmallerThanGobSize(blocks, gob, info.block.depth);
280 const u32 alignment = is_small ? 0 : info.tile_width_spacing; 280 const u32 alignment = is_small ? 0 : info.tile_width_spacing;
281 return Extent2D{ 281 return Extent2D{
282 .width = Common::AlignBits(gobs.width, alignment), 282 .width = Common::AlignUpLog2(gobs.width, alignment),
283 .height = gobs.height, 283 .height = gobs.height,
284 }; 284 };
285} 285}
@@ -352,7 +352,7 @@ template <u32 GOB_EXTENT>
352 // https://github.com/Ryujinx/Ryujinx/blob/1c9aba6de1520aea5480c032e0ff5664ac1bb36f/Ryujinx.Graphics.Texture/SizeCalculator.cs#L134 352 // https://github.com/Ryujinx/Ryujinx/blob/1c9aba6de1520aea5480c032e0ff5664ac1bb36f/Ryujinx.Graphics.Texture/SizeCalculator.cs#L134
353 if (tile_width_spacing > 0) { 353 if (tile_width_spacing > 0) {
354 const u32 alignment_log2 = GOB_SIZE_SHIFT + tile_width_spacing + block.height + block.depth; 354 const u32 alignment_log2 = GOB_SIZE_SHIFT + tile_width_spacing + block.height + block.depth;
355 return Common::AlignBits(size_bytes, alignment_log2); 355 return Common::AlignUpLog2(size_bytes, alignment_log2);
356 } 356 }
357 const u32 aligned_height = Common::AlignUp(size.height, tile_size_y); 357 const u32 aligned_height = Common::AlignUp(size.height, tile_size_y);
358 while (block.height != 0 && aligned_height <= (1U << (block.height - 1)) * GOB_SIZE_Y) { 358 while (block.height != 0 && aligned_height <= (1U << (block.height - 1)) * GOB_SIZE_Y) {
@@ -528,9 +528,9 @@ template <u32 GOB_EXTENT>
528 const u32 alignment = StrideAlignment(num_tiles, info.block, bpp_log2, info.tile_width_spacing); 528 const u32 alignment = StrideAlignment(num_tiles, info.block, bpp_log2, info.tile_width_spacing);
529 const Extent3D mip_block = AdjustMipBlockSize(num_tiles, info.block, 0); 529 const Extent3D mip_block = AdjustMipBlockSize(num_tiles, info.block, 0);
530 return Extent3D{ 530 return Extent3D{
531 .width = Common::AlignBits(num_tiles.width, alignment), 531 .width = Common::AlignUpLog2(num_tiles.width, alignment),
532 .height = Common::AlignBits(num_tiles.height, GOB_SIZE_Y_SHIFT + mip_block.height), 532 .height = Common::AlignUpLog2(num_tiles.height, GOB_SIZE_Y_SHIFT + mip_block.height),
533 .depth = Common::AlignBits(num_tiles.depth, GOB_SIZE_Z_SHIFT + mip_block.depth), 533 .depth = Common::AlignUpLog2(num_tiles.depth, GOB_SIZE_Z_SHIFT + mip_block.depth),
534 }; 534 };
535} 535}
536 536
@@ -679,7 +679,7 @@ u32 CalculateLayerSize(const ImageInfo& info) noexcept {
679} 679}
680 680
681std::array<u32, MAX_MIP_LEVELS> CalculateMipLevelOffsets(const ImageInfo& info) noexcept { 681std::array<u32, MAX_MIP_LEVELS> CalculateMipLevelOffsets(const ImageInfo& info) noexcept {
682 ASSERT(info.resources.levels <= MAX_MIP_LEVELS); 682 ASSERT(info.resources.levels <= static_cast<s32>(MAX_MIP_LEVELS));
683 const LevelInfo level_info = MakeLevelInfo(info); 683 const LevelInfo level_info = MakeLevelInfo(info);
684 std::array<u32, MAX_MIP_LEVELS> offsets{}; 684 std::array<u32, MAX_MIP_LEVELS> offsets{};
685 u32 offset = 0; 685 u32 offset = 0;
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index acd5bdd78..3625b666c 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -42,21 +42,24 @@ constexpr u32 Popcnt(u32 n) {
42 42
43class InputBitStream { 43class InputBitStream {
44public: 44public:
45 constexpr explicit InputBitStream(const u8* ptr, std::size_t start_offset = 0) 45 constexpr explicit InputBitStream(std::span<const u8> data, size_t start_offset = 0)
46 : cur_byte{ptr}, next_bit{start_offset % 8} {} 46 : cur_byte{data.data()}, total_bits{data.size()}, next_bit{start_offset % 8} {}
47 47
48 constexpr std::size_t GetBitsRead() const { 48 constexpr size_t GetBitsRead() const {
49 return bits_read; 49 return bits_read;
50 } 50 }
51 51
52 constexpr bool ReadBit() { 52 constexpr bool ReadBit() {
53 const bool bit = (*cur_byte >> next_bit++) & 1; 53 if (bits_read >= total_bits * 8) {
54 return 0;
55 }
56 const bool bit = ((*cur_byte >> next_bit) & 1) != 0;
57 ++next_bit;
54 while (next_bit >= 8) { 58 while (next_bit >= 8) {
55 next_bit -= 8; 59 next_bit -= 8;
56 cur_byte++; 60 ++cur_byte;
57 } 61 }
58 62 ++bits_read;
59 bits_read++;
60 return bit; 63 return bit;
61 } 64 }
62 65
@@ -79,8 +82,9 @@ public:
79 82
80private: 83private:
81 const u8* cur_byte; 84 const u8* cur_byte;
82 std::size_t next_bit = 0; 85 size_t total_bits = 0;
83 std::size_t bits_read = 0; 86 size_t next_bit = 0;
87 size_t bits_read = 0;
84}; 88};
85 89
86class OutputBitStream { 90class OutputBitStream {
@@ -193,15 +197,15 @@ struct IntegerEncodedValue {
193 }; 197 };
194}; 198};
195using IntegerEncodedVector = boost::container::static_vector< 199using IntegerEncodedVector = boost::container::static_vector<
196 IntegerEncodedValue, 64, 200 IntegerEncodedValue, 256,
197 boost::container::static_vector_options< 201 boost::container::static_vector_options<
198 boost::container::inplace_alignment<alignof(IntegerEncodedValue)>, 202 boost::container::inplace_alignment<alignof(IntegerEncodedValue)>,
199 boost::container::throw_on_overflow<false>>::type>; 203 boost::container::throw_on_overflow<false>>::type>;
200 204
201static void DecodeTritBlock(InputBitStream& bits, IntegerEncodedVector& result, u32 nBitsPerValue) { 205static void DecodeTritBlock(InputBitStream& bits, IntegerEncodedVector& result, u32 nBitsPerValue) {
202 // Implement the algorithm in section C.2.12 206 // Implement the algorithm in section C.2.12
203 u32 m[5]; 207 std::array<u32, 5> m;
204 u32 t[5]; 208 std::array<u32, 5> t;
205 u32 T; 209 u32 T;
206 210
207 // Read the trit encoded block according to 211 // Read the trit encoded block according to
@@ -866,7 +870,7 @@ public:
866 } 870 }
867}; 871};
868 872
869static void DecodeColorValues(u32* out, u8* data, const u32* modes, const u32 nPartitions, 873static void DecodeColorValues(u32* out, std::span<u8> data, const u32* modes, const u32 nPartitions,
870 const u32 nBitsForColorData) { 874 const u32 nBitsForColorData) {
871 // First figure out how many color values we have 875 // First figure out how many color values we have
872 u32 nValues = 0; 876 u32 nValues = 0;
@@ -898,7 +902,7 @@ static void DecodeColorValues(u32* out, u8* data, const u32* modes, const u32 nP
898 // We now have enough to decode our integer sequence. 902 // We now have enough to decode our integer sequence.
899 IntegerEncodedVector decodedColorValues; 903 IntegerEncodedVector decodedColorValues;
900 904
901 InputBitStream colorStream(data); 905 InputBitStream colorStream(data, 0);
902 DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues); 906 DecodeIntegerSequence(decodedColorValues, colorStream, range, nValues);
903 907
904 // Once we have the decoded values, we need to dequantize them to the 0-255 range 908 // Once we have the decoded values, we need to dequantize them to the 0-255 range
@@ -1441,7 +1445,7 @@ static void ComputeEndpos32s(Pixel& ep1, Pixel& ep2, const u32*& colorValues,
1441 1445
1442static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth, 1446static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
1443 const u32 blockHeight, std::span<u32, 12 * 12> outBuf) { 1447 const u32 blockHeight, std::span<u32, 12 * 12> outBuf) {
1444 InputBitStream strm(inBuf.data()); 1448 InputBitStream strm(inBuf);
1445 TexelWeightParams weightParams = DecodeBlockInfo(strm); 1449 TexelWeightParams weightParams = DecodeBlockInfo(strm);
1446 1450
1447 // Was there an error? 1451 // Was there an error?
@@ -1619,15 +1623,16 @@ static void DecompressBlock(std::span<const u8, 16> inBuf, const u32 blockWidth,
1619 1623
1620 // Make sure that higher non-texel bits are set to zero 1624 // Make sure that higher non-texel bits are set to zero
1621 const u32 clearByteStart = (weightParams.GetPackedBitSize() >> 3) + 1; 1625 const u32 clearByteStart = (weightParams.GetPackedBitSize() >> 3) + 1;
1622 if (clearByteStart > 0) { 1626 if (clearByteStart > 0 && clearByteStart <= texelWeightData.size()) {
1623 texelWeightData[clearByteStart - 1] &= 1627 texelWeightData[clearByteStart - 1] &=
1624 static_cast<u8>((1 << (weightParams.GetPackedBitSize() % 8)) - 1); 1628 static_cast<u8>((1 << (weightParams.GetPackedBitSize() % 8)) - 1);
1629 std::memset(texelWeightData.data() + clearByteStart, 0,
1630 std::min(16U - clearByteStart, 16U));
1625 } 1631 }
1626 std::memset(texelWeightData.data() + clearByteStart, 0, std::min(16U - clearByteStart, 16U));
1627 1632
1628 IntegerEncodedVector texelWeightValues; 1633 IntegerEncodedVector texelWeightValues;
1629 1634
1630 InputBitStream weightStream(texelWeightData.data()); 1635 InputBitStream weightStream(texelWeightData);
1631 1636
1632 DecodeIntegerSequence(texelWeightValues, weightStream, weightParams.m_MaxWeight, 1637 DecodeIntegerSequence(texelWeightValues, weightStream, weightParams.m_MaxWeight,
1633 weightParams.GetNumWeightValues()); 1638 weightParams.GetNumWeightValues());
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 9f5181318..62685a183 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -49,7 +49,7 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
49 // We can configure here a custom pitch 49 // We can configure here a custom pitch
50 // As it's not exposed 'width * bpp' will be the expected pitch. 50 // As it's not exposed 'width * bpp' will be the expected pitch.
51 const u32 pitch = width * bytes_per_pixel; 51 const u32 pitch = width * bytes_per_pixel;
52 const u32 stride = Common::AlignBits(width, stride_alignment) * bytes_per_pixel; 52 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
53 53
54 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); 54 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
55 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); 55 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -217,9 +217,9 @@ void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32
217std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, 217std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
218 u32 block_height, u32 block_depth) { 218 u32 block_height, u32 block_depth) {
219 if (tiled) { 219 if (tiled) {
220 const u32 aligned_width = Common::AlignBits(width * bytes_per_pixel, GOB_SIZE_X_SHIFT); 220 const u32 aligned_width = Common::AlignUpLog2(width * bytes_per_pixel, GOB_SIZE_X_SHIFT);
221 const u32 aligned_height = Common::AlignBits(height, GOB_SIZE_Y_SHIFT + block_height); 221 const u32 aligned_height = Common::AlignUpLog2(height, GOB_SIZE_Y_SHIFT + block_height);
222 const u32 aligned_depth = Common::AlignBits(depth, GOB_SIZE_Z_SHIFT + block_depth); 222 const u32 aligned_depth = Common::AlignUpLog2(depth, GOB_SIZE_Z_SHIFT + block_depth);
223 return aligned_width * aligned_height * aligned_depth; 223 return aligned_width * aligned_height * aligned_depth;
224 } else { 224 } else {
225 return width * height * depth * bytes_per_pixel; 225 return width * height * depth * bytes_per_pixel;
diff --git a/src/video_core/vulkan_common/vulkan_debug_callback.h b/src/video_core/vulkan_common/vulkan_debug_callback.h
index 2efcd244c..b0519f132 100644
--- a/src/video_core/vulkan_common/vulkan_debug_callback.h
+++ b/src/video_core/vulkan_common/vulkan_debug_callback.h
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once
6
5#include "video_core/vulkan_common/vulkan_wrapper.h" 7#include "video_core/vulkan_common/vulkan_wrapper.h"
6 8
7namespace Vulkan { 9namespace Vulkan {
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 75173324e..37d7b45a3 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -99,8 +99,7 @@ VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType
99 }); 99 });
100} 100}
101 101
102std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties( 102std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(vk::PhysicalDevice physical) {
103 vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
104 static constexpr std::array formats{ 103 static constexpr std::array formats{
105 VK_FORMAT_A8B8G8R8_UNORM_PACK32, 104 VK_FORMAT_A8B8G8R8_UNORM_PACK32,
106 VK_FORMAT_A8B8G8R8_UINT_PACK32, 105 VK_FORMAT_A8B8G8R8_UINT_PACK32,
@@ -210,7 +209,7 @@ std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
210Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR surface, 209Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR surface,
211 const vk::InstanceDispatch& dld_) 210 const vk::InstanceDispatch& dld_)
212 : instance{instance_}, dld{dld_}, physical{physical_}, properties{physical.GetProperties()}, 211 : instance{instance_}, dld{dld_}, physical{physical_}, properties{physical.GetProperties()},
213 format_properties{GetFormatProperties(physical, dld)} { 212 format_properties{GetFormatProperties(physical)} {
214 CheckSuitability(); 213 CheckSuitability();
215 SetupFamilies(surface); 214 SetupFamilies(surface);
216 SetupFeatures(); 215 SetupFeatures();
@@ -221,6 +220,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
221 VkPhysicalDeviceFeatures2 features2{ 220 VkPhysicalDeviceFeatures2 features2{
222 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, 221 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
223 .pNext = nullptr, 222 .pNext = nullptr,
223 .features{},
224 }; 224 };
225 const void* first_next = &features2; 225 const void* first_next = &features2;
226 void** next = &features2.pNext; 226 void** next = &features2.pNext;
@@ -256,7 +256,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
256 .shaderTessellationAndGeometryPointSize = false, 256 .shaderTessellationAndGeometryPointSize = false,
257 .shaderImageGatherExtended = true, 257 .shaderImageGatherExtended = true,
258 .shaderStorageImageExtendedFormats = false, 258 .shaderStorageImageExtendedFormats = false,
259 .shaderStorageImageMultisample = true, 259 .shaderStorageImageMultisample = is_shader_storage_image_multisample,
260 .shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported, 260 .shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported,
261 .shaderStorageImageWriteWithoutFormat = true, 261 .shaderStorageImageWriteWithoutFormat = true,
262 .shaderUniformBufferArrayDynamicIndexing = false, 262 .shaderUniformBufferArrayDynamicIndexing = false,
@@ -310,6 +310,7 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
310 310
311 VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset{ 311 VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset{
312 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT, 312 .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT,
313 .pNext = nullptr,
313 .hostQueryReset = true, 314 .hostQueryReset = true,
314 }; 315 };
315 SetNext(next, host_query_reset); 316 SetNext(next, host_query_reset);
@@ -604,7 +605,6 @@ void Device::CheckSuitability() const {
604 std::make_pair(features.occlusionQueryPrecise, "occlusionQueryPrecise"), 605 std::make_pair(features.occlusionQueryPrecise, "occlusionQueryPrecise"),
605 std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"), 606 std::make_pair(features.fragmentStoresAndAtomics, "fragmentStoresAndAtomics"),
606 std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"), 607 std::make_pair(features.shaderImageGatherExtended, "shaderImageGatherExtended"),
607 std::make_pair(features.shaderStorageImageMultisample, "shaderStorageImageMultisample"),
608 std::make_pair(features.shaderStorageImageWriteWithoutFormat, 608 std::make_pair(features.shaderStorageImageWriteWithoutFormat,
609 "shaderStorageImageWriteWithoutFormat"), 609 "shaderStorageImageWriteWithoutFormat"),
610 }; 610 };
@@ -804,6 +804,7 @@ void Device::SetupFamilies(VkSurfaceKHR surface) {
804void Device::SetupFeatures() { 804void Device::SetupFeatures() {
805 const auto supported_features{physical.GetFeatures()}; 805 const auto supported_features{physical.GetFeatures()};
806 is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat; 806 is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
807 is_shader_storage_image_multisample = supported_features.shaderStorageImageMultisample;
807 is_blit_depth_stencil_supported = TestDepthStencilBlits(); 808 is_blit_depth_stencil_supported = TestDepthStencilBlits();
808 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features); 809 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
809} 810}
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index a973c3ce4..4b66dba7a 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -272,23 +272,24 @@ private:
272 bool is_optimal_astc_supported{}; ///< Support for native ASTC. 272 bool is_optimal_astc_supported{}; ///< Support for native ASTC.
273 bool is_float16_supported{}; ///< Support for float16 arithmetics. 273 bool is_float16_supported{}; ///< Support for float16 arithmetics.
274 bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest. 274 bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
275 bool is_formatless_image_load_supported{}; ///< Support for shader image read without format. 275 bool is_formatless_image_load_supported{}; ///< Support for shader image read without format.
276 bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil. 276 bool is_shader_storage_image_multisample{}; ///< Support for image operations on MSAA images.
277 bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle. 277 bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil.
278 bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs. 278 bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle.
279 bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8. 279 bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs.
280 bool ext_sampler_filter_minmax{}; ///< Support for VK_EXT_sampler_filter_minmax. 280 bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
281 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. 281 bool ext_sampler_filter_minmax{}; ///< Support for VK_EXT_sampler_filter_minmax.
282 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. 282 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
283 bool ext_tooling_info{}; ///< Support for VK_EXT_tooling_info. 283 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
284 bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback. 284 bool ext_tooling_info{}; ///< Support for VK_EXT_tooling_info.
285 bool ext_custom_border_color{}; ///< Support for VK_EXT_custom_border_color. 285 bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback.
286 bool ext_extended_dynamic_state{}; ///< Support for VK_EXT_extended_dynamic_state. 286 bool ext_custom_border_color{}; ///< Support for VK_EXT_custom_border_color.
287 bool ext_robustness2{}; ///< Support for VK_EXT_robustness2. 287 bool ext_extended_dynamic_state{}; ///< Support for VK_EXT_extended_dynamic_state.
288 bool ext_shader_stencil_export{}; ///< Support for VK_EXT_shader_stencil_export. 288 bool ext_robustness2{}; ///< Support for VK_EXT_robustness2.
289 bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config. 289 bool ext_shader_stencil_export{}; ///< Support for VK_EXT_shader_stencil_export.
290 bool has_renderdoc{}; ///< Has RenderDoc attached 290 bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config.
291 bool has_nsight_graphics{}; ///< Has Nsight Graphics attached 291 bool has_renderdoc{}; ///< Has RenderDoc attached
292 bool has_nsight_graphics{}; ///< Has Nsight Graphics attached
292 293
293 // Asynchronous Graphics Pipeline setting 294 // Asynchronous Graphics Pipeline setting
294 bool use_asynchronous_shaders{}; ///< Setting to use asynchronous shaders/graphics pipeline 295 bool use_asynchronous_shaders{}; ///< Setting to use asynchronous shaders/graphics pipeline
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.cpp b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
new file mode 100644
index 000000000..d6eb3af31
--- /dev/null
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.cpp
@@ -0,0 +1,268 @@
1// Copyright 2018 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <bit>
7#include <optional>
8#include <vector>
9
10#include "common/alignment.h"
11#include "common/assert.h"
12#include "common/common_types.h"
13#include "common/logging/log.h"
14#include "video_core/vulkan_common/vulkan_device.h"
15#include "video_core/vulkan_common/vulkan_memory_allocator.h"
16#include "video_core/vulkan_common/vulkan_wrapper.h"
17
18namespace Vulkan {
19namespace {
20struct Range {
21 u64 begin;
22 u64 end;
23
24 [[nodiscard]] bool Contains(u64 iterator, u64 size) const noexcept {
25 return iterator < end && begin < iterator + size;
26 }
27};
28
29[[nodiscard]] u64 AllocationChunkSize(u64 required_size) {
30 static constexpr std::array sizes{
31 0x1000ULL << 10, 0x1400ULL << 10, 0x1800ULL << 10, 0x1c00ULL << 10, 0x2000ULL << 10,
32 0x3200ULL << 10, 0x4000ULL << 10, 0x6000ULL << 10, 0x8000ULL << 10, 0xA000ULL << 10,
33 0x10000ULL << 10, 0x18000ULL << 10, 0x20000ULL << 10,
34 };
35 static_assert(std::is_sorted(sizes.begin(), sizes.end()));
36
37 const auto it = std::ranges::lower_bound(sizes, required_size);
38 return it != sizes.end() ? *it : Common::AlignUp(required_size, 4ULL << 20);
39}
40
41[[nodiscard]] VkMemoryPropertyFlags MemoryUsagePropertyFlags(MemoryUsage usage) {
42 switch (usage) {
43 case MemoryUsage::DeviceLocal:
44 return VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
45 case MemoryUsage::Upload:
46 return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
47 case MemoryUsage::Download:
48 return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
49 VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
50 }
51 UNREACHABLE_MSG("Invalid memory usage={}", usage);
52 return VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
53}
54} // Anonymous namespace
55
56class MemoryAllocation {
57public:
58 explicit MemoryAllocation(const Device& device_, vk::DeviceMemory memory_,
59 VkMemoryPropertyFlags properties, u64 allocation_size_, u32 type)
60 : device{device_}, memory{std::move(memory_)}, allocation_size{allocation_size_},
61 property_flags{properties}, shifted_memory_type{1U << type} {}
62
63 [[nodiscard]] std::optional<MemoryCommit> Commit(VkDeviceSize size, VkDeviceSize alignment) {
64 const std::optional<u64> alloc = FindFreeRegion(size, alignment);
65 if (!alloc) {
66 // Signal out of memory, it'll try to do more allocations.
67 return std::nullopt;
68 }
69 const Range range{
70 .begin = *alloc,
71 .end = *alloc + size,
72 };
73 commits.insert(std::ranges::upper_bound(commits, *alloc, {}, &Range::begin), range);
74 return std::make_optional<MemoryCommit>(this, *memory, *alloc, *alloc + size);
75 }
76
77 void Free(u64 begin) {
78 const auto it = std::ranges::find(commits, begin, &Range::begin);
79 ASSERT_MSG(it != commits.end(), "Invalid commit");
80 commits.erase(it);
81 }
82
83 [[nodiscard]] std::span<u8> Map() {
84 if (memory_mapped_span.empty()) {
85 u8* const raw_pointer = memory.Map(0, allocation_size);
86 memory_mapped_span = std::span<u8>(raw_pointer, allocation_size);
87 }
88 return memory_mapped_span;
89 }
90
91 /// Returns whether this allocation is compatible with the arguments.
92 [[nodiscard]] bool IsCompatible(VkMemoryPropertyFlags flags, u32 type_mask) const {
93 return (flags & property_flags) && (type_mask & shifted_memory_type) != 0;
94 }
95
96private:
97 [[nodiscard]] static constexpr u32 ShiftType(u32 type) {
98 return 1U << type;
99 }
100
101 [[nodiscard]] std::optional<u64> FindFreeRegion(u64 size, u64 alignment) noexcept {
102 ASSERT(std::has_single_bit(alignment));
103 const u64 alignment_log2 = std::countr_zero(alignment);
104 std::optional<u64> candidate;
105 u64 iterator = 0;
106 auto commit = commits.begin();
107 while (iterator + size <= allocation_size) {
108 candidate = candidate.value_or(iterator);
109 if (commit == commits.end()) {
110 break;
111 }
112 if (commit->Contains(*candidate, size)) {
113 candidate = std::nullopt;
114 }
115 iterator = Common::AlignUpLog2(commit->end, alignment_log2);
116 ++commit;
117 }
118 return candidate;
119 }
120
121 const Device& device; ///< Vulkan device.
122 const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
123 const u64 allocation_size; ///< Size of this allocation.
124 const VkMemoryPropertyFlags property_flags; ///< Vulkan memory property flags.
125 const u32 shifted_memory_type; ///< Shifted Vulkan memory type.
126 std::vector<Range> commits; ///< All commit ranges done from this allocation.
127 std::span<u8> memory_mapped_span; ///< Memory mapped span. Empty if not queried before.
128};
129
// Takes a non-owning pointer to the backing allocation plus the [begin, end) byte
// range this commit occupies inside it. The allocation must outlive the commit.
MemoryCommit::MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_,
                           u64 end_) noexcept
    : allocation{allocation_}, memory{memory_}, begin{begin_}, end{end_} {}

// Returns the committed range to the owning allocation (no-op if moved-from).
MemoryCommit::~MemoryCommit() {
    Release();
}
137
138MemoryCommit& MemoryCommit::operator=(MemoryCommit&& rhs) noexcept {
139 Release();
140 allocation = std::exchange(rhs.allocation, nullptr);
141 memory = rhs.memory;
142 begin = rhs.begin;
143 end = rhs.end;
144 span = std::exchange(rhs.span, std::span<u8>{});
145 return *this;
146}
147
// Move construction: steals rhs's state and nulls rhs's allocation pointer so that
// rhs's destructor does not return the range twice.
MemoryCommit::MemoryCommit(MemoryCommit&& rhs) noexcept
    : allocation{std::exchange(rhs.allocation, nullptr)}, memory{rhs.memory}, begin{rhs.begin},
      end{rhs.end}, span{std::exchange(rhs.span, std::span<u8>{})} {}
151
152std::span<u8> MemoryCommit::Map() {
153 if (span.empty()) {
154 span = allocation->Map().subspan(begin, end - begin);
155 }
156 return span;
157}
158
159void MemoryCommit::Release() {
160 if (allocation) {
161 allocation->Free(begin);
162 }
163}
164
// Caches the physical device's memory properties up front so later memory-type
// lookups do not have to requery the driver.
MemoryAllocator::MemoryAllocator(const Device& device_)
    : device{device_}, properties{device_.GetPhysical().GetMemoryProperties()} {}

MemoryAllocator::~MemoryAllocator() = default;
169
170MemoryCommit MemoryAllocator::Commit(const VkMemoryRequirements& requirements, MemoryUsage usage) {
171 // Find the fastest memory flags we can afford with the current requirements
172 const VkMemoryPropertyFlags flags = MemoryPropertyFlags(requirements.memoryTypeBits, usage);
173 if (std::optional<MemoryCommit> commit = TryCommit(requirements, flags)) {
174 return std::move(*commit);
175 }
176 // Commit has failed, allocate more memory.
177 // TODO(Rodrigo): Handle out of memory situations in some way like flushing to guest memory.
178 AllocMemory(flags, requirements.memoryTypeBits, AllocationChunkSize(requirements.size));
179
180 // Commit again, this time it won't fail since there's a fresh allocation above.
181 // If it does, there's a bug.
182 return TryCommit(requirements, flags).value();
183}
184
185MemoryCommit MemoryAllocator::Commit(const vk::Buffer& buffer, MemoryUsage usage) {
186 auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), usage);
187 buffer.BindMemory(commit.Memory(), commit.Offset());
188 return commit;
189}
190
191MemoryCommit MemoryAllocator::Commit(const vk::Image& image, MemoryUsage usage) {
192 auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), usage);
193 image.BindMemory(commit.Memory(), commit.Offset());
194 return commit;
195}
196
197void MemoryAllocator::AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size) {
198 const u32 type = FindType(flags, type_mask).value();
199 vk::DeviceMemory memory = device.GetLogical().AllocateMemory({
200 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
201 .pNext = nullptr,
202 .allocationSize = size,
203 .memoryTypeIndex = type,
204 });
205 allocations.push_back(
206 std::make_unique<MemoryAllocation>(device, std::move(memory), flags, size, type));
207}
208
209std::optional<MemoryCommit> MemoryAllocator::TryCommit(const VkMemoryRequirements& requirements,
210 VkMemoryPropertyFlags flags) {
211 for (auto& allocation : allocations) {
212 if (!allocation->IsCompatible(flags, requirements.memoryTypeBits)) {
213 continue;
214 }
215 if (auto commit = allocation->Commit(requirements.size, requirements.alignment)) {
216 return commit;
217 }
218 }
219 return std::nullopt;
220}
221
// Convenience overload: translates the usage hint into its ideal property flags,
// then degrades them as needed to match the available memory types.
VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const {
    return MemoryPropertyFlags(type_mask, MemoryUsagePropertyFlags(usage));
}
225
// Recursively degrades the wanted property flags until a supported memory type
// exists: first drop HOST_CACHED (an optimization), then DEVICE_LOCAL. The order
// matters — cached reads are sacrificed before device locality.
VkMemoryPropertyFlags MemoryAllocator::MemoryPropertyFlags(u32 type_mask,
                                                           VkMemoryPropertyFlags flags) const {
    if (FindType(flags, type_mask)) {
        // Found a memory type with those requirements
        return flags;
    }
    if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
        // Remove host cached bit in case it's not supported
        return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
    }
    if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
        // Remove device local, if it's not supported by the requested resource
        return MemoryPropertyFlags(type_mask, flags & ~VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    }
    UNREACHABLE_MSG("No compatible memory types found");
    return 0;
}
243
244std::optional<u32> MemoryAllocator::FindType(VkMemoryPropertyFlags flags, u32 type_mask) const {
245 for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
246 const VkMemoryPropertyFlags type_flags = properties.memoryTypes[type_index].propertyFlags;
247 if ((type_mask & (1U << type_index)) && (type_flags & flags)) {
248 // The type matches in type and in the wanted properties.
249 return type_index;
250 }
251 }
252 // Failed to find index
253 return std::nullopt;
254}
255
256bool IsHostVisible(MemoryUsage usage) noexcept {
257 switch (usage) {
258 case MemoryUsage::DeviceLocal:
259 return false;
260 case MemoryUsage::Upload:
261 case MemoryUsage::Download:
262 return true;
263 }
264 UNREACHABLE_MSG("Invalid memory usage={}", usage);
265 return false;
266}
267
268} // namespace Vulkan
diff --git a/src/video_core/vulkan_common/vulkan_memory_allocator.h b/src/video_core/vulkan_common/vulkan_memory_allocator.h
new file mode 100644
index 000000000..9e6cfabf9
--- /dev/null
+++ b/src/video_core/vulkan_common/vulkan_memory_allocator.h
@@ -0,0 +1,117 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <span>
9#include <utility>
10#include <vector>
11#include "common/common_types.h"
12#include "video_core/vulkan_common/vulkan_wrapper.h"
13
14namespace Vulkan {
15
16class Device;
17class MemoryMap;
18class MemoryAllocation;
19
/// Hints and requirements for the backing memory type of a commit.
/// DeviceLocal is a hint (may be degraded); Upload/Download are requirements.
enum class MemoryUsage {
    DeviceLocal, ///< Hints device local usages, fastest memory type to read and write from the GPU
    Upload,      ///< Requires a host visible memory type optimized for CPU to GPU uploads
    Download,    ///< Requires a host visible memory type optimized for GPU to CPU readbacks
};
26
/// Ownership handle of a memory commitment.
/// Points to a subregion of a memory allocation.
/// Move-only: destroying or overwriting a commit returns its range to the
/// parent allocation, which must outlive every commit made from it.
class MemoryCommit {
public:
    explicit MemoryCommit() noexcept = default;
    explicit MemoryCommit(MemoryAllocation* allocation_, VkDeviceMemory memory_, u64 begin_,
                          u64 end_) noexcept;
    ~MemoryCommit();

    MemoryCommit& operator=(MemoryCommit&&) noexcept;
    MemoryCommit(MemoryCommit&&) noexcept;

    MemoryCommit& operator=(const MemoryCommit&) = delete;
    MemoryCommit(const MemoryCommit&) = delete;

    /// Returns a host visible memory map.
    /// It will map the backing allocation if it hasn't been mapped before.
    std::span<u8> Map();

    /// Returns the Vulkan memory handler.
    VkDeviceMemory Memory() const {
        return memory;
    }

    /// Returns the start position of the commit relative to the allocation.
    VkDeviceSize Offset() const {
        return static_cast<VkDeviceSize>(begin);
    }

private:
    /// Returns the commit's range to the parent allocation (no-op when moved-from).
    void Release();

    MemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
    VkDeviceMemory memory{};        ///< Vulkan device memory handler.
    u64 begin{};                    ///< Beginning offset in bytes to where the commit exists.
    u64 end{};                      ///< Offset in bytes where the commit ends.
    std::span<u8> span;             ///< Host visible memory span. Empty if not queried before.
};
65
/// Memory allocator container.
/// Allocates and releases memory allocations on demand.
/// Grows by whole chunks and hands out MemoryCommit sub-ranges from them;
/// the allocator must outlive every commit it returns.
class MemoryAllocator {
public:
    explicit MemoryAllocator(const Device& device_);
    ~MemoryAllocator();

    MemoryAllocator& operator=(const MemoryAllocator&) = delete;
    MemoryAllocator(const MemoryAllocator&) = delete;

    /**
     * Commits a memory with the specified requirements.
     *
     * @param requirements Requirements returned from a Vulkan call.
     * @param usage        Indicates how the memory will be used.
     *
     * @returns A memory commit.
     */
    MemoryCommit Commit(const VkMemoryRequirements& requirements, MemoryUsage usage);

    /// Commits memory required by the buffer and binds it.
    MemoryCommit Commit(const vk::Buffer& buffer, MemoryUsage usage);

    /// Commits memory required by the image and binds it.
    MemoryCommit Commit(const vk::Image& image, MemoryUsage usage);

private:
    /// Allocates a chunk of memory.
    void AllocMemory(VkMemoryPropertyFlags flags, u32 type_mask, u64 size);

    /// Tries to allocate a memory commit.
    std::optional<MemoryCommit> TryCommit(const VkMemoryRequirements& requirements,
                                          VkMemoryPropertyFlags flags);

    /// Returns the fastest compatible memory property flags from a wanted usage.
    VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, MemoryUsage usage) const;

    /// Returns the fastest compatible memory property flags from the wanted flags.
    VkMemoryPropertyFlags MemoryPropertyFlags(u32 type_mask, VkMemoryPropertyFlags flags) const;

    /// Returns index to the fastest memory type compatible with the passed requirements.
    std::optional<u32> FindType(VkMemoryPropertyFlags flags, u32 type_mask) const;

    const Device& device;                              ///< Device handle.
    const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
    std::vector<std::unique_ptr<MemoryAllocation>> allocations; ///< Current allocations.
};
113
114/// Returns true when a memory usage is guaranteed to be host visible.
115bool IsHostVisible(MemoryUsage usage) noexcept;
116
117} // namespace Vulkan
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 912cab46c..9689de0cb 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -144,152 +144,152 @@ inline VkResult Filter(VkResult result) {
144 144
145/// Table holding Vulkan instance function pointers. 145/// Table holding Vulkan instance function pointers.
146struct InstanceDispatch { 146struct InstanceDispatch {
147 PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; 147 PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr{};
148 148
149 PFN_vkCreateInstance vkCreateInstance; 149 PFN_vkCreateInstance vkCreateInstance{};
150 PFN_vkDestroyInstance vkDestroyInstance; 150 PFN_vkDestroyInstance vkDestroyInstance{};
151 PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties; 151 PFN_vkEnumerateInstanceExtensionProperties vkEnumerateInstanceExtensionProperties{};
152 PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties; 152 PFN_vkEnumerateInstanceLayerProperties vkEnumerateInstanceLayerProperties{};
153 153
154 PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT; 154 PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT{};
155 PFN_vkCreateDevice vkCreateDevice; 155 PFN_vkCreateDevice vkCreateDevice{};
156 PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT; 156 PFN_vkDestroyDebugUtilsMessengerEXT vkDestroyDebugUtilsMessengerEXT{};
157 PFN_vkDestroyDevice vkDestroyDevice; 157 PFN_vkDestroyDevice vkDestroyDevice{};
158 PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR; 158 PFN_vkDestroySurfaceKHR vkDestroySurfaceKHR{};
159 PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties; 159 PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties{};
160 PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices; 160 PFN_vkEnumeratePhysicalDevices vkEnumeratePhysicalDevices{};
161 PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; 161 PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr{};
162 PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR; 162 PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR{};
163 PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties; 163 PFN_vkGetPhysicalDeviceFormatProperties vkGetPhysicalDeviceFormatProperties{};
164 PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; 164 PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties{};
165 PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; 165 PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties{};
166 PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR; 166 PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR{};
167 PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties; 167 PFN_vkGetPhysicalDeviceQueueFamilyProperties vkGetPhysicalDeviceQueueFamilyProperties{};
168 PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR; 168 PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR vkGetPhysicalDeviceSurfaceCapabilitiesKHR{};
169 PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR; 169 PFN_vkGetPhysicalDeviceSurfaceFormatsKHR vkGetPhysicalDeviceSurfaceFormatsKHR{};
170 PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR; 170 PFN_vkGetPhysicalDeviceSurfacePresentModesKHR vkGetPhysicalDeviceSurfacePresentModesKHR{};
171 PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR; 171 PFN_vkGetPhysicalDeviceSurfaceSupportKHR vkGetPhysicalDeviceSurfaceSupportKHR{};
172 PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR; 172 PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR{};
173 PFN_vkQueuePresentKHR vkQueuePresentKHR; 173 PFN_vkQueuePresentKHR vkQueuePresentKHR{};
174}; 174};
175 175
176/// Table holding Vulkan device function pointers. 176/// Table holding Vulkan device function pointers.
177struct DeviceDispatch : public InstanceDispatch { 177struct DeviceDispatch : public InstanceDispatch {
178 PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR; 178 PFN_vkAcquireNextImageKHR vkAcquireNextImageKHR{};
179 PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers; 179 PFN_vkAllocateCommandBuffers vkAllocateCommandBuffers{};
180 PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets; 180 PFN_vkAllocateDescriptorSets vkAllocateDescriptorSets{};
181 PFN_vkAllocateMemory vkAllocateMemory; 181 PFN_vkAllocateMemory vkAllocateMemory{};
182 PFN_vkBeginCommandBuffer vkBeginCommandBuffer; 182 PFN_vkBeginCommandBuffer vkBeginCommandBuffer{};
183 PFN_vkBindBufferMemory vkBindBufferMemory; 183 PFN_vkBindBufferMemory vkBindBufferMemory{};
184 PFN_vkBindImageMemory vkBindImageMemory; 184 PFN_vkBindImageMemory vkBindImageMemory{};
185 PFN_vkCmdBeginQuery vkCmdBeginQuery; 185 PFN_vkCmdBeginQuery vkCmdBeginQuery{};
186 PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass; 186 PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass{};
187 PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT; 187 PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT{};
188 PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT; 188 PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT{};
189 PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets; 189 PFN_vkCmdBindDescriptorSets vkCmdBindDescriptorSets{};
190 PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer; 190 PFN_vkCmdBindIndexBuffer vkCmdBindIndexBuffer{};
191 PFN_vkCmdBindPipeline vkCmdBindPipeline; 191 PFN_vkCmdBindPipeline vkCmdBindPipeline{};
192 PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT; 192 PFN_vkCmdBindTransformFeedbackBuffersEXT vkCmdBindTransformFeedbackBuffersEXT{};
193 PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers; 193 PFN_vkCmdBindVertexBuffers vkCmdBindVertexBuffers{};
194 PFN_vkCmdBlitImage vkCmdBlitImage; 194 PFN_vkCmdBlitImage vkCmdBlitImage{};
195 PFN_vkCmdClearAttachments vkCmdClearAttachments; 195 PFN_vkCmdClearAttachments vkCmdClearAttachments{};
196 PFN_vkCmdCopyBuffer vkCmdCopyBuffer; 196 PFN_vkCmdCopyBuffer vkCmdCopyBuffer{};
197 PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage; 197 PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage{};
198 PFN_vkCmdCopyImage vkCmdCopyImage; 198 PFN_vkCmdCopyImage vkCmdCopyImage{};
199 PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer; 199 PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer{};
200 PFN_vkCmdDispatch vkCmdDispatch; 200 PFN_vkCmdDispatch vkCmdDispatch{};
201 PFN_vkCmdDraw vkCmdDraw; 201 PFN_vkCmdDraw vkCmdDraw{};
202 PFN_vkCmdDrawIndexed vkCmdDrawIndexed; 202 PFN_vkCmdDrawIndexed vkCmdDrawIndexed{};
203 PFN_vkCmdEndQuery vkCmdEndQuery; 203 PFN_vkCmdEndQuery vkCmdEndQuery{};
204 PFN_vkCmdEndRenderPass vkCmdEndRenderPass; 204 PFN_vkCmdEndRenderPass vkCmdEndRenderPass{};
205 PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT; 205 PFN_vkCmdEndTransformFeedbackEXT vkCmdEndTransformFeedbackEXT{};
206 PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT; 206 PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT{};
207 PFN_vkCmdFillBuffer vkCmdFillBuffer; 207 PFN_vkCmdFillBuffer vkCmdFillBuffer{};
208 PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; 208 PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier{};
209 PFN_vkCmdPushConstants vkCmdPushConstants; 209 PFN_vkCmdPushConstants vkCmdPushConstants{};
210 PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; 210 PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants{};
211 PFN_vkCmdSetDepthBias vkCmdSetDepthBias; 211 PFN_vkCmdSetDepthBias vkCmdSetDepthBias{};
212 PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; 212 PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds{};
213 PFN_vkCmdSetEvent vkCmdSetEvent; 213 PFN_vkCmdSetEvent vkCmdSetEvent{};
214 PFN_vkCmdSetScissor vkCmdSetScissor; 214 PFN_vkCmdSetScissor vkCmdSetScissor{};
215 PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; 215 PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask{};
216 PFN_vkCmdSetStencilReference vkCmdSetStencilReference; 216 PFN_vkCmdSetStencilReference vkCmdSetStencilReference{};
217 PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; 217 PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask{};
218 PFN_vkCmdSetViewport vkCmdSetViewport; 218 PFN_vkCmdSetViewport vkCmdSetViewport{};
219 PFN_vkCmdWaitEvents vkCmdWaitEvents; 219 PFN_vkCmdWaitEvents vkCmdWaitEvents{};
220 PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT; 220 PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT{};
221 PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT; 221 PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT{};
222 PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT; 222 PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT{};
223 PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT; 223 PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT{};
224 PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT; 224 PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT{};
225 PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT; 225 PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT{};
226 PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT; 226 PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT{};
227 PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT; 227 PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT{};
228 PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT; 228 PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT{};
229 PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT; 229 PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT{};
230 PFN_vkCmdResolveImage vkCmdResolveImage; 230 PFN_vkCmdResolveImage vkCmdResolveImage{};
231 PFN_vkCreateBuffer vkCreateBuffer; 231 PFN_vkCreateBuffer vkCreateBuffer{};
232 PFN_vkCreateBufferView vkCreateBufferView; 232 PFN_vkCreateBufferView vkCreateBufferView{};
233 PFN_vkCreateCommandPool vkCreateCommandPool; 233 PFN_vkCreateCommandPool vkCreateCommandPool{};
234 PFN_vkCreateComputePipelines vkCreateComputePipelines; 234 PFN_vkCreateComputePipelines vkCreateComputePipelines{};
235 PFN_vkCreateDescriptorPool vkCreateDescriptorPool; 235 PFN_vkCreateDescriptorPool vkCreateDescriptorPool{};
236 PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; 236 PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout{};
237 PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; 237 PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR{};
238 PFN_vkCreateEvent vkCreateEvent; 238 PFN_vkCreateEvent vkCreateEvent{};
239 PFN_vkCreateFence vkCreateFence; 239 PFN_vkCreateFence vkCreateFence{};
240 PFN_vkCreateFramebuffer vkCreateFramebuffer; 240 PFN_vkCreateFramebuffer vkCreateFramebuffer{};
241 PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; 241 PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines{};
242 PFN_vkCreateImage vkCreateImage; 242 PFN_vkCreateImage vkCreateImage{};
243 PFN_vkCreateImageView vkCreateImageView; 243 PFN_vkCreateImageView vkCreateImageView{};
244 PFN_vkCreatePipelineLayout vkCreatePipelineLayout; 244 PFN_vkCreatePipelineLayout vkCreatePipelineLayout{};
245 PFN_vkCreateQueryPool vkCreateQueryPool; 245 PFN_vkCreateQueryPool vkCreateQueryPool{};
246 PFN_vkCreateRenderPass vkCreateRenderPass; 246 PFN_vkCreateRenderPass vkCreateRenderPass{};
247 PFN_vkCreateSampler vkCreateSampler; 247 PFN_vkCreateSampler vkCreateSampler{};
248 PFN_vkCreateSemaphore vkCreateSemaphore; 248 PFN_vkCreateSemaphore vkCreateSemaphore{};
249 PFN_vkCreateShaderModule vkCreateShaderModule; 249 PFN_vkCreateShaderModule vkCreateShaderModule{};
250 PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR; 250 PFN_vkCreateSwapchainKHR vkCreateSwapchainKHR{};
251 PFN_vkDestroyBuffer vkDestroyBuffer; 251 PFN_vkDestroyBuffer vkDestroyBuffer{};
252 PFN_vkDestroyBufferView vkDestroyBufferView; 252 PFN_vkDestroyBufferView vkDestroyBufferView{};
253 PFN_vkDestroyCommandPool vkDestroyCommandPool; 253 PFN_vkDestroyCommandPool vkDestroyCommandPool{};
254 PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; 254 PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool{};
255 PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; 255 PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout{};
256 PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; 256 PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR{};
257 PFN_vkDestroyEvent vkDestroyEvent; 257 PFN_vkDestroyEvent vkDestroyEvent{};
258 PFN_vkDestroyFence vkDestroyFence; 258 PFN_vkDestroyFence vkDestroyFence{};
259 PFN_vkDestroyFramebuffer vkDestroyFramebuffer; 259 PFN_vkDestroyFramebuffer vkDestroyFramebuffer{};
260 PFN_vkDestroyImage vkDestroyImage; 260 PFN_vkDestroyImage vkDestroyImage{};
261 PFN_vkDestroyImageView vkDestroyImageView; 261 PFN_vkDestroyImageView vkDestroyImageView{};
262 PFN_vkDestroyPipeline vkDestroyPipeline; 262 PFN_vkDestroyPipeline vkDestroyPipeline{};
263 PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout; 263 PFN_vkDestroyPipelineLayout vkDestroyPipelineLayout{};
264 PFN_vkDestroyQueryPool vkDestroyQueryPool; 264 PFN_vkDestroyQueryPool vkDestroyQueryPool{};
265 PFN_vkDestroyRenderPass vkDestroyRenderPass; 265 PFN_vkDestroyRenderPass vkDestroyRenderPass{};
266 PFN_vkDestroySampler vkDestroySampler; 266 PFN_vkDestroySampler vkDestroySampler{};
267 PFN_vkDestroySemaphore vkDestroySemaphore; 267 PFN_vkDestroySemaphore vkDestroySemaphore{};
268 PFN_vkDestroyShaderModule vkDestroyShaderModule; 268 PFN_vkDestroyShaderModule vkDestroyShaderModule{};
269 PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR; 269 PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR{};
270 PFN_vkDeviceWaitIdle vkDeviceWaitIdle; 270 PFN_vkDeviceWaitIdle vkDeviceWaitIdle{};
271 PFN_vkEndCommandBuffer vkEndCommandBuffer; 271 PFN_vkEndCommandBuffer vkEndCommandBuffer{};
272 PFN_vkFreeCommandBuffers vkFreeCommandBuffers; 272 PFN_vkFreeCommandBuffers vkFreeCommandBuffers{};
273 PFN_vkFreeDescriptorSets vkFreeDescriptorSets; 273 PFN_vkFreeDescriptorSets vkFreeDescriptorSets{};
274 PFN_vkFreeMemory vkFreeMemory; 274 PFN_vkFreeMemory vkFreeMemory{};
275 PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; 275 PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements{};
276 PFN_vkGetDeviceQueue vkGetDeviceQueue; 276 PFN_vkGetDeviceQueue vkGetDeviceQueue{};
277 PFN_vkGetEventStatus vkGetEventStatus; 277 PFN_vkGetEventStatus vkGetEventStatus{};
278 PFN_vkGetFenceStatus vkGetFenceStatus; 278 PFN_vkGetFenceStatus vkGetFenceStatus{};
279 PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; 279 PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements{};
280 PFN_vkGetQueryPoolResults vkGetQueryPoolResults; 280 PFN_vkGetQueryPoolResults vkGetQueryPoolResults{};
281 PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR; 281 PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR{};
282 PFN_vkMapMemory vkMapMemory; 282 PFN_vkMapMemory vkMapMemory{};
283 PFN_vkQueueSubmit vkQueueSubmit; 283 PFN_vkQueueSubmit vkQueueSubmit{};
284 PFN_vkResetFences vkResetFences; 284 PFN_vkResetFences vkResetFences{};
285 PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT; 285 PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT{};
286 PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT; 286 PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT{};
287 PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT; 287 PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT{};
288 PFN_vkUnmapMemory vkUnmapMemory; 288 PFN_vkUnmapMemory vkUnmapMemory{};
289 PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR; 289 PFN_vkUpdateDescriptorSetWithTemplateKHR vkUpdateDescriptorSetWithTemplateKHR{};
290 PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets; 290 PFN_vkUpdateDescriptorSets vkUpdateDescriptorSets{};
291 PFN_vkWaitForFences vkWaitForFences; 291 PFN_vkWaitForFences vkWaitForFences{};
292 PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR; 292 PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR{};
293}; 293};
294 294
295/// Loads instance agnostic function pointers. 295/// Loads instance agnostic function pointers.
diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp
index 4bf2bfd40..0a4c48b3d 100644
--- a/src/yuzu/applets/profile_select.cpp
+++ b/src/yuzu/applets/profile_select.cpp
@@ -93,7 +93,7 @@ QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent)
93 93
94 const auto& profiles = profile_manager->GetAllUsers(); 94 const auto& profiles = profile_manager->GetAllUsers();
95 for (const auto& user : profiles) { 95 for (const auto& user : profiles) {
96 Service::Account::ProfileBase profile; 96 Service::Account::ProfileBase profile{};
97 if (!profile_manager->GetProfileBase(user, profile)) 97 if (!profile_manager->GetProfileBase(user, profile))
98 continue; 98 continue;
99 99
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 85ee2577d..4528eb196 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -290,8 +290,8 @@ GRenderWindow::GRenderWindow(GMainWindow* parent, EmuThread* emu_thread_,
290 QString::fromUtf8(Common::g_scm_branch), 290 QString::fromUtf8(Common::g_scm_branch),
291 QString::fromUtf8(Common::g_scm_desc))); 291 QString::fromUtf8(Common::g_scm_desc)));
292 setAttribute(Qt::WA_AcceptTouchEvents); 292 setAttribute(Qt::WA_AcceptTouchEvents);
293 auto layout = new QHBoxLayout(this); 293 auto* layout = new QHBoxLayout(this);
294 layout->setMargin(0); 294 layout->setContentsMargins(0, 0, 0, 0);
295 setLayout(layout); 295 setLayout(layout);
296 input_subsystem->Initialize(); 296 input_subsystem->Initialize();
297 297
@@ -394,7 +394,7 @@ void GRenderWindow::mousePressEvent(QMouseEvent* event) {
394 input_subsystem->GetMouse()->PressButton(x, y, event->button()); 394 input_subsystem->GetMouse()->PressButton(x, y, event->button());
395 395
396 if (event->button() == Qt::LeftButton) { 396 if (event->button() == Qt::LeftButton) {
397 this->TouchPressed(x, y); 397 this->TouchPressed(x, y, 0);
398 } 398 }
399 399
400 emit MouseActivity(); 400 emit MouseActivity();
@@ -409,7 +409,7 @@ void GRenderWindow::mouseMoveEvent(QMouseEvent* event) {
409 auto pos = event->pos(); 409 auto pos = event->pos();
410 const auto [x, y] = ScaleTouch(pos); 410 const auto [x, y] = ScaleTouch(pos);
411 input_subsystem->GetMouse()->MouseMove(x, y); 411 input_subsystem->GetMouse()->MouseMove(x, y);
412 this->TouchMoved(x, y); 412 this->TouchMoved(x, y, 0);
413 413
414 emit MouseActivity(); 414 emit MouseActivity();
415} 415}
@@ -423,36 +423,72 @@ void GRenderWindow::mouseReleaseEvent(QMouseEvent* event) {
423 input_subsystem->GetMouse()->ReleaseButton(event->button()); 423 input_subsystem->GetMouse()->ReleaseButton(event->button());
424 424
425 if (event->button() == Qt::LeftButton) { 425 if (event->button() == Qt::LeftButton) {
426 this->TouchReleased(); 426 this->TouchReleased(0);
427 } 427 }
428} 428}
429 429
430void GRenderWindow::TouchBeginEvent(const QTouchEvent* event) { 430void GRenderWindow::TouchBeginEvent(const QTouchEvent* event) {
431 // TouchBegin always has exactly one touch point, so take the .first() 431 QList<QTouchEvent::TouchPoint> touch_points = event->touchPoints();
432 const auto [x, y] = ScaleTouch(event->touchPoints().first().pos()); 432 for (const auto& touch_point : touch_points) {
433 this->TouchPressed(x, y); 433 if (!TouchUpdate(touch_point)) {
434 TouchStart(touch_point);
435 }
436 }
434} 437}
435 438
436void GRenderWindow::TouchUpdateEvent(const QTouchEvent* event) { 439void GRenderWindow::TouchUpdateEvent(const QTouchEvent* event) {
437 QPointF pos; 440 QList<QTouchEvent::TouchPoint> touch_points = event->touchPoints();
438 int active_points = 0; 441 for (const auto& touch_point : touch_points) {
439 442 if (!TouchUpdate(touch_point)) {
440 // average all active touch points 443 TouchStart(touch_point);
441 for (const auto& tp : event->touchPoints()) {
442 if (tp.state() & (Qt::TouchPointPressed | Qt::TouchPointMoved | Qt::TouchPointStationary)) {
443 active_points++;
444 pos += tp.pos();
445 } 444 }
446 } 445 }
446 // Release all inactive points
447 for (std::size_t id = 0; id < touch_ids.size(); ++id) {
448 if (!TouchExist(touch_ids[id], touch_points)) {
449 touch_ids[id] = 0;
450 this->TouchReleased(id + 1);
451 }
452 }
453}
447 454
448 pos /= active_points; 455void GRenderWindow::TouchEndEvent() {
456 for (std::size_t id = 0; id < touch_ids.size(); ++id) {
457 if (touch_ids[id] != 0) {
458 touch_ids[id] = 0;
459 this->TouchReleased(id + 1);
460 }
461 }
462}
449 463
450 const auto [x, y] = ScaleTouch(pos); 464bool GRenderWindow::TouchStart(const QTouchEvent::TouchPoint& touch_point) {
451 this->TouchMoved(x, y); 465 for (std::size_t id = 0; id < touch_ids.size(); ++id) {
466 if (touch_ids[id] == 0) {
467 touch_ids[id] = touch_point.id() + 1;
468 const auto [x, y] = ScaleTouch(touch_point.pos());
469 this->TouchPressed(x, y, id + 1);
470 return true;
471 }
472 }
473 return false;
452} 474}
453 475
454void GRenderWindow::TouchEndEvent() { 476bool GRenderWindow::TouchUpdate(const QTouchEvent::TouchPoint& touch_point) {
455 this->TouchReleased(); 477 for (std::size_t id = 0; id < touch_ids.size(); ++id) {
478 if (touch_ids[id] == static_cast<std::size_t>(touch_point.id() + 1)) {
479 const auto [x, y] = ScaleTouch(touch_point.pos());
480 this->TouchMoved(x, y, id + 1);
481 return true;
482 }
483 }
484 return false;
485}
486
487bool GRenderWindow::TouchExist(std::size_t id,
488 const QList<QTouchEvent::TouchPoint>& touch_points) const {
489 return std::any_of(touch_points.begin(), touch_points.end(), [id](const auto& point) {
490 return id == static_cast<std::size_t>(point.id() + 1);
491 });
456} 492}
457 493
458bool GRenderWindow::event(QEvent* event) { 494bool GRenderWindow::event(QEvent* event) {
diff --git a/src/yuzu/bootmanager.h b/src/yuzu/bootmanager.h
index 339095509..b5ec7de07 100644
--- a/src/yuzu/bootmanager.h
+++ b/src/yuzu/bootmanager.h
@@ -11,6 +11,7 @@
11 11
12#include <QImage> 12#include <QImage>
13#include <QThread> 13#include <QThread>
14#include <QTouchEvent>
14#include <QWidget> 15#include <QWidget>
15#include <QWindow> 16#include <QWindow>
16 17
@@ -21,7 +22,6 @@
21class GRenderWindow; 22class GRenderWindow;
22class GMainWindow; 23class GMainWindow;
23class QKeyEvent; 24class QKeyEvent;
24class QTouchEvent;
25class QStringList; 25class QStringList;
26 26
27namespace InputCommon { 27namespace InputCommon {
@@ -191,6 +191,10 @@ private:
191 void TouchUpdateEvent(const QTouchEvent* event); 191 void TouchUpdateEvent(const QTouchEvent* event);
192 void TouchEndEvent(); 192 void TouchEndEvent();
193 193
194 bool TouchStart(const QTouchEvent::TouchPoint& touch_point);
195 bool TouchUpdate(const QTouchEvent::TouchPoint& touch_point);
196 bool TouchExist(std::size_t id, const QList<QTouchEvent::TouchPoint>& touch_points) const;
197
194 void OnMinimalClientAreaChangeRequest(std::pair<u32, u32> minimal_size) override; 198 void OnMinimalClientAreaChangeRequest(std::pair<u32, u32> minimal_size) override;
195 199
196 bool InitializeOpenGL(); 200 bool InitializeOpenGL();
@@ -215,6 +219,8 @@ private:
215 219
216 bool first_frame = false; 220 bool first_frame = false;
217 221
222 std::array<std::size_t, 16> touch_ids{};
223
218protected: 224protected:
219 void showEvent(QShowEvent* event) override; 225 void showEvent(QShowEvent* event) override;
220 bool eventFilter(QObject* object, QEvent* event) override; 226 bool eventFilter(QObject* object, QEvent* event) override;
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index cda448718..8d85a1986 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -464,13 +464,7 @@ void Config::ReadMouseValues() {
464void Config::ReadTouchscreenValues() { 464void Config::ReadTouchscreenValues() {
465 Settings::values.touchscreen.enabled = 465 Settings::values.touchscreen.enabled =
466 ReadSetting(QStringLiteral("touchscreen_enabled"), true).toBool(); 466 ReadSetting(QStringLiteral("touchscreen_enabled"), true).toBool();
467 Settings::values.touchscreen.device =
468 ReadSetting(QStringLiteral("touchscreen_device"), QStringLiteral("engine:emu_window"))
469 .toString()
470 .toStdString();
471 467
472 Settings::values.touchscreen.finger =
473 ReadSetting(QStringLiteral("touchscreen_finger"), 0).toUInt();
474 Settings::values.touchscreen.rotation_angle = 468 Settings::values.touchscreen.rotation_angle =
475 ReadSetting(QStringLiteral("touchscreen_angle"), 0).toUInt(); 469 ReadSetting(QStringLiteral("touchscreen_angle"), 0).toUInt();
476 Settings::values.touchscreen.diameter_x = 470 Settings::values.touchscreen.diameter_x =
@@ -563,7 +557,8 @@ void Config::ReadMotionTouchValues() {
563 .toString() 557 .toString()
564 .toStdString(); 558 .toStdString();
565 Settings::values.touch_device = 559 Settings::values.touch_device =
566 ReadSetting(QStringLiteral("touch_device"), QStringLiteral("engine:emu_window")) 560 ReadSetting(QStringLiteral("touch_device"),
561 QStringLiteral("min_x:100,min_y:50,max_x:1800,max_y:850"))
567 .toString() 562 .toString()
568 .toStdString(); 563 .toStdString();
569 Settings::values.use_touch_from_button = 564 Settings::values.use_touch_from_button =
@@ -1005,7 +1000,8 @@ void Config::SavePlayerValue(std::size_t player_index) {
1005 static_cast<u8>(Settings::ControllerType::ProController)); 1000 static_cast<u8>(Settings::ControllerType::ProController));
1006 1001
1007 if (!player_prefix.isEmpty()) { 1002 if (!player_prefix.isEmpty()) {
1008 WriteSetting(QStringLiteral("%1connected").arg(player_prefix), player.connected, false); 1003 WriteSetting(QStringLiteral("%1connected").arg(player_prefix), player.connected,
1004 player_index == 0);
1009 WriteSetting(QStringLiteral("%1vibration_enabled").arg(player_prefix), 1005 WriteSetting(QStringLiteral("%1vibration_enabled").arg(player_prefix),
1010 player.vibration_enabled, true); 1006 player.vibration_enabled, true);
1011 WriteSetting(QStringLiteral("%1vibration_strength").arg(player_prefix), 1007 WriteSetting(QStringLiteral("%1vibration_strength").arg(player_prefix),
@@ -1087,10 +1083,7 @@ void Config::SaveTouchscreenValues() {
1087 const auto& touchscreen = Settings::values.touchscreen; 1083 const auto& touchscreen = Settings::values.touchscreen;
1088 1084
1089 WriteSetting(QStringLiteral("touchscreen_enabled"), touchscreen.enabled, true); 1085 WriteSetting(QStringLiteral("touchscreen_enabled"), touchscreen.enabled, true);
1090 WriteSetting(QStringLiteral("touchscreen_device"), QString::fromStdString(touchscreen.device),
1091 QStringLiteral("engine:emu_window"));
1092 1086
1093 WriteSetting(QStringLiteral("touchscreen_finger"), touchscreen.finger, 0);
1094 WriteSetting(QStringLiteral("touchscreen_angle"), touchscreen.rotation_angle, 0); 1087 WriteSetting(QStringLiteral("touchscreen_angle"), touchscreen.rotation_angle, 0);
1095 WriteSetting(QStringLiteral("touchscreen_diameter_x"), touchscreen.diameter_x, 15); 1088 WriteSetting(QStringLiteral("touchscreen_diameter_x"), touchscreen.diameter_x, 15);
1096 WriteSetting(QStringLiteral("touchscreen_diameter_y"), touchscreen.diameter_y, 15); 1089 WriteSetting(QStringLiteral("touchscreen_diameter_y"), touchscreen.diameter_y, 15);
diff --git a/src/yuzu/configuration/configure_dialog.cpp b/src/yuzu/configuration/configure_dialog.cpp
index b33f8437a..d6b17a28d 100644
--- a/src/yuzu/configuration/configure_dialog.cpp
+++ b/src/yuzu/configuration/configure_dialog.cpp
@@ -117,31 +117,13 @@ void ConfigureDialog::UpdateVisibleTabs() {
117 return; 117 return;
118 } 118 }
119 119
120 const std::map<QWidget*, QString> widgets = {
121 {ui->generalTab, tr("General")},
122 {ui->systemTab, tr("System")},
123 {ui->profileManagerTab, tr("Profiles")},
124 {ui->inputTab, tr("Controls")},
125 {ui->hotkeysTab, tr("Hotkeys")},
126 {ui->cpuTab, tr("CPU")},
127 {ui->cpuDebugTab, tr("Debug")},
128 {ui->graphicsTab, tr("Graphics")},
129 {ui->graphicsAdvancedTab, tr("Advanced")},
130 {ui->audioTab, tr("Audio")},
131 {ui->debugTab, tr("Debug")},
132 {ui->webTab, tr("Web")},
133 {ui->uiTab, tr("UI")},
134 {ui->filesystemTab, tr("Filesystem")},
135 {ui->serviceTab, tr("Services")},
136 };
137
138 [[maybe_unused]] const QSignalBlocker blocker(ui->tabWidget); 120 [[maybe_unused]] const QSignalBlocker blocker(ui->tabWidget);
139 121
140 ui->tabWidget->clear(); 122 ui->tabWidget->clear();
141 123
142 const QList<QWidget*> tabs = qvariant_cast<QList<QWidget*>>(items[0]->data(Qt::UserRole)); 124 const auto tabs = qvariant_cast<QList<QWidget*>>(items[0]->data(Qt::UserRole));
143 125
144 for (const auto tab : tabs) { 126 for (auto* const tab : tabs) {
145 ui->tabWidget->addTab(tab, tab->accessibleName()); 127 ui->tabWidget->addTab(tab, tab->accessibleName());
146 } 128 }
147} 129}
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 46ea026e4..13f0351d4 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -575,6 +575,16 @@ void ConfigureInputPlayer::ApplyConfiguration() {
575 575
576 std::transform(motions_param.begin(), motions_param.end(), motions.begin(), 576 std::transform(motions_param.begin(), motions_param.end(), motions.begin(),
577 [](const Common::ParamPackage& param) { return param.Serialize(); }); 577 [](const Common::ParamPackage& param) { return param.Serialize(); });
578
579 // Apply configuration for handheld
580 if (player_index == 0) {
581 auto& handheld = Settings::values.players.GetValue()[HANDHELD_INDEX];
582 if (player.controller_type == Settings::ControllerType::Handheld) {
583 handheld = player;
584 }
585 handheld.connected = ui->groupConnectedController->isChecked() &&
586 player.controller_type == Settings::ControllerType::Handheld;
587 }
578} 588}
579 589
580void ConfigureInputPlayer::TryConnectSelectedController() { 590void ConfigureInputPlayer::TryConnectSelectedController() {
diff --git a/src/yuzu/configuration/configure_motion_touch.cpp b/src/yuzu/configuration/configure_motion_touch.cpp
index eb8eacbf9..1f2b792e4 100644
--- a/src/yuzu/configuration/configure_motion_touch.cpp
+++ b/src/yuzu/configuration/configure_motion_touch.cpp
@@ -4,12 +4,15 @@
4 4
5#include <array> 5#include <array>
6#include <sstream> 6#include <sstream>
7
7#include <QCloseEvent> 8#include <QCloseEvent>
8#include <QLabel> 9#include <QLabel>
9#include <QMessageBox> 10#include <QMessageBox>
10#include <QPushButton> 11#include <QPushButton>
12#include <QRegularExpression>
11#include <QStringListModel> 13#include <QStringListModel>
12#include <QVBoxLayout> 14#include <QVBoxLayout>
15
13#include "common/logging/log.h" 16#include "common/logging/log.h"
14#include "core/settings.h" 17#include "core/settings.h"
15#include "input_common/main.h" 18#include "input_common/main.h"
@@ -78,19 +81,11 @@ void CalibrationConfigurationDialog::UpdateButtonText(const QString& text) {
78 cancel_button->setText(text); 81 cancel_button->setText(text);
79} 82}
80 83
81constexpr std::array<std::pair<const char*, const char*>, 2> TouchProviders = {{
82 {"emu_window", QT_TRANSLATE_NOOP("ConfigureMotionTouch", "Emulator Window")},
83 {"cemuhookudp", QT_TRANSLATE_NOOP("ConfigureMotionTouch", "CemuhookUDP")},
84}};
85
86ConfigureMotionTouch::ConfigureMotionTouch(QWidget* parent, 84ConfigureMotionTouch::ConfigureMotionTouch(QWidget* parent,
87 InputCommon::InputSubsystem* input_subsystem_) 85 InputCommon::InputSubsystem* input_subsystem_)
88 : QDialog(parent), input_subsystem{input_subsystem_}, 86 : QDialog(parent), input_subsystem{input_subsystem_},
89 ui(std::make_unique<Ui::ConfigureMotionTouch>()) { 87 ui(std::make_unique<Ui::ConfigureMotionTouch>()) {
90 ui->setupUi(this); 88 ui->setupUi(this);
91 for (const auto& [provider, name] : TouchProviders) {
92 ui->touch_provider->addItem(tr(name), QString::fromUtf8(provider));
93 }
94 89
95 ui->udp_learn_more->setOpenExternalLinks(true); 90 ui->udp_learn_more->setOpenExternalLinks(true);
96 ui->udp_learn_more->setText( 91 ui->udp_learn_more->setText(
@@ -109,11 +104,7 @@ ConfigureMotionTouch::~ConfigureMotionTouch() = default;
109void ConfigureMotionTouch::SetConfiguration() { 104void ConfigureMotionTouch::SetConfiguration() {
110 const Common::ParamPackage motion_param(Settings::values.motion_device); 105 const Common::ParamPackage motion_param(Settings::values.motion_device);
111 const Common::ParamPackage touch_param(Settings::values.touch_device); 106 const Common::ParamPackage touch_param(Settings::values.touch_device);
112 const std::string motion_engine = motion_param.Get("engine", "motion_emu");
113 const std::string touch_engine = touch_param.Get("engine", "emu_window");
114 107
115 ui->touch_provider->setCurrentIndex(
116 ui->touch_provider->findData(QString::fromStdString(touch_engine)));
117 ui->touch_from_button_checkbox->setChecked(Settings::values.use_touch_from_button); 108 ui->touch_from_button_checkbox->setChecked(Settings::values.use_touch_from_button);
118 touch_from_button_maps = Settings::values.touch_from_button_maps; 109 touch_from_button_maps = Settings::values.touch_from_button_maps;
119 for (const auto& touch_map : touch_from_button_maps) { 110 for (const auto& touch_map : touch_from_button_maps) {
@@ -146,30 +137,21 @@ void ConfigureMotionTouch::SetConfiguration() {
146} 137}
147 138
148void ConfigureMotionTouch::UpdateUiDisplay() { 139void ConfigureMotionTouch::UpdateUiDisplay() {
149 const QString touch_engine = ui->touch_provider->currentData().toString();
150 const QString cemuhook_udp = QStringLiteral("cemuhookudp"); 140 const QString cemuhook_udp = QStringLiteral("cemuhookudp");
151 141
152 ui->motion_sensitivity_label->setVisible(true); 142 ui->motion_sensitivity_label->setVisible(true);
153 ui->motion_sensitivity->setVisible(true); 143 ui->motion_sensitivity->setVisible(true);
154 144
155 if (touch_engine == cemuhook_udp) { 145 ui->touch_calibration->setVisible(true);
156 ui->touch_calibration->setVisible(true); 146 ui->touch_calibration_config->setVisible(true);
157 ui->touch_calibration_config->setVisible(true); 147 ui->touch_calibration_label->setVisible(true);
158 ui->touch_calibration_label->setVisible(true); 148 ui->touch_calibration->setText(
159 ui->touch_calibration->setText( 149 QStringLiteral("(%1, %2) - (%3, %4)").arg(min_x).arg(min_y).arg(max_x).arg(max_y));
160 QStringLiteral("(%1, %2) - (%3, %4)").arg(min_x).arg(min_y).arg(max_x).arg(max_y));
161 } else {
162 ui->touch_calibration->setVisible(false);
163 ui->touch_calibration_config->setVisible(false);
164 ui->touch_calibration_label->setVisible(false);
165 }
166 150
167 ui->udp_config_group_box->setVisible(true); 151 ui->udp_config_group_box->setVisible(true);
168} 152}
169 153
170void ConfigureMotionTouch::ConnectEvents() { 154void ConfigureMotionTouch::ConnectEvents() {
171 connect(ui->touch_provider, qOverload<int>(&QComboBox::currentIndexChanged), this,
172 [this](int index) { UpdateUiDisplay(); });
173 connect(ui->udp_test, &QPushButton::clicked, this, &ConfigureMotionTouch::OnCemuhookUDPTest); 155 connect(ui->udp_test, &QPushButton::clicked, this, &ConfigureMotionTouch::OnCemuhookUDPTest);
174 connect(ui->udp_add, &QPushButton::clicked, this, &ConfigureMotionTouch::OnUDPAddServer); 156 connect(ui->udp_add, &QPushButton::clicked, this, &ConfigureMotionTouch::OnUDPAddServer);
175 connect(ui->udp_remove, &QPushButton::clicked, this, &ConfigureMotionTouch::OnUDPDeleteServer); 157 connect(ui->udp_remove, &QPushButton::clicked, this, &ConfigureMotionTouch::OnUDPDeleteServer);
@@ -185,14 +167,15 @@ void ConfigureMotionTouch::ConnectEvents() {
185} 167}
186 168
187void ConfigureMotionTouch::OnUDPAddServer() { 169void ConfigureMotionTouch::OnUDPAddServer() {
188 QRegExp re(tr(R"re(^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4]" 170 // Validator for IP address
189 "[0-9]|[01]?[0-9][0-9]?)$)re")); // a valid ip address 171 const QRegularExpression re(QStringLiteral(
172 R"re(^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$)re"));
190 bool ok; 173 bool ok;
191 QString port_text = ui->udp_port->text(); 174 const QString port_text = ui->udp_port->text();
192 QString server_text = ui->udp_server->text(); 175 const QString server_text = ui->udp_server->text();
193 const QString server_string = tr("%1:%2").arg(server_text, port_text); 176 const QString server_string = tr("%1:%2").arg(server_text, port_text);
194 int port_number = port_text.toInt(&ok, 10); 177 const int port_number = port_text.toInt(&ok, 10);
195 int row = udp_server_list_model->rowCount(); 178 const int row = udp_server_list_model->rowCount();
196 179
197 if (!ok) { 180 if (!ok) {
198 QMessageBox::warning(this, tr("yuzu"), tr("Port number has invalid characters")); 181 QMessageBox::warning(this, tr("yuzu"), tr("Port number has invalid characters"));
@@ -202,7 +185,7 @@ void ConfigureMotionTouch::OnUDPAddServer() {
202 QMessageBox::warning(this, tr("yuzu"), tr("Port has to be in range 0 and 65353")); 185 QMessageBox::warning(this, tr("yuzu"), tr("Port has to be in range 0 and 65353"));
203 return; 186 return;
204 } 187 }
205 if (!re.exactMatch(server_text)) { 188 if (!re.match(server_text).hasMatch()) {
206 QMessageBox::warning(this, tr("yuzu"), tr("IP address is not valid")); 189 QMessageBox::warning(this, tr("yuzu"), tr("IP address is not valid"));
207 return; 190 return;
208 } 191 }
@@ -324,17 +307,11 @@ void ConfigureMotionTouch::ApplyConfiguration() {
324 return; 307 return;
325 } 308 }
326 309
327 std::string touch_engine = ui->touch_provider->currentData().toString().toStdString();
328
329 Common::ParamPackage touch_param{}; 310 Common::ParamPackage touch_param{};
330 touch_param.Set("engine", std::move(touch_engine)); 311 touch_param.Set("min_x", min_x);
331 312 touch_param.Set("min_y", min_y);
332 if (touch_engine == "cemuhookudp") { 313 touch_param.Set("max_x", max_x);
333 touch_param.Set("min_x", min_x); 314 touch_param.Set("max_y", max_y);
334 touch_param.Set("min_y", min_y);
335 touch_param.Set("max_x", max_x);
336 touch_param.Set("max_y", max_y);
337 }
338 315
339 Settings::values.touch_device = touch_param.Serialize(); 316 Settings::values.touch_device = touch_param.Serialize();
340 Settings::values.use_touch_from_button = ui->touch_from_button_checkbox->isChecked(); 317 Settings::values.use_touch_from_button = ui->touch_from_button_checkbox->isChecked();
diff --git a/src/yuzu/configuration/configure_motion_touch.ui b/src/yuzu/configuration/configure_motion_touch.ui
index ebca835ac..1e35ea946 100644
--- a/src/yuzu/configuration/configure_motion_touch.ui
+++ b/src/yuzu/configuration/configure_motion_touch.ui
@@ -68,23 +68,9 @@
68 <item> 68 <item>
69 <layout class="QHBoxLayout"> 69 <layout class="QHBoxLayout">
70 <item> 70 <item>
71 <widget class="QLabel" name="touch_provider_label">
72 <property name="text">
73 <string>Touch Provider:</string>
74 </property>
75 </widget>
76 </item>
77 <item>
78 <widget class="QComboBox" name="touch_provider"/>
79 </item>
80 </layout>
81 </item>
82 <item>
83 <layout class="QHBoxLayout">
84 <item>
85 <widget class="QLabel" name="touch_calibration_label"> 71 <widget class="QLabel" name="touch_calibration_label">
86 <property name="text"> 72 <property name="text">
87 <string>Calibration:</string> 73 <string>UDP Calibration:</string>
88 </property> 74 </property>
89 </widget> 75 </widget>
90 </item> 76 </item>
diff --git a/src/yuzu/configuration/configure_profile_manager.cpp b/src/yuzu/configuration/configure_profile_manager.cpp
index 13d9a4757..d102a43af 100644
--- a/src/yuzu/configuration/configure_profile_manager.cpp
+++ b/src/yuzu/configuration/configure_profile_manager.cpp
@@ -40,7 +40,7 @@ QString GetImagePath(Common::UUID uuid) {
40} 40}
41 41
42QString GetAccountUsername(const Service::Account::ProfileManager& manager, Common::UUID uuid) { 42QString GetAccountUsername(const Service::Account::ProfileManager& manager, Common::UUID uuid) {
43 Service::Account::ProfileBase profile; 43 Service::Account::ProfileBase profile{};
44 if (!manager.GetProfileBase(uuid, profile)) { 44 if (!manager.GetProfileBase(uuid, profile)) {
45 return {}; 45 return {};
46 } 46 }
@@ -147,7 +147,7 @@ void ConfigureProfileManager::SetConfiguration() {
147void ConfigureProfileManager::PopulateUserList() { 147void ConfigureProfileManager::PopulateUserList() {
148 const auto& profiles = profile_manager->GetAllUsers(); 148 const auto& profiles = profile_manager->GetAllUsers();
149 for (const auto& user : profiles) { 149 for (const auto& user : profiles) {
150 Service::Account::ProfileBase profile; 150 Service::Account::ProfileBase profile{};
151 if (!profile_manager->GetProfileBase(user, profile)) 151 if (!profile_manager->GetProfileBase(user, profile))
152 continue; 152 continue;
153 153
@@ -212,7 +212,7 @@ void ConfigureProfileManager::RenameUser() {
212 const auto uuid = profile_manager->GetUser(user); 212 const auto uuid = profile_manager->GetUser(user);
213 ASSERT(uuid); 213 ASSERT(uuid);
214 214
215 Service::Account::ProfileBase profile; 215 Service::Account::ProfileBase profile{};
216 if (!profile_manager->GetProfileBase(*uuid, profile)) 216 if (!profile_manager->GetProfileBase(*uuid, profile))
217 return; 217 return;
218 218
diff --git a/src/yuzu/configuration/configure_service.cpp b/src/yuzu/configuration/configure_service.cpp
index 0de7a4f0b..b580cfff2 100644
--- a/src/yuzu/configuration/configure_service.cpp
+++ b/src/yuzu/configuration/configure_service.cpp
@@ -9,6 +9,7 @@
9#include "ui_configure_service.h" 9#include "ui_configure_service.h"
10#include "yuzu/configuration/configure_service.h" 10#include "yuzu/configuration/configure_service.h"
11 11
12#ifdef YUZU_ENABLE_BOXCAT
12namespace { 13namespace {
13QString FormatEventStatusString(const Service::BCAT::EventStatus& status) { 14QString FormatEventStatusString(const Service::BCAT::EventStatus& status) {
14 QString out; 15 QString out;
@@ -32,6 +33,7 @@ QString FormatEventStatusString(const Service::BCAT::EventStatus& status) {
32 return out; 33 return out;
33} 34}
34} // Anonymous namespace 35} // Anonymous namespace
36#endif
35 37
36ConfigureService::ConfigureService(QWidget* parent) 38ConfigureService::ConfigureService(QWidget* parent)
37 : QWidget(parent), ui(std::make_unique<Ui::ConfigureService>()) { 39 : QWidget(parent), ui(std::make_unique<Ui::ConfigureService>()) {
diff --git a/src/yuzu/configuration/configure_touchscreen_advanced.cpp b/src/yuzu/configuration/configure_touchscreen_advanced.cpp
index 7d7cc00b7..29c86c7bc 100644
--- a/src/yuzu/configuration/configure_touchscreen_advanced.cpp
+++ b/src/yuzu/configuration/configure_touchscreen_advanced.cpp
@@ -33,21 +33,18 @@ void ConfigureTouchscreenAdvanced::RetranslateUI() {
33} 33}
34 34
35void ConfigureTouchscreenAdvanced::ApplyConfiguration() { 35void ConfigureTouchscreenAdvanced::ApplyConfiguration() {
36 Settings::values.touchscreen.finger = ui->finger_box->value();
37 Settings::values.touchscreen.diameter_x = ui->diameter_x_box->value(); 36 Settings::values.touchscreen.diameter_x = ui->diameter_x_box->value();
38 Settings::values.touchscreen.diameter_y = ui->diameter_y_box->value(); 37 Settings::values.touchscreen.diameter_y = ui->diameter_y_box->value();
39 Settings::values.touchscreen.rotation_angle = ui->angle_box->value(); 38 Settings::values.touchscreen.rotation_angle = ui->angle_box->value();
40} 39}
41 40
42void ConfigureTouchscreenAdvanced::LoadConfiguration() { 41void ConfigureTouchscreenAdvanced::LoadConfiguration() {
43 ui->finger_box->setValue(Settings::values.touchscreen.finger);
44 ui->diameter_x_box->setValue(Settings::values.touchscreen.diameter_x); 42 ui->diameter_x_box->setValue(Settings::values.touchscreen.diameter_x);
45 ui->diameter_y_box->setValue(Settings::values.touchscreen.diameter_y); 43 ui->diameter_y_box->setValue(Settings::values.touchscreen.diameter_y);
46 ui->angle_box->setValue(Settings::values.touchscreen.rotation_angle); 44 ui->angle_box->setValue(Settings::values.touchscreen.rotation_angle);
47} 45}
48 46
49void ConfigureTouchscreenAdvanced::RestoreDefaults() { 47void ConfigureTouchscreenAdvanced::RestoreDefaults() {
50 ui->finger_box->setValue(0);
51 ui->diameter_x_box->setValue(15); 48 ui->diameter_x_box->setValue(15);
52 ui->diameter_y_box->setValue(15); 49 ui->diameter_y_box->setValue(15);
53 ui->angle_box->setValue(0); 50 ui->angle_box->setValue(0);
diff --git a/src/yuzu/configuration/configure_touchscreen_advanced.ui b/src/yuzu/configuration/configure_touchscreen_advanced.ui
index 30ceccddb..88e7cf050 100644
--- a/src/yuzu/configuration/configure_touchscreen_advanced.ui
+++ b/src/yuzu/configuration/configure_touchscreen_advanced.ui
@@ -65,20 +65,13 @@
65 </property> 65 </property>
66 </spacer> 66 </spacer>
67 </item> 67 </item>
68 <item row="2" column="1"> 68 <item row="1" column="1">
69 <widget class="QLabel" name="label_4"> 69 <widget class="QLabel" name="label_4">
70 <property name="text"> 70 <property name="text">
71 <string>Touch Diameter Y</string> 71 <string>Touch Diameter Y</string>
72 </property> 72 </property>
73 </widget> 73 </widget>
74 </item> 74 </item>
75 <item row="0" column="1">
76 <widget class="QLabel" name="label">
77 <property name="text">
78 <string>Finger</string>
79 </property>
80 </widget>
81 </item>
82 <item row="0" column="3"> 75 <item row="0" column="3">
83 <spacer name="horizontalSpacer_2"> 76 <spacer name="horizontalSpacer_2">
84 <property name="orientation"> 77 <property name="orientation">
@@ -92,37 +85,27 @@
92 </property> 85 </property>
93 </spacer> 86 </spacer>
94 </item> 87 </item>
95 <item row="1" column="1"> 88 <item row="0" column="1">
96 <widget class="QLabel" name="label_3"> 89 <widget class="QLabel" name="label_3">
97 <property name="text"> 90 <property name="text">
98 <string>Touch Diameter X</string> 91 <string>Touch Diameter X</string>
99 </property> 92 </property>
100 </widget> 93 </widget>
101 </item> 94 </item>
102 <item row="0" column="2"> 95 <item row="2" column="1">
103 <widget class="QSpinBox" name="finger_box">
104 <property name="minimumSize">
105 <size>
106 <width>80</width>
107 <height>0</height>
108 </size>
109 </property>
110 </widget>
111 </item>
112 <item row="3" column="1">
113 <widget class="QLabel" name="label_5"> 96 <widget class="QLabel" name="label_5">
114 <property name="text"> 97 <property name="text">
115 <string>Rotational Angle</string> 98 <string>Rotational Angle</string>
116 </property> 99 </property>
117 </widget> 100 </widget>
118 </item> 101 </item>
119 <item row="1" column="2"> 102 <item row="0" column="2">
120 <widget class="QSpinBox" name="diameter_x_box"/> 103 <widget class="QSpinBox" name="diameter_x_box"/>
121 </item> 104 </item>
122 <item row="2" column="2"> 105 <item row="1" column="2">
123 <widget class="QSpinBox" name="diameter_y_box"/> 106 <widget class="QSpinBox" name="diameter_y_box"/>
124 </item> 107 </item>
125 <item row="3" column="2"> 108 <item row="2" column="2">
126 <widget class="QSpinBox" name="angle_box"/> 109 <widget class="QSpinBox" name="angle_box"/>
127 </item> 110 </item>
128 </layout> 111 </layout>
diff --git a/src/yuzu/debugger/wait_tree.cpp b/src/yuzu/debugger/wait_tree.cpp
index 0925c10b4..a93b5d3c2 100644
--- a/src/yuzu/debugger/wait_tree.cpp
+++ b/src/yuzu/debugger/wait_tree.cpp
@@ -14,10 +14,10 @@
14#include "core/core.h" 14#include "core/core.h"
15#include "core/hle/kernel/handle_table.h" 15#include "core/hle/kernel/handle_table.h"
16#include "core/hle/kernel/k_scheduler.h" 16#include "core/hle/kernel/k_scheduler.h"
17#include "core/hle/kernel/mutex.h" 17#include "core/hle/kernel/k_synchronization_object.h"
18#include "core/hle/kernel/process.h" 18#include "core/hle/kernel/process.h"
19#include "core/hle/kernel/readable_event.h" 19#include "core/hle/kernel/readable_event.h"
20#include "core/hle/kernel/synchronization_object.h" 20#include "core/hle/kernel/svc_common.h"
21#include "core/hle/kernel/thread.h" 21#include "core/hle/kernel/thread.h"
22#include "core/memory.h" 22#include "core/memory.h"
23 23
@@ -116,7 +116,7 @@ QString WaitTreeText::GetText() const {
116WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table) 116WaitTreeMutexInfo::WaitTreeMutexInfo(VAddr mutex_address, const Kernel::HandleTable& handle_table)
117 : mutex_address(mutex_address) { 117 : mutex_address(mutex_address) {
118 mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address); 118 mutex_value = Core::System::GetInstance().Memory().Read32(mutex_address);
119 owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Mutex::MutexOwnerMask); 119 owner_handle = static_cast<Kernel::Handle>(mutex_value & Kernel::Svc::HandleWaitMask);
120 owner = handle_table.Get<Kernel::Thread>(owner_handle); 120 owner = handle_table.Get<Kernel::Thread>(owner_handle);
121} 121}
122 122
@@ -127,7 +127,7 @@ QString WaitTreeMutexInfo::GetText() const {
127} 127}
128 128
129std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const { 129std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeMutexInfo::GetChildren() const {
130 const bool has_waiters = (mutex_value & Kernel::Mutex::MutexHasWaitersFlag) != 0; 130 const bool has_waiters = (mutex_value & Kernel::Svc::HandleWaitMask) != 0;
131 131
132 std::vector<std::unique_ptr<WaitTreeItem>> list; 132 std::vector<std::unique_ptr<WaitTreeItem>> list;
133 list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters))); 133 list.push_back(std::make_unique<WaitTreeText>(tr("has waiters: %1").arg(has_waiters)));
@@ -169,7 +169,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeCallstack::GetChildren() cons
169 return list; 169 return list;
170} 170}
171 171
172WaitTreeSynchronizationObject::WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& o) 172WaitTreeSynchronizationObject::WaitTreeSynchronizationObject(
173 const Kernel::KSynchronizationObject& o)
173 : object(o) {} 174 : object(o) {}
174WaitTreeSynchronizationObject::~WaitTreeSynchronizationObject() = default; 175WaitTreeSynchronizationObject::~WaitTreeSynchronizationObject() = default;
175 176
@@ -188,7 +189,7 @@ QString WaitTreeSynchronizationObject::GetText() const {
188} 189}
189 190
190std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make( 191std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::make(
191 const Kernel::SynchronizationObject& object) { 192 const Kernel::KSynchronizationObject& object) {
192 switch (object.GetHandleType()) { 193 switch (object.GetHandleType()) {
193 case Kernel::HandleType::ReadableEvent: 194 case Kernel::HandleType::ReadableEvent:
194 return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object)); 195 return std::make_unique<WaitTreeEvent>(static_cast<const Kernel::ReadableEvent&>(object));
@@ -202,7 +203,7 @@ std::unique_ptr<WaitTreeSynchronizationObject> WaitTreeSynchronizationObject::ma
202std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChildren() const { 203std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChildren() const {
203 std::vector<std::unique_ptr<WaitTreeItem>> list; 204 std::vector<std::unique_ptr<WaitTreeItem>> list;
204 205
205 const auto& threads = object.GetWaitingThreads(); 206 const auto& threads = object.GetWaitingThreadsForDebugging();
206 if (threads.empty()) { 207 if (threads.empty()) {
207 list.push_back(std::make_unique<WaitTreeText>(tr("waited by no thread"))); 208 list.push_back(std::make_unique<WaitTreeText>(tr("waited by no thread")));
208 } else { 209 } else {
@@ -211,8 +212,8 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeSynchronizationObject::GetChi
211 return list; 212 return list;
212} 213}
213 214
214WaitTreeObjectList::WaitTreeObjectList( 215WaitTreeObjectList::WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list,
215 const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& list, bool w_all) 216 bool w_all)
216 : object_list(list), wait_all(w_all) {} 217 : object_list(list), wait_all(w_all) {}
217 218
218WaitTreeObjectList::~WaitTreeObjectList() = default; 219WaitTreeObjectList::~WaitTreeObjectList() = default;
@@ -237,8 +238,8 @@ WaitTreeThread::~WaitTreeThread() = default;
237QString WaitTreeThread::GetText() const { 238QString WaitTreeThread::GetText() const {
238 const auto& thread = static_cast<const Kernel::Thread&>(object); 239 const auto& thread = static_cast<const Kernel::Thread&>(object);
239 QString status; 240 QString status;
240 switch (thread.GetStatus()) { 241 switch (thread.GetState()) {
241 case Kernel::ThreadStatus::Ready: 242 case Kernel::ThreadState::Runnable:
242 if (!thread.IsPaused()) { 243 if (!thread.IsPaused()) {
243 if (thread.WasRunning()) { 244 if (thread.WasRunning()) {
244 status = tr("running"); 245 status = tr("running");
@@ -249,35 +250,39 @@ QString WaitTreeThread::GetText() const {
249 status = tr("paused"); 250 status = tr("paused");
250 } 251 }
251 break; 252 break;
252 case Kernel::ThreadStatus::Paused: 253 case Kernel::ThreadState::Waiting:
253 status = tr("paused"); 254 switch (thread.GetWaitReasonForDebugging()) {
254 break; 255 case Kernel::ThreadWaitReasonForDebugging::Sleep:
255 case Kernel::ThreadStatus::WaitHLEEvent: 256 status = tr("sleeping");
256 status = tr("waiting for HLE return"); 257 break;
257 break; 258 case Kernel::ThreadWaitReasonForDebugging::IPC:
258 case Kernel::ThreadStatus::WaitSleep: 259 status = tr("waiting for IPC reply");
259 status = tr("sleeping"); 260 break;
260 break; 261 case Kernel::ThreadWaitReasonForDebugging::Synchronization:
261 case Kernel::ThreadStatus::WaitIPC: 262 status = tr("waiting for objects");
262 status = tr("waiting for IPC reply"); 263 break;
263 break; 264 case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
264 case Kernel::ThreadStatus::WaitSynch: 265 status = tr("waiting for condition variable");
265 status = tr("waiting for objects"); 266 break;
266 break; 267 case Kernel::ThreadWaitReasonForDebugging::Arbitration:
267 case Kernel::ThreadStatus::WaitMutex: 268 status = tr("waiting for address arbiter");
268 status = tr("waiting for mutex"); 269 break;
269 break; 270 case Kernel::ThreadWaitReasonForDebugging::Suspended:
270 case Kernel::ThreadStatus::WaitCondVar: 271 status = tr("waiting for suspend resume");
271 status = tr("waiting for condition variable"); 272 break;
273 default:
274 status = tr("waiting");
275 break;
276 }
272 break; 277 break;
273 case Kernel::ThreadStatus::WaitArb: 278 case Kernel::ThreadState::Initialized:
274 status = tr("waiting for address arbiter"); 279 status = tr("initialized");
275 break; 280 break;
276 case Kernel::ThreadStatus::Dormant: 281 case Kernel::ThreadState::Terminated:
277 status = tr("dormant"); 282 status = tr("terminated");
278 break; 283 break;
279 case Kernel::ThreadStatus::Dead: 284 default:
280 status = tr("dead"); 285 status = tr("unknown");
281 break; 286 break;
282 } 287 }
283 288
@@ -293,8 +298,8 @@ QColor WaitTreeThread::GetColor() const {
293 const std::size_t color_index = IsDarkTheme() ? 1 : 0; 298 const std::size_t color_index = IsDarkTheme() ? 1 : 0;
294 299
295 const auto& thread = static_cast<const Kernel::Thread&>(object); 300 const auto& thread = static_cast<const Kernel::Thread&>(object);
296 switch (thread.GetStatus()) { 301 switch (thread.GetState()) {
297 case Kernel::ThreadStatus::Ready: 302 case Kernel::ThreadState::Runnable:
298 if (!thread.IsPaused()) { 303 if (!thread.IsPaused()) {
299 if (thread.WasRunning()) { 304 if (thread.WasRunning()) {
300 return QColor(WaitTreeColors[0][color_index]); 305 return QColor(WaitTreeColors[0][color_index]);
@@ -304,21 +309,24 @@ QColor WaitTreeThread::GetColor() const {
304 } else { 309 } else {
305 return QColor(WaitTreeColors[2][color_index]); 310 return QColor(WaitTreeColors[2][color_index]);
306 } 311 }
307 case Kernel::ThreadStatus::Paused: 312 case Kernel::ThreadState::Waiting:
308 return QColor(WaitTreeColors[3][color_index]); 313 switch (thread.GetWaitReasonForDebugging()) {
309 case Kernel::ThreadStatus::WaitHLEEvent: 314 case Kernel::ThreadWaitReasonForDebugging::IPC:
310 case Kernel::ThreadStatus::WaitIPC: 315 return QColor(WaitTreeColors[4][color_index]);
311 return QColor(WaitTreeColors[4][color_index]); 316 case Kernel::ThreadWaitReasonForDebugging::Sleep:
312 case Kernel::ThreadStatus::WaitSleep: 317 return QColor(WaitTreeColors[5][color_index]);
313 return QColor(WaitTreeColors[5][color_index]); 318 case Kernel::ThreadWaitReasonForDebugging::Synchronization:
314 case Kernel::ThreadStatus::WaitSynch: 319 case Kernel::ThreadWaitReasonForDebugging::ConditionVar:
315 case Kernel::ThreadStatus::WaitMutex: 320 case Kernel::ThreadWaitReasonForDebugging::Arbitration:
316 case Kernel::ThreadStatus::WaitCondVar: 321 case Kernel::ThreadWaitReasonForDebugging::Suspended:
317 case Kernel::ThreadStatus::WaitArb: 322 return QColor(WaitTreeColors[6][color_index]);
318 return QColor(WaitTreeColors[6][color_index]); 323 break;
319 case Kernel::ThreadStatus::Dormant: 324 default:
325 return QColor(WaitTreeColors[3][color_index]);
326 }
327 case Kernel::ThreadState::Initialized:
320 return QColor(WaitTreeColors[7][color_index]); 328 return QColor(WaitTreeColors[7][color_index]);
321 case Kernel::ThreadStatus::Dead: 329 case Kernel::ThreadState::Terminated:
322 return QColor(WaitTreeColors[8][color_index]); 330 return QColor(WaitTreeColors[8][color_index]);
323 default: 331 default:
324 return WaitTreeItem::GetColor(); 332 return WaitTreeItem::GetColor();
@@ -354,11 +362,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
354 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID()))); 362 list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadID())));
355 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)") 363 list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
356 .arg(thread.GetPriority()) 364 .arg(thread.GetPriority())
357 .arg(thread.GetNominalPriority()))); 365 .arg(thread.GetBasePriority())));
358 list.push_back(std::make_unique<WaitTreeText>( 366 list.push_back(std::make_unique<WaitTreeText>(
359 tr("last running ticks = %1").arg(thread.GetLastScheduledTick()))); 367 tr("last running ticks = %1").arg(thread.GetLastScheduledTick())));
360 368
361 const VAddr mutex_wait_address = thread.GetMutexWaitAddress(); 369 const VAddr mutex_wait_address = thread.GetMutexWaitAddressForDebugging();
362 if (mutex_wait_address != 0) { 370 if (mutex_wait_address != 0) {
363 const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable(); 371 const auto& handle_table = thread.GetOwnerProcess()->GetHandleTable();
364 list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table)); 372 list.push_back(std::make_unique<WaitTreeMutexInfo>(mutex_wait_address, handle_table));
@@ -366,9 +374,11 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
366 list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex"))); 374 list.push_back(std::make_unique<WaitTreeText>(tr("not waiting for mutex")));
367 } 375 }
368 376
369 if (thread.GetStatus() == Kernel::ThreadStatus::WaitSynch) { 377 if (thread.GetState() == Kernel::ThreadState::Waiting &&
370 list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetSynchronizationObjects(), 378 thread.GetWaitReasonForDebugging() ==
371 thread.IsWaitingSync())); 379 Kernel::ThreadWaitReasonForDebugging::Synchronization) {
380 list.push_back(std::make_unique<WaitTreeObjectList>(thread.GetWaitObjectsForDebugging(),
381 thread.IsCancellable()));
372 } 382 }
373 383
374 list.push_back(std::make_unique<WaitTreeCallstack>(thread)); 384 list.push_back(std::make_unique<WaitTreeCallstack>(thread));
@@ -380,7 +390,7 @@ WaitTreeEvent::WaitTreeEvent(const Kernel::ReadableEvent& object)
380 : WaitTreeSynchronizationObject(object) {} 390 : WaitTreeSynchronizationObject(object) {}
381WaitTreeEvent::~WaitTreeEvent() = default; 391WaitTreeEvent::~WaitTreeEvent() = default;
382 392
383WaitTreeThreadList::WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list) 393WaitTreeThreadList::WaitTreeThreadList(const std::vector<Kernel::Thread*>& list)
384 : thread_list(list) {} 394 : thread_list(list) {}
385WaitTreeThreadList::~WaitTreeThreadList() = default; 395WaitTreeThreadList::~WaitTreeThreadList() = default;
386 396
diff --git a/src/yuzu/debugger/wait_tree.h b/src/yuzu/debugger/wait_tree.h
index 8e3bc4b24..cf96911ea 100644
--- a/src/yuzu/debugger/wait_tree.h
+++ b/src/yuzu/debugger/wait_tree.h
@@ -18,8 +18,8 @@ class EmuThread;
18 18
19namespace Kernel { 19namespace Kernel {
20class HandleTable; 20class HandleTable;
21class KSynchronizationObject;
21class ReadableEvent; 22class ReadableEvent;
22class SynchronizationObject;
23class Thread; 23class Thread;
24} // namespace Kernel 24} // namespace Kernel
25 25
@@ -102,30 +102,29 @@ private:
102class WaitTreeSynchronizationObject : public WaitTreeExpandableItem { 102class WaitTreeSynchronizationObject : public WaitTreeExpandableItem {
103 Q_OBJECT 103 Q_OBJECT
104public: 104public:
105 explicit WaitTreeSynchronizationObject(const Kernel::SynchronizationObject& object); 105 explicit WaitTreeSynchronizationObject(const Kernel::KSynchronizationObject& object);
106 ~WaitTreeSynchronizationObject() override; 106 ~WaitTreeSynchronizationObject() override;
107 107
108 static std::unique_ptr<WaitTreeSynchronizationObject> make( 108 static std::unique_ptr<WaitTreeSynchronizationObject> make(
109 const Kernel::SynchronizationObject& object); 109 const Kernel::KSynchronizationObject& object);
110 QString GetText() const override; 110 QString GetText() const override;
111 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; 111 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
112 112
113protected: 113protected:
114 const Kernel::SynchronizationObject& object; 114 const Kernel::KSynchronizationObject& object;
115}; 115};
116 116
117class WaitTreeObjectList : public WaitTreeExpandableItem { 117class WaitTreeObjectList : public WaitTreeExpandableItem {
118 Q_OBJECT 118 Q_OBJECT
119public: 119public:
120 WaitTreeObjectList(const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& list, 120 WaitTreeObjectList(const std::vector<Kernel::KSynchronizationObject*>& list, bool wait_all);
121 bool wait_all);
122 ~WaitTreeObjectList() override; 121 ~WaitTreeObjectList() override;
123 122
124 QString GetText() const override; 123 QString GetText() const override;
125 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; 124 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
126 125
127private: 126private:
128 const std::vector<std::shared_ptr<Kernel::SynchronizationObject>>& object_list; 127 const std::vector<Kernel::KSynchronizationObject*>& object_list;
129 bool wait_all; 128 bool wait_all;
130}; 129};
131 130
@@ -150,14 +149,14 @@ public:
150class WaitTreeThreadList : public WaitTreeExpandableItem { 149class WaitTreeThreadList : public WaitTreeExpandableItem {
151 Q_OBJECT 150 Q_OBJECT
152public: 151public:
153 explicit WaitTreeThreadList(const std::vector<std::shared_ptr<Kernel::Thread>>& list); 152 explicit WaitTreeThreadList(const std::vector<Kernel::Thread*>& list);
154 ~WaitTreeThreadList() override; 153 ~WaitTreeThreadList() override;
155 154
156 QString GetText() const override; 155 QString GetText() const override;
157 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override; 156 std::vector<std::unique_ptr<WaitTreeItem>> GetChildren() const override;
158 157
159private: 158private:
160 const std::vector<std::shared_ptr<Kernel::Thread>>& thread_list; 159 const std::vector<Kernel::Thread*>& thread_list;
161}; 160};
162 161
163class WaitTreeModel : public QAbstractItemModel { 162class WaitTreeModel : public QAbstractItemModel {
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index 70d865112..9afd5b45f 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -119,7 +119,7 @@ void GameListSearchField::setFocus() {
119GameListSearchField::GameListSearchField(GameList* parent) : QWidget{parent} { 119GameListSearchField::GameListSearchField(GameList* parent) : QWidget{parent} {
120 auto* const key_release_eater = new KeyReleaseEater(parent, this); 120 auto* const key_release_eater = new KeyReleaseEater(parent, this);
121 layout_filter = new QHBoxLayout; 121 layout_filter = new QHBoxLayout;
122 layout_filter->setMargin(8); 122 layout_filter->setContentsMargins(8, 8, 8, 8);
123 label_filter = new QLabel; 123 label_filter = new QLabel;
124 label_filter->setText(tr("Filter:")); 124 label_filter->setText(tr("Filter:"));
125 edit_filter = new QLineEdit; 125 edit_filter = new QLineEdit;
@@ -173,8 +173,8 @@ void GameList::OnItemExpanded(const QModelIndex& item) {
173 return; 173 return;
174 } 174 }
175 175
176 auto* game_dir = item.data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); 176 UISettings::values.game_dirs[item.data(GameListDir::GameDirRole).toInt()].expanded =
177 game_dir->expanded = tree_view->isExpanded(item); 177 tree_view->isExpanded(item);
178} 178}
179 179
180// Event in order to filter the gamelist after editing the searchfield 180// Event in order to filter the gamelist after editing the searchfield
@@ -262,9 +262,9 @@ void GameList::OnUpdateThemedIcons() {
262 Qt::DecorationRole); 262 Qt::DecorationRole);
263 break; 263 break;
264 case GameListItemType::CustomDir: { 264 case GameListItemType::CustomDir: {
265 const UISettings::GameDir* game_dir = 265 const UISettings::GameDir& game_dir =
266 child->data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); 266 UISettings::values.game_dirs[child->data(GameListDir::GameDirRole).toInt()];
267 const QString icon_name = QFileInfo::exists(game_dir->path) 267 const QString icon_name = QFileInfo::exists(game_dir.path)
268 ? QStringLiteral("folder") 268 ? QStringLiteral("folder")
269 : QStringLiteral("bad_folder"); 269 : QStringLiteral("bad_folder");
270 child->setData( 270 child->setData(
@@ -366,7 +366,7 @@ void GameList::AddDirEntry(GameListDir* entry_items) {
366 item_model->invisibleRootItem()->appendRow(entry_items); 366 item_model->invisibleRootItem()->appendRow(entry_items);
367 tree_view->setExpanded( 367 tree_view->setExpanded(
368 entry_items->index(), 368 entry_items->index(),
369 entry_items->data(GameListDir::GameDirRole).value<UISettings::GameDir*>()->expanded); 369 UISettings::values.game_dirs[entry_items->data(GameListDir::GameDirRole).toInt()].expanded);
370} 370}
371 371
372void GameList::AddEntry(const QList<QStandardItem*>& entry_items, GameListDir* parent) { 372void GameList::AddEntry(const QList<QStandardItem*>& entry_items, GameListDir* parent) {
@@ -549,7 +549,7 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
549 549
550void GameList::AddCustomDirPopup(QMenu& context_menu, QModelIndex selected) { 550void GameList::AddCustomDirPopup(QMenu& context_menu, QModelIndex selected) {
551 UISettings::GameDir& game_dir = 551 UISettings::GameDir& game_dir =
552 *selected.data(GameListDir::GameDirRole).value<UISettings::GameDir*>(); 552 UISettings::values.game_dirs[selected.data(GameListDir::GameDirRole).toInt()];
553 553
554 QAction* deep_scan = context_menu.addAction(tr("Scan Subfolders")); 554 QAction* deep_scan = context_menu.addAction(tr("Scan Subfolders"));
555 QAction* delete_dir = context_menu.addAction(tr("Remove Game Directory")); 555 QAction* delete_dir = context_menu.addAction(tr("Remove Game Directory"));
@@ -568,8 +568,7 @@ void GameList::AddCustomDirPopup(QMenu& context_menu, QModelIndex selected) {
568} 568}
569 569
570void GameList::AddPermDirPopup(QMenu& context_menu, QModelIndex selected) { 570void GameList::AddPermDirPopup(QMenu& context_menu, QModelIndex selected) {
571 UISettings::GameDir& game_dir = 571 const int game_dir_index = selected.data(GameListDir::GameDirRole).toInt();
572 *selected.data(GameListDir::GameDirRole).value<UISettings::GameDir*>();
573 572
574 QAction* move_up = context_menu.addAction(tr("\u25B2 Move Up")); 573 QAction* move_up = context_menu.addAction(tr("\u25B2 Move Up"));
575 QAction* move_down = context_menu.addAction(tr("\u25bc Move Down")); 574 QAction* move_down = context_menu.addAction(tr("\u25bc Move Down"));
@@ -580,34 +579,39 @@ void GameList::AddPermDirPopup(QMenu& context_menu, QModelIndex selected) {
580 move_up->setEnabled(row > 0); 579 move_up->setEnabled(row > 0);
581 move_down->setEnabled(row < item_model->rowCount() - 2); 580 move_down->setEnabled(row < item_model->rowCount() - 2);
582 581
583 connect(move_up, &QAction::triggered, [this, selected, row, &game_dir] { 582 connect(move_up, &QAction::triggered, [this, selected, row, game_dir_index] {
584 // find the indices of the items in settings and swap them 583 const int other_index = selected.sibling(row - 1, 0).data(GameListDir::GameDirRole).toInt();
585 std::swap(UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf(game_dir)], 584 // swap the items in the settings
586 UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf( 585 std::swap(UISettings::values.game_dirs[game_dir_index],
587 *selected.sibling(row - 1, 0) 586 UISettings::values.game_dirs[other_index]);
588 .data(GameListDir::GameDirRole) 587 // swap the indexes held by the QVariants
589 .value<UISettings::GameDir*>())]); 588 item_model->setData(selected, QVariant(other_index), GameListDir::GameDirRole);
589 item_model->setData(selected.sibling(row - 1, 0), QVariant(game_dir_index),
590 GameListDir::GameDirRole);
590 // move the treeview items 591 // move the treeview items
591 QList<QStandardItem*> item = item_model->takeRow(row); 592 QList<QStandardItem*> item = item_model->takeRow(row);
592 item_model->invisibleRootItem()->insertRow(row - 1, item); 593 item_model->invisibleRootItem()->insertRow(row - 1, item);
593 tree_view->setExpanded(selected, game_dir.expanded); 594 tree_view->setExpanded(selected, UISettings::values.game_dirs[game_dir_index].expanded);
594 }); 595 });
595 596
596 connect(move_down, &QAction::triggered, [this, selected, row, &game_dir] { 597 connect(move_down, &QAction::triggered, [this, selected, row, game_dir_index] {
597 // find the indices of the items in settings and swap them 598 const int other_index = selected.sibling(row + 1, 0).data(GameListDir::GameDirRole).toInt();
598 std::swap(UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf(game_dir)], 599 // swap the items in the settings
599 UISettings::values.game_dirs[UISettings::values.game_dirs.indexOf( 600 std::swap(UISettings::values.game_dirs[game_dir_index],
600 *selected.sibling(row + 1, 0) 601 UISettings::values.game_dirs[other_index]);
601 .data(GameListDir::GameDirRole) 602 // swap the indexes held by the QVariants
602 .value<UISettings::GameDir*>())]); 603 item_model->setData(selected, QVariant(other_index), GameListDir::GameDirRole);
604 item_model->setData(selected.sibling(row + 1, 0), QVariant(game_dir_index),
605 GameListDir::GameDirRole);
603 // move the treeview items 606 // move the treeview items
604 const QList<QStandardItem*> item = item_model->takeRow(row); 607 const QList<QStandardItem*> item = item_model->takeRow(row);
605 item_model->invisibleRootItem()->insertRow(row + 1, item); 608 item_model->invisibleRootItem()->insertRow(row + 1, item);
606 tree_view->setExpanded(selected, game_dir.expanded); 609 tree_view->setExpanded(selected, UISettings::values.game_dirs[game_dir_index].expanded);
607 }); 610 });
608 611
609 connect(open_directory_location, &QAction::triggered, 612 connect(open_directory_location, &QAction::triggered, [this, game_dir_index] {
610 [this, game_dir] { emit OpenDirectory(game_dir.path); }); 613 emit OpenDirectory(UISettings::values.game_dirs[game_dir_index].path);
614 });
611} 615}
612 616
613void GameList::LoadCompatibilityList() { 617void GameList::LoadCompatibilityList() {
diff --git a/src/yuzu/game_list_p.h b/src/yuzu/game_list_p.h
index df935022d..f25445f18 100644
--- a/src/yuzu/game_list_p.h
+++ b/src/yuzu/game_list_p.h
@@ -230,7 +230,7 @@ public:
230 setData(type(), TypeRole); 230 setData(type(), TypeRole);
231 231
232 UISettings::GameDir* game_dir = &directory; 232 UISettings::GameDir* game_dir = &directory;
233 setData(QVariant::fromValue(game_dir), GameDirRole); 233 setData(QVariant(UISettings::values.game_dirs.indexOf(directory)), GameDirRole);
234 234
235 const int icon_size = std::min(static_cast<int>(UISettings::values.icon_size), 64); 235 const int icon_size = std::min(static_cast<int>(UISettings::values.icon_size), 64);
236 switch (dir_type) { 236 switch (dir_type) {
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 41ef6f6b8..f76102459 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -296,10 +296,6 @@ void Config::ReadValues() {
296 sdl2_config->GetBoolean("ControlsGeneral", "motion_enabled", true)); 296 sdl2_config->GetBoolean("ControlsGeneral", "motion_enabled", true));
297 Settings::values.touchscreen.enabled = 297 Settings::values.touchscreen.enabled =
298 sdl2_config->GetBoolean("ControlsGeneral", "touch_enabled", true); 298 sdl2_config->GetBoolean("ControlsGeneral", "touch_enabled", true);
299 Settings::values.touchscreen.device =
300 sdl2_config->Get("ControlsGeneral", "touch_device", "engine:emu_window");
301 Settings::values.touchscreen.finger =
302 sdl2_config->GetInteger("ControlsGeneral", "touch_finger", 0);
303 Settings::values.touchscreen.rotation_angle = 299 Settings::values.touchscreen.rotation_angle =
304 sdl2_config->GetInteger("ControlsGeneral", "touch_angle", 0); 300 sdl2_config->GetInteger("ControlsGeneral", "touch_angle", 0);
305 Settings::values.touchscreen.diameter_x = 301 Settings::values.touchscreen.diameter_x =
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
index e32bed5e6..7843d5167 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
@@ -29,16 +29,16 @@ EmuWindow_SDL2::~EmuWindow_SDL2() {
29} 29}
30 30
31void EmuWindow_SDL2::OnMouseMotion(s32 x, s32 y) { 31void EmuWindow_SDL2::OnMouseMotion(s32 x, s32 y) {
32 TouchMoved((unsigned)std::max(x, 0), (unsigned)std::max(y, 0)); 32 TouchMoved((unsigned)std::max(x, 0), (unsigned)std::max(y, 0), 0);
33 input_subsystem->GetMouse()->MouseMove(x, y); 33 input_subsystem->GetMouse()->MouseMove(x, y);
34} 34}
35 35
36void EmuWindow_SDL2::OnMouseButton(u32 button, u8 state, s32 x, s32 y) { 36void EmuWindow_SDL2::OnMouseButton(u32 button, u8 state, s32 x, s32 y) {
37 if (button == SDL_BUTTON_LEFT) { 37 if (button == SDL_BUTTON_LEFT) {
38 if (state == SDL_PRESSED) { 38 if (state == SDL_PRESSED) {
39 TouchPressed((unsigned)std::max(x, 0), (unsigned)std::max(y, 0)); 39 TouchPressed((unsigned)std::max(x, 0), (unsigned)std::max(y, 0), 0);
40 } else { 40 } else {
41 TouchReleased(); 41 TouchReleased(0);
42 } 42 }
43 } else if (button == SDL_BUTTON_RIGHT) { 43 } else if (button == SDL_BUTTON_RIGHT) {
44 if (state == SDL_PRESSED) { 44 if (state == SDL_PRESSED) {
@@ -66,16 +66,16 @@ void EmuWindow_SDL2::OnFingerDown(float x, float y) {
66 // 3DS does 66 // 3DS does
67 67
68 const auto [px, py] = TouchToPixelPos(x, y); 68 const auto [px, py] = TouchToPixelPos(x, y);
69 TouchPressed(px, py); 69 TouchPressed(px, py, 0);
70} 70}
71 71
72void EmuWindow_SDL2::OnFingerMotion(float x, float y) { 72void EmuWindow_SDL2::OnFingerMotion(float x, float y) {
73 const auto [px, py] = TouchToPixelPos(x, y); 73 const auto [px, py] = TouchToPixelPos(x, y);
74 TouchMoved(px, py); 74 TouchMoved(px, py, 0);
75} 75}
76 76
77void EmuWindow_SDL2::OnFingerUp() { 77void EmuWindow_SDL2::OnFingerUp() {
78 TouchReleased(); 78 TouchReleased(0);
79} 79}
80 80
81void EmuWindow_SDL2::OnKeyEvent(int key, u8 state) { 81void EmuWindow_SDL2::OnKeyEvent(int key, u8 state) {
diff --git a/src/yuzu_tester/CMakeLists.txt b/src/yuzu_tester/CMakeLists.txt
deleted file mode 100644
index d8a2a1511..000000000
--- a/src/yuzu_tester/CMakeLists.txt
+++ /dev/null
@@ -1,32 +0,0 @@
1set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/CMakeModules)
2
3add_executable(yuzu-tester
4 config.cpp
5 config.h
6 default_ini.h
7 emu_window/emu_window_sdl2_hide.cpp
8 emu_window/emu_window_sdl2_hide.h
9 resource.h
10 service/yuzutest.cpp
11 service/yuzutest.h
12 yuzu.cpp
13 yuzu.rc
14)
15
16create_target_directory_groups(yuzu-tester)
17
18target_link_libraries(yuzu-tester PRIVATE common core input_common)
19target_link_libraries(yuzu-tester PRIVATE inih glad)
20if (MSVC)
21 target_link_libraries(yuzu-tester PRIVATE getopt)
22endif()
23target_link_libraries(yuzu-tester PRIVATE ${PLATFORM_LIBRARIES} SDL2 Threads::Threads)
24
25if(UNIX AND NOT APPLE)
26 install(TARGETS yuzu-tester RUNTIME DESTINATION "${CMAKE_INSTALL_PREFIX}/bin")
27endif()
28
29if (MSVC)
30 include(CopyYuzuSDLDeps)
31 copy_yuzu_SDL_deps(yuzu-tester)
32endif()
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
deleted file mode 100644
index 0aa143e1f..000000000
--- a/src/yuzu_tester/config.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <sstream>
7#include <SDL.h>
8#include <inih/cpp/INIReader.h>
9#include "common/file_util.h"
10#include "common/logging/log.h"
11#include "common/param_package.h"
12#include "core/hle/service/acc/profile_manager.h"
13#include "core/settings.h"
14#include "input_common/main.h"
15#include "yuzu_tester/config.h"
16#include "yuzu_tester/default_ini.h"
17
18namespace FS = Common::FS;
19
20Config::Config() {
21 // TODO: Don't hardcode the path; let the frontend decide where to put the config files.
22 sdl2_config_loc = FS::GetUserPath(FS::UserPath::ConfigDir) + "sdl2-tester-config.ini";
23 sdl2_config = std::make_unique<INIReader>(sdl2_config_loc);
24
25 Reload();
26}
27
28Config::~Config() = default;
29
30bool Config::LoadINI(const std::string& default_contents, bool retry) {
31 const char* location = this->sdl2_config_loc.c_str();
32 if (sdl2_config->ParseError() < 0) {
33 if (retry) {
34 LOG_WARNING(Config, "Failed to load {}. Creating file from defaults...", location);
35 FS::CreateFullPath(location);
36 FS::WriteStringToFile(true, default_contents, location);
37 sdl2_config = std::make_unique<INIReader>(location); // Reopen file
38
39 return LoadINI(default_contents, false);
40 }
41 LOG_ERROR(Config, "Failed.");
42 return false;
43 }
44 LOG_INFO(Config, "Successfully loaded {}", location);
45 return true;
46}
47
48void Config::ReadValues() {
49 // Controls
50 for (std::size_t p = 0; p < Settings::values.players.GetValue().size(); ++p) {
51 for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) {
52 Settings::values.players.GetValue()[p].buttons[i] = "";
53 }
54
55 for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) {
56 Settings::values.players.GetValue()[p].analogs[i] = "";
57 }
58 }
59
60 Settings::values.mouse_enabled = false;
61 for (int i = 0; i < Settings::NativeMouseButton::NumMouseButtons; ++i) {
62 Settings::values.mouse_buttons[i] = "";
63 }
64
65 Settings::values.motion_device = "";
66
67 Settings::values.keyboard_enabled = false;
68
69 Settings::values.debug_pad_enabled = false;
70 for (int i = 0; i < Settings::NativeButton::NumButtons; ++i) {
71 Settings::values.debug_pad_buttons[i] = "";
72 }
73
74 for (int i = 0; i < Settings::NativeAnalog::NumAnalogs; ++i) {
75 Settings::values.debug_pad_analogs[i] = "";
76 }
77
78 Settings::values.vibration_enabled.SetValue(true);
79 Settings::values.enable_accurate_vibrations.SetValue(false);
80 Settings::values.motion_enabled.SetValue(true);
81 Settings::values.touchscreen.enabled = "";
82 Settings::values.touchscreen.device = "";
83 Settings::values.touchscreen.finger = 0;
84 Settings::values.touchscreen.rotation_angle = 0;
85 Settings::values.touchscreen.diameter_x = 15;
86 Settings::values.touchscreen.diameter_y = 15;
87
88 Settings::values.use_docked_mode.SetValue(
89 sdl2_config->GetBoolean("Controls", "use_docked_mode", true));
90
91 // Data Storage
92 Settings::values.use_virtual_sd =
93 sdl2_config->GetBoolean("Data Storage", "use_virtual_sd", true);
94 FS::GetUserPath(Common::FS::UserPath::NANDDir,
95 sdl2_config->Get("Data Storage", "nand_directory",
96 Common::FS::GetUserPath(Common::FS::UserPath::NANDDir)));
97 FS::GetUserPath(Common::FS::UserPath::SDMCDir,
98 sdl2_config->Get("Data Storage", "sdmc_directory",
99 Common::FS::GetUserPath(Common::FS::UserPath::SDMCDir)));
100
101 // System
102 Settings::values.current_user = std::clamp<int>(
103 sdl2_config->GetInteger("System", "current_user", 0), 0, Service::Account::MAX_USERS - 1);
104
105 const auto rng_seed_enabled = sdl2_config->GetBoolean("System", "rng_seed_enabled", false);
106 if (rng_seed_enabled) {
107 Settings::values.rng_seed.SetValue(sdl2_config->GetInteger("System", "rng_seed", 0));
108 } else {
109 Settings::values.rng_seed.SetValue(std::nullopt);
110 }
111
112 const auto custom_rtc_enabled = sdl2_config->GetBoolean("System", "custom_rtc_enabled", false);
113 if (custom_rtc_enabled) {
114 Settings::values.custom_rtc.SetValue(
115 std::chrono::seconds(sdl2_config->GetInteger("System", "custom_rtc", 0)));
116 } else {
117 Settings::values.custom_rtc.SetValue(std::nullopt);
118 }
119
120 // Core
121 Settings::values.use_multi_core.SetValue(
122 sdl2_config->GetBoolean("Core", "use_multi_core", false));
123
124 // Renderer
125 Settings::values.aspect_ratio.SetValue(
126 static_cast<int>(sdl2_config->GetInteger("Renderer", "aspect_ratio", 0)));
127 Settings::values.max_anisotropy.SetValue(
128 static_cast<int>(sdl2_config->GetInteger("Renderer", "max_anisotropy", 0)));
129 Settings::values.use_frame_limit.SetValue(false);
130 Settings::values.frame_limit.SetValue(100);
131 Settings::values.use_disk_shader_cache.SetValue(
132 sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false));
133 const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0);
134 Settings::values.gpu_accuracy.SetValue(static_cast<Settings::GPUAccuracy>(gpu_accuracy_level));
135 Settings::values.use_asynchronous_gpu_emulation.SetValue(
136 sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false));
137 Settings::values.use_fast_gpu_time.SetValue(
138 sdl2_config->GetBoolean("Renderer", "use_fast_gpu_time", true));
139
140 Settings::values.bg_red.SetValue(
141 static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0)));
142 Settings::values.bg_green.SetValue(
143 static_cast<float>(sdl2_config->GetReal("Renderer", "bg_green", 0.0)));
144 Settings::values.bg_blue.SetValue(
145 static_cast<float>(sdl2_config->GetReal("Renderer", "bg_blue", 0.0)));
146
147 // Audio
148 Settings::values.sink_id = "null";
149 Settings::values.enable_audio_stretching.SetValue(false);
150 Settings::values.audio_device_id = "auto";
151 Settings::values.volume.SetValue(0);
152
153 Settings::values.language_index.SetValue(
154 sdl2_config->GetInteger("System", "language_index", 1));
155
156 // Miscellaneous
157 Settings::values.log_filter = sdl2_config->Get("Miscellaneous", "log_filter", "*:Trace");
158 Settings::values.use_dev_keys = sdl2_config->GetBoolean("Miscellaneous", "use_dev_keys", false);
159
160 // Debugging
161 Settings::values.program_args = "";
162 Settings::values.dump_exefs = sdl2_config->GetBoolean("Debugging", "dump_exefs", false);
163 Settings::values.dump_nso = sdl2_config->GetBoolean("Debugging", "dump_nso", false);
164
165 const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
166 std::stringstream ss(title_list);
167 std::string line;
168 while (std::getline(ss, line, '|')) {
169 const auto title_id = std::stoul(line, nullptr, 16);
170 const auto disabled_list = sdl2_config->Get("AddOns", "disabled_" + line, "");
171
172 std::stringstream inner_ss(disabled_list);
173 std::string inner_line;
174 std::vector<std::string> out;
175 while (std::getline(inner_ss, inner_line, '|')) {
176 out.push_back(inner_line);
177 }
178
179 Settings::values.disabled_addons.insert_or_assign(title_id, out);
180 }
181
182 // Web Service
183 Settings::values.enable_telemetry =
184 sdl2_config->GetBoolean("WebService", "enable_telemetry", true);
185 Settings::values.web_api_url =
186 sdl2_config->Get("WebService", "web_api_url", "https://api.yuzu-emu.org");
187 Settings::values.yuzu_username = sdl2_config->Get("WebService", "yuzu_username", "");
188 Settings::values.yuzu_token = sdl2_config->Get("WebService", "yuzu_token", "");
189}
190
191void Config::Reload() {
192 LoadINI(DefaultINI::sdl2_config_file);
193 ReadValues();
194}
diff --git a/src/yuzu_tester/config.h b/src/yuzu_tester/config.h
deleted file mode 100644
index 3b68e5bc9..000000000
--- a/src/yuzu_tester/config.h
+++ /dev/null
@@ -1,24 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <string>
9
10class INIReader;
11
12class Config {
13 std::unique_ptr<INIReader> sdl2_config;
14 std::string sdl2_config_loc;
15
16 bool LoadINI(const std::string& default_contents = "", bool retry = true);
17 void ReadValues();
18
19public:
20 Config();
21 ~Config();
22
23 void Reload();
24};
diff --git a/src/yuzu_tester/default_ini.h b/src/yuzu_tester/default_ini.h
deleted file mode 100644
index 779c3791b..000000000
--- a/src/yuzu_tester/default_ini.h
+++ /dev/null
@@ -1,182 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7namespace DefaultINI {
8
9const char* sdl2_config_file = R"(
10[Core]
11# Whether to use multi-core for CPU emulation
12# 0 (default): Disabled, 1: Enabled
13use_multi_core=
14
15[Cpu]
16# Enable inline page tables optimization (faster guest memory access)
17# 0: Disabled, 1 (default): Enabled
18cpuopt_page_tables =
19
20# Enable block linking CPU optimization (reduce block dispatcher use during predictable jumps)
21# 0: Disabled, 1 (default): Enabled
22cpuopt_block_linking =
23
24# Enable return stack buffer CPU optimization (reduce block dispatcher use during predictable returns)
25# 0: Disabled, 1 (default): Enabled
26cpuopt_return_stack_buffer =
27
28# Enable fast dispatcher CPU optimization (use a two-tiered dispatcher architecture)
29# 0: Disabled, 1 (default): Enabled
30cpuopt_fast_dispatcher =
31
32# Enable context elimination CPU Optimization (reduce host memory use for guest context)
33# 0: Disabled, 1 (default): Enabled
34cpuopt_context_elimination =
35
36# Enable constant propagation CPU optimization (basic IR optimization)
37# 0: Disabled, 1 (default): Enabled
38cpuopt_const_prop =
39
40# Enable miscellaneous CPU optimizations (basic IR optimization)
41# 0: Disabled, 1 (default): Enabled
42cpuopt_misc_ir =
43
44# Enable reduction of memory misalignment checks (reduce memory fallbacks for misaligned access)
45# 0: Disabled, 1 (default): Enabled
46cpuopt_reduce_misalign_checks =
47
48[Renderer]
49# Whether to use software or hardware rendering.
50# 0: Software, 1 (default): Hardware
51use_hw_renderer =
52
53# Whether to use the Just-In-Time (JIT) compiler for shader emulation
54# 0: Interpreter (slow), 1 (default): JIT (fast)
55use_shader_jit =
56
57# Aspect ratio
58# 0: Default (16:9), 1: Force 4:3, 2: Force 21:9, 3: Stretch to Window
59aspect_ratio =
60
61# Anisotropic filtering
62# 0: Default, 1: 2x, 2: 4x, 3: 8x, 4: 16x
63max_anisotropy =
64
65# Whether to enable V-Sync (caps the framerate at 60FPS) or not.
66# 0 (default): Off, 1: On
67use_vsync =
68
69# Whether to use disk based shader cache
70# 0 (default): Off, 1 : On
71use_disk_shader_cache =
72
73# Whether to use accurate GPU emulation
74# 0 (default): Off (fast), 1 : On (slow)
75use_accurate_gpu_emulation =
76
77# Whether to use asynchronous GPU emulation
78# 0 : Off (slow), 1 (default): On (fast)
79use_asynchronous_gpu_emulation =
80
81# The clear color for the renderer. What shows up on the sides of the bottom screen.
82# Must be in range of 0.0-1.0. Defaults to 1.0 for all.
83bg_red =
84bg_blue =
85bg_green =
86
87[Layout]
88# Layout for the screen inside the render window.
89# 0 (default): Default Top Bottom Screen, 1: Single Screen Only, 2: Large Screen Small Screen
90layout_option =
91
92# Toggle custom layout (using the settings below) on or off.
93# 0 (default): Off, 1: On
94custom_layout =
95
96# Screen placement when using Custom layout option
97# 0x, 0y is the top left corner of the render window.
98custom_top_left =
99custom_top_top =
100custom_top_right =
101custom_top_bottom =
102custom_bottom_left =
103custom_bottom_top =
104custom_bottom_right =
105custom_bottom_bottom =
106
107# Swaps the prominent screen with the other screen.
108# For example, if Single Screen is chosen, setting this to 1 will display the bottom screen instead of the top screen.
109# 0 (default): Top Screen is prominent, 1: Bottom Screen is prominent
110swap_screen =
111
112[Data Storage]
113# Whether to create a virtual SD card.
114# 1 (default): Yes, 0: No
115use_virtual_sd =
116
117[System]
118# Whether the system is docked
119# 1 (default): Yes, 0: No
120use_docked_mode =
121
122# Allow the use of NFC in games
123# 1 (default): Yes, 0 : No
124enable_nfc =
125
126# Sets the seed for the RNG generator built into the switch
127# rng_seed will be ignored and randomly generated if rng_seed_enabled is false
128rng_seed_enabled =
129rng_seed =
130
131# Sets the current time (in seconds since 12:00 AM Jan 1, 1970) that will be used by the time service
132# This will auto-increment, with the time set being the time the game is started
133# This override will only occur if custom_rtc_enabled is true, otherwise the current time is used
134custom_rtc_enabled =
135custom_rtc =
136
137# Sets the account username, max length is 32 characters
138# yuzu (default)
139username = yuzu
140
141# Sets the systems language index
142# 0: Japanese, 1: English (default), 2: French, 3: German, 4: Italian, 5: Spanish, 6: Chinese,
143# 7: Korean, 8: Dutch, 9: Portuguese, 10: Russian, 11: Taiwanese, 12: British English, 13: Canadian French,
144# 14: Latin American Spanish, 15: Simplified Chinese, 16: Traditional Chinese
145language_index =
146
147# The system region that yuzu will use during emulation
148# -1: Auto-select (default), 0: Japan, 1: USA, 2: Europe, 3: Australia, 4: China, 5: Korea, 6: Taiwan
149region_value =
150
151[Miscellaneous]
152# A filter which removes logs below a certain logging level.
153# Examples: *:Debug Kernel.SVC:Trace Service.*:Critical
154log_filter = *:Trace
155
156[Debugging]
157# Arguments to be passed to argv/argc in the emulated program. It is preferable to use the testing service datastring
158program_args=
159# Determines whether or not yuzu will dump the ExeFS of all games it attempts to load while loading them
160dump_exefs=false
161# Determines whether or not yuzu will dump all NSOs it attempts to load while loading them
162dump_nso=false
163
164[WebService]
165# Whether or not to enable telemetry
166# 0: No, 1 (default): Yes
167enable_telemetry =
168# URL for Web API
169web_api_url = https://api.yuzu-emu.org
170# Username and token for yuzu Web Service
171# See https://profile.yuzu-emu.org/ for more info
172yuzu_username =
173yuzu_token =
174
175[AddOns]
176# Used to disable add-ons
177# List of title IDs of games that will have add-ons disabled (separated by '|'):
178title_ids =
179# For each title ID, have a key/value pair called `disabled_<title_id>` equal to the names of the add-ons to disable (sep. by '|')
180# e.x. disabled_0100000000010000 = Update|DLC <- disables Updates and DLC on Super Mario Odyssey
181)";
182}
diff --git a/src/yuzu_tester/emu_window/emu_window_sdl2_hide.cpp b/src/yuzu_tester/emu_window/emu_window_sdl2_hide.cpp
deleted file mode 100644
index 358e03870..000000000
--- a/src/yuzu_tester/emu_window/emu_window_sdl2_hide.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstdlib>
7#include <string>
8
9#include <fmt/format.h>
10
11#define SDL_MAIN_HANDLED
12#include <SDL.h>
13
14#include <glad/glad.h>
15
16#include "common/logging/log.h"
17#include "common/scm_rev.h"
18#include "core/settings.h"
19#include "input_common/main.h"
20#include "yuzu_tester/emu_window/emu_window_sdl2_hide.h"
21
22bool EmuWindow_SDL2_Hide::SupportsRequiredGLExtensions() {
23 std::vector<std::string> unsupported_ext;
24
25 if (!GLAD_GL_ARB_direct_state_access)
26 unsupported_ext.push_back("ARB_direct_state_access");
27 if (!GLAD_GL_ARB_vertex_type_10f_11f_11f_rev)
28 unsupported_ext.push_back("ARB_vertex_type_10f_11f_11f_rev");
29 if (!GLAD_GL_ARB_texture_mirror_clamp_to_edge)
30 unsupported_ext.push_back("ARB_texture_mirror_clamp_to_edge");
31 if (!GLAD_GL_ARB_multi_bind)
32 unsupported_ext.push_back("ARB_multi_bind");
33
34 // Extensions required to support some texture formats.
35 if (!GLAD_GL_EXT_texture_compression_s3tc)
36 unsupported_ext.push_back("EXT_texture_compression_s3tc");
37 if (!GLAD_GL_ARB_texture_compression_rgtc)
38 unsupported_ext.push_back("ARB_texture_compression_rgtc");
39 if (!GLAD_GL_ARB_depth_buffer_float)
40 unsupported_ext.push_back("ARB_depth_buffer_float");
41
42 for (const std::string& ext : unsupported_ext)
43 LOG_CRITICAL(Frontend, "Unsupported GL extension: {}", ext);
44
45 return unsupported_ext.empty();
46}
47
48EmuWindow_SDL2_Hide::EmuWindow_SDL2_Hide() {
49 // Initialize the window
50 if (SDL_Init(SDL_INIT_VIDEO) < 0) {
51 LOG_CRITICAL(Frontend, "Failed to initialize SDL2! Exiting...");
52 exit(1);
53 }
54
55 input_subsystem->Initialize();
56
57 SDL_SetMainReady();
58
59 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 4);
60 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 3);
61 SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
62 SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
63 SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
64 SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
65 SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
66 SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 0);
67
68 std::string window_title = fmt::format("yuzu-tester {} | {}-{}", Common::g_build_fullname,
69 Common::g_scm_branch, Common::g_scm_desc);
70 render_window = SDL_CreateWindow(window_title.c_str(),
71 SDL_WINDOWPOS_UNDEFINED, // x position
72 SDL_WINDOWPOS_UNDEFINED, // y position
73 Layout::ScreenUndocked::Width, Layout::ScreenUndocked::Height,
74 SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE |
75 SDL_WINDOW_ALLOW_HIGHDPI | SDL_WINDOW_HIDDEN);
76
77 if (render_window == nullptr) {
78 LOG_CRITICAL(Frontend, "Failed to create SDL2 window! {}", SDL_GetError());
79 exit(1);
80 }
81
82 gl_context = SDL_GL_CreateContext(render_window);
83
84 if (gl_context == nullptr) {
85 LOG_CRITICAL(Frontend, "Failed to create SDL2 GL context! {}", SDL_GetError());
86 exit(1);
87 }
88
89 if (!gladLoadGLLoader(static_cast<GLADloadproc>(SDL_GL_GetProcAddress))) {
90 LOG_CRITICAL(Frontend, "Failed to initialize GL functions! {}", SDL_GetError());
91 exit(1);
92 }
93
94 if (!SupportsRequiredGLExtensions()) {
95 LOG_CRITICAL(Frontend, "GPU does not support all required OpenGL extensions! Exiting...");
96 exit(1);
97 }
98
99 SDL_PumpEvents();
100 SDL_GL_SetSwapInterval(false);
101 LOG_INFO(Frontend, "yuzu-tester Version: {} | {}-{}", Common::g_build_fullname,
102 Common::g_scm_branch, Common::g_scm_desc);
103 Settings::LogSettings();
104}
105
106EmuWindow_SDL2_Hide::~EmuWindow_SDL2_Hide() {
107 input_subsystem->Shutdown();
108 SDL_GL_DeleteContext(gl_context);
109 SDL_Quit();
110}
111
112bool EmuWindow_SDL2_Hide::IsShown() const {
113 return false;
114}
115
116class SDLGLContext : public Core::Frontend::GraphicsContext {
117public:
118 explicit SDLGLContext() {
119 // create a hidden window to make the shared context against
120 window = SDL_CreateWindow(NULL, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 0, 0,
121 SDL_WINDOW_HIDDEN | SDL_WINDOW_OPENGL);
122 context = SDL_GL_CreateContext(window);
123 }
124
125 ~SDLGLContext() {
126 DoneCurrent();
127 SDL_GL_DeleteContext(context);
128 SDL_DestroyWindow(window);
129 }
130
131 void MakeCurrent() override {
132 SDL_GL_MakeCurrent(window, context);
133 }
134
135 void DoneCurrent() override {
136 SDL_GL_MakeCurrent(window, nullptr);
137 }
138
139private:
140 SDL_Window* window;
141 SDL_GLContext context;
142};
143
144std::unique_ptr<Core::Frontend::GraphicsContext> EmuWindow_SDL2_Hide::CreateSharedContext() const {
145 return std::make_unique<SDLGLContext>();
146}
diff --git a/src/yuzu_tester/emu_window/emu_window_sdl2_hide.h b/src/yuzu_tester/emu_window/emu_window_sdl2_hide.h
deleted file mode 100644
index adccdf35e..000000000
--- a/src/yuzu_tester/emu_window/emu_window_sdl2_hide.h
+++ /dev/null
@@ -1,37 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include "core/frontend/emu_window.h"
8
9struct SDL_Window;
10
11namespace InputCommon {
12class InputSubsystem;
13}
14
15class EmuWindow_SDL2_Hide : public Core::Frontend::EmuWindow {
16public:
17 explicit EmuWindow_SDL2_Hide();
18 ~EmuWindow_SDL2_Hide();
19
20 /// Whether the screen is being shown or not.
21 bool IsShown() const override;
22
23 std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override;
24
25private:
26 /// Whether the GPU and driver supports the OpenGL extension required
27 bool SupportsRequiredGLExtensions();
28
29 std::unique_ptr<InputCommon::InputSubsystem> input_subsystem;
30
31 /// Internal SDL2 render window
32 SDL_Window* render_window;
33
34 using SDL_GLContext = void*;
35 /// The OpenGL context associated with the window
36 SDL_GLContext gl_context;
37};
diff --git a/src/yuzu_tester/resource.h b/src/yuzu_tester/resource.h
deleted file mode 100644
index df8e459e4..000000000
--- a/src/yuzu_tester/resource.h
+++ /dev/null
@@ -1,16 +0,0 @@
1//{{NO_DEPENDENCIES}}
2// Microsoft Visual C++ generated include file.
3// Used by pcafe.rc
4//
5#define IDI_ICON3 103
6
7// Next default values for new objects
8//
9#ifdef APSTUDIO_INVOKED
10#ifndef APSTUDIO_READONLY_SYMBOLS
11#define _APS_NEXT_RESOURCE_VALUE 105
12#define _APS_NEXT_COMMAND_VALUE 40001
13#define _APS_NEXT_CONTROL_VALUE 1001
14#define _APS_NEXT_SYMED_VALUE 101
15#endif
16#endif
diff --git a/src/yuzu_tester/service/yuzutest.cpp b/src/yuzu_tester/service/yuzutest.cpp
deleted file mode 100644
index e257fae25..000000000
--- a/src/yuzu_tester/service/yuzutest.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include "common/string_util.h"
7#include "core/core.h"
8#include "core/hle/ipc_helpers.h"
9#include "core/hle/service/service.h"
10#include "core/hle/service/sm/sm.h"
11#include "yuzu_tester/service/yuzutest.h"
12
13namespace Service::Yuzu {
14
15constexpr u64 SERVICE_VERSION = 0x00000002;
16
17class YuzuTest final : public ServiceFramework<YuzuTest> {
18public:
19 explicit YuzuTest(Core::System& system_, std::string data_,
20 std::function<void(std::vector<TestResult>)> finish_callback_)
21 : ServiceFramework{system_, "yuzutest"}, data{std::move(data_)}, finish_callback{std::move(
22 finish_callback_)} {
23 static const FunctionInfo functions[] = {
24 {0, &YuzuTest::Initialize, "Initialize"},
25 {1, &YuzuTest::GetServiceVersion, "GetServiceVersion"},
26 {2, &YuzuTest::GetData, "GetData"},
27 {10, &YuzuTest::StartIndividual, "StartIndividual"},
28 {20, &YuzuTest::FinishIndividual, "FinishIndividual"},
29 {100, &YuzuTest::ExitProgram, "ExitProgram"},
30 };
31
32 RegisterHandlers(functions);
33 }
34
35private:
36 void Initialize(Kernel::HLERequestContext& ctx) {
37 LOG_DEBUG(Frontend, "called");
38 IPC::ResponseBuilder rb{ctx, 2};
39 rb.Push(RESULT_SUCCESS);
40 }
41
42 void GetServiceVersion(Kernel::HLERequestContext& ctx) {
43 LOG_DEBUG(Frontend, "called");
44 IPC::ResponseBuilder rb{ctx, 4};
45 rb.Push(RESULT_SUCCESS);
46 rb.Push(SERVICE_VERSION);
47 }
48
49 void GetData(Kernel::HLERequestContext& ctx) {
50 LOG_DEBUG(Frontend, "called");
51 const auto size = ctx.GetWriteBufferSize();
52 const auto write_size = std::min(size, data.size());
53 ctx.WriteBuffer(data.data(), write_size);
54
55 IPC::ResponseBuilder rb{ctx, 3};
56 rb.Push(RESULT_SUCCESS);
57 rb.Push<u32>(static_cast<u32>(write_size));
58 }
59
60 void StartIndividual(Kernel::HLERequestContext& ctx) {
61 const auto name_raw = ctx.ReadBuffer();
62
63 const auto name = Common::StringFromFixedZeroTerminatedBuffer(
64 reinterpret_cast<const char*>(name_raw.data()), name_raw.size());
65
66 LOG_DEBUG(Frontend, "called, name={}", name);
67
68 IPC::ResponseBuilder rb{ctx, 2};
69 rb.Push(RESULT_SUCCESS);
70 }
71
72 void FinishIndividual(Kernel::HLERequestContext& ctx) {
73 IPC::RequestParser rp{ctx};
74
75 const auto code = rp.PopRaw<u32>();
76
77 const auto result_data_raw = ctx.ReadBuffer();
78 const auto test_name_raw = ctx.ReadBuffer(1);
79
80 const auto data = Common::StringFromFixedZeroTerminatedBuffer(
81 reinterpret_cast<const char*>(result_data_raw.data()), result_data_raw.size());
82 const auto test_name = Common::StringFromFixedZeroTerminatedBuffer(
83 reinterpret_cast<const char*>(test_name_raw.data()), test_name_raw.size());
84
85 LOG_INFO(Frontend, "called, result_code={:08X}, data={}, name={}", code, data, test_name);
86
87 results.push_back({code, data, test_name});
88
89 IPC::ResponseBuilder rb{ctx, 2};
90 rb.Push(RESULT_SUCCESS);
91 }
92
93 void ExitProgram(Kernel::HLERequestContext& ctx) {
94 LOG_DEBUG(Frontend, "called");
95
96 IPC::ResponseBuilder rb{ctx, 2};
97 rb.Push(RESULT_SUCCESS);
98
99 finish_callback(std::move(results));
100 }
101
102 std::string data;
103
104 std::vector<TestResult> results;
105 std::function<void(std::vector<TestResult>)> finish_callback;
106};
107
108void InstallInterfaces(Core::System& system, std::string data,
109 std::function<void(std::vector<TestResult>)> finish_callback) {
110 auto& sm = system.ServiceManager();
111 std::make_shared<YuzuTest>(system, std::move(data), std::move(finish_callback))
112 ->InstallAsService(sm);
113}
114
115} // namespace Service::Yuzu
diff --git a/src/yuzu_tester/service/yuzutest.h b/src/yuzu_tester/service/yuzutest.h
deleted file mode 100644
index 7794814fa..000000000
--- a/src/yuzu_tester/service/yuzutest.h
+++ /dev/null
@@ -1,25 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <functional>
8#include <string>
9
10namespace Core {
11class System;
12}
13
14namespace Service::Yuzu {
15
16struct TestResult {
17 u32 code;
18 std::string data;
19 std::string name;
20};
21
22void InstallInterfaces(Core::System& system, std::string data,
23 std::function<void(std::vector<TestResult>)> finish_callback);
24
25} // namespace Service::Yuzu
diff --git a/src/yuzu_tester/yuzu.cpp b/src/yuzu_tester/yuzu.cpp
deleted file mode 100644
index 09cf2ad77..000000000
--- a/src/yuzu_tester/yuzu.cpp
+++ /dev/null
@@ -1,268 +0,0 @@
1// Copyright 2019 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <chrono>
6#include <iostream>
7#include <memory>
8#include <string>
9#include <thread>
10
11#include <fmt/ostream.h>
12
13#include "common/common_paths.h"
14#include "common/detached_tasks.h"
15#include "common/file_util.h"
16#include "common/logging/backend.h"
17#include "common/logging/filter.h"
18#include "common/logging/log.h"
19#include "common/microprofile.h"
20#include "common/scm_rev.h"
21#include "common/scope_exit.h"
22#include "common/string_util.h"
23#include "common/telemetry.h"
24#include "core/core.h"
25#include "core/crypto/key_manager.h"
26#include "core/file_sys/registered_cache.h"
27#include "core/file_sys/vfs_real.h"
28#include "core/hle/service/filesystem/filesystem.h"
29#include "core/loader/loader.h"
30#include "core/settings.h"
31#include "core/telemetry_session.h"
32#include "video_core/renderer_base.h"
33#include "yuzu_tester/config.h"
34#include "yuzu_tester/emu_window/emu_window_sdl2_hide.h"
35#include "yuzu_tester/service/yuzutest.h"
36
37#ifdef _WIN32
38// windows.h needs to be included before shellapi.h
39#include <windows.h>
40
41#include <shellapi.h>
42#endif
43
44#undef _UNICODE
45#include <getopt.h>
46#ifndef _MSC_VER
47#include <unistd.h>
48#endif
49
50#ifdef _WIN32
51extern "C" {
52// tells Nvidia and AMD drivers to use the dedicated GPU by default on laptops with switchable
53// graphics
54__declspec(dllexport) unsigned long NvOptimusEnablement = 0x00000001;
55__declspec(dllexport) int AmdPowerXpressRequestHighPerformance = 1;
56}
57#endif
58
59static void PrintHelp(const char* argv0) {
60 std::cout << "Usage: " << argv0
61 << " [options] <filename>\n"
62 "-h, --help Display this help and exit\n"
63 "-v, --version Output version information and exit\n"
64 "-d, --datastring Pass following string as data to test service command #2\n"
65 "-l, --log Log to console in addition to file (will log to file only "
66 "by default)\n";
67}
68
69static void PrintVersion() {
70 std::cout << "yuzu [Test Utility] " << Common::g_scm_branch << " " << Common::g_scm_desc
71 << std::endl;
72}
73
74static void InitializeLogging(bool console) {
75 Log::Filter log_filter(Log::Level::Debug);
76 log_filter.ParseFilterString(Settings::values.log_filter);
77 Log::SetGlobalFilter(log_filter);
78
79 if (console)
80 Log::AddBackend(std::make_unique<Log::ColorConsoleBackend>());
81
82 const std::string& log_dir = Common::FS::GetUserPath(Common::FS::UserPath::LogDir);
83 Common::FS::CreateFullPath(log_dir);
84 Log::AddBackend(std::make_unique<Log::FileBackend>(log_dir + LOG_FILE));
85#ifdef _WIN32
86 Log::AddBackend(std::make_unique<Log::DebuggerBackend>());
87#endif
88}
89
90/// Application entry point
91int main(int argc, char** argv) {
92 Common::DetachedTasks detached_tasks;
93 Config config;
94
95 int option_index = 0;
96
97#ifdef _WIN32
98 int argc_w;
99 auto argv_w = CommandLineToArgvW(GetCommandLineW(), &argc_w);
100
101 if (argv_w == nullptr) {
102 std::cout << "Failed to get command line arguments" << std::endl;
103 return -1;
104 }
105#endif
106 std::string filepath;
107
108 static struct option long_options[] = {
109 {"help", no_argument, 0, 'h'},
110 {"version", no_argument, 0, 'v'},
111 {"datastring", optional_argument, 0, 'd'},
112 {"log", no_argument, 0, 'l'},
113 {0, 0, 0, 0},
114 };
115
116 bool console_log = false;
117 std::string datastring;
118
119 while (optind < argc) {
120 int arg = getopt_long(argc, argv, "hvdl::", long_options, &option_index);
121 if (arg != -1) {
122 switch (static_cast<char>(arg)) {
123 case 'h':
124 PrintHelp(argv[0]);
125 return 0;
126 case 'v':
127 PrintVersion();
128 return 0;
129 case 'd':
130 datastring = argv[optind];
131 ++optind;
132 break;
133 case 'l':
134 console_log = true;
135 break;
136 }
137 } else {
138#ifdef _WIN32
139 filepath = Common::UTF16ToUTF8(argv_w[optind]);
140#else
141 filepath = argv[optind];
142#endif
143 optind++;
144 }
145 }
146
147 InitializeLogging(console_log);
148
149#ifdef _WIN32
150 LocalFree(argv_w);
151#endif
152
153 MicroProfileOnThreadCreate("EmuThread");
154 SCOPE_EXIT({ MicroProfileShutdown(); });
155
156 if (filepath.empty()) {
157 LOG_CRITICAL(Frontend, "Failed to load application: No application specified");
158 std::cout << "Failed to load application: No application specified" << std::endl;
159 PrintHelp(argv[0]);
160 return -1;
161 }
162
163 Core::System& system{Core::System::GetInstance()};
164
165 Settings::Apply(system);
166
167 const auto emu_window{std::make_unique<EmuWindow_SDL2_Hide>()};
168
169 bool finished = false;
170 int return_value = 0;
171 const auto callback = [&finished,
172 &return_value](std::vector<Service::Yuzu::TestResult> results) {
173 finished = true;
174 return_value = 0;
175
176 // Find the minimum length needed to fully enclose all test names (and the header field) in
177 // the fmt::format column by first finding the maximum size of any test name and comparing
178 // that to 9, the string length of 'Test Name'
179 const auto needed_length_name =
180 std::max<u64>(std::max_element(results.begin(), results.end(),
181 [](const auto& lhs, const auto& rhs) {
182 return lhs.name.size() < rhs.name.size();
183 })
184 ->name.size(),
185 9ull);
186
187 std::size_t passed = 0;
188 std::size_t failed = 0;
189
190 std::cout << fmt::format("Result [Res Code] | {:<{}} | Extra Data", "Test Name",
191 needed_length_name)
192 << std::endl;
193
194 for (const auto& res : results) {
195 const auto main_res = res.code == 0 ? "PASSED" : "FAILED";
196 if (res.code == 0)
197 ++passed;
198 else
199 ++failed;
200 std::cout << fmt::format("{} [{:08X}] | {:<{}} | {}", main_res, res.code, res.name,
201 needed_length_name, res.data)
202 << std::endl;
203 }
204
205 std::cout << std::endl
206 << fmt::format("{:4d} Passed | {:4d} Failed | {:4d} Total | {:2.2f} Passed Ratio",
207 passed, failed, passed + failed,
208 static_cast<float>(passed) / (passed + failed))
209 << std::endl
210 << (failed == 0 ? "PASSED" : "FAILED") << std::endl;
211
212 if (failed > 0)
213 return_value = -1;
214 };
215
216 system.SetContentProvider(std::make_unique<FileSys::ContentProviderUnion>());
217 system.SetFilesystem(std::make_shared<FileSys::RealVfsFilesystem>());
218 system.GetFileSystemController().CreateFactories(*system.GetFilesystem());
219
220 SCOPE_EXIT({ system.Shutdown(); });
221
222 const Core::System::ResultStatus load_result{system.Load(*emu_window, filepath)};
223
224 switch (load_result) {
225 case Core::System::ResultStatus::ErrorGetLoader:
226 LOG_CRITICAL(Frontend, "Failed to obtain loader for {}!", filepath);
227 return -1;
228 case Core::System::ResultStatus::ErrorLoader:
229 LOG_CRITICAL(Frontend, "Failed to load ROM!");
230 return -1;
231 case Core::System::ResultStatus::ErrorNotInitialized:
232 LOG_CRITICAL(Frontend, "CPUCore not initialized");
233 return -1;
234 case Core::System::ResultStatus::ErrorVideoCore:
235 LOG_CRITICAL(Frontend, "Failed to initialize VideoCore!");
236 return -1;
237 case Core::System::ResultStatus::Success:
238 break; // Expected case
239 default:
240 if (static_cast<u32>(load_result) >
241 static_cast<u32>(Core::System::ResultStatus::ErrorLoader)) {
242 const u16 loader_id = static_cast<u16>(Core::System::ResultStatus::ErrorLoader);
243 const u16 error_id = static_cast<u16>(load_result) - loader_id;
244 LOG_CRITICAL(Frontend,
245 "While attempting to load the ROM requested, an error occurred. Please "
246 "refer to the yuzu wiki for more information or the yuzu discord for "
247 "additional help.\n\nError Code: {:04X}-{:04X}\nError Description: {}",
248 loader_id, error_id, static_cast<Loader::ResultStatus>(error_id));
249 }
250 break;
251 }
252
253 Service::Yuzu::InstallInterfaces(system, datastring, callback);
254
255 system.TelemetrySession().AddField(Common::Telemetry::FieldType::App, "Frontend",
256 "SDLHideTester");
257
258 system.GPU().Start();
259
260 void(system.Run());
261 while (!finished) {
262 std::this_thread::sleep_for(std::chrono::milliseconds(1));
263 }
264 void(system.Pause());
265
266 detached_tasks.WaitForAllTasks();
267 return return_value;
268}
diff --git a/src/yuzu_tester/yuzu.rc b/src/yuzu_tester/yuzu.rc
deleted file mode 100644
index 0cde75e2f..000000000
--- a/src/yuzu_tester/yuzu.rc
+++ /dev/null
@@ -1,17 +0,0 @@
1#include "winresrc.h"
2/////////////////////////////////////////////////////////////////////////////
3//
4// Icon
5//
6
7// Icon with lowest ID value placed first to ensure application icon
8// remains consistent on all systems.
9YUZU_ICON ICON "../../dist/yuzu.ico"
10
11
12/////////////////////////////////////////////////////////////////////////////
13//
14// RT_MANIFEST
15//
16
170 RT_MANIFEST "../../dist/yuzu.manifest"