Diffstat (limited to 'src')
-rw-r--r--  src/common/common_types.h | 10
-rw-r--r--  src/common/fs/file.h | 5
-rw-r--r--  src/common/input.h | 2
-rw-r--r--  src/common/telemetry.h | 26
-rw-r--r--  src/common/wall_clock.cpp | 16
-rw-r--r--  src/common/wall_clock.h | 8
-rw-r--r--  src/common/x64/native_clock.cpp | 6
-rw-r--r--  src/core/arm/arm_interface.h | 6
-rw-r--r--  src/core/file_sys/vfs.h | 18
-rw-r--r--  src/core/hid/emulated_console.h | 1
-rw-r--r--  src/core/hid/emulated_controller.h | 2
-rw-r--r--  src/core/hid/hid_core.h | 1
-rw-r--r--  src/core/hle/kernel/k_auto_object.h | 7
-rw-r--r--  src/core/hle/kernel/k_auto_object_container.h | 4
-rw-r--r--  src/core/hle/kernel/k_handle_table.h | 3
-rw-r--r--  src/core/hle/kernel/k_memory_manager.h | 42
-rw-r--r--  src/core/hle/kernel/k_memory_region.h | 80
-rw-r--r--  src/core/hle/kernel/k_page_heap.h | 90
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp | 67
-rw-r--r--  src/core/hle/kernel/k_page_table.h | 22
-rw-r--r--  src/core/hle/kernel/k_slab_heap.h | 13
-rw-r--r--  src/core/hle/kernel/svc.cpp | 2
-rw-r--r--  src/core/hle/service/vi/display/vi_display.h | 2
-rw-r--r--  src/core/loader/loader.h | 6
-rw-r--r--  src/input_common/drivers/udp_client.cpp | 15
-rw-r--r--  src/input_common/drivers/udp_client.h | 4
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_instructions.h | 31
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp | 107
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | 142
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_instructions.h | 31
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp | 119
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h | 30
-rw-r--r--  src/shader_recompiler/frontend/ir/microinstruction.cpp | 19
-rw-r--r--  src/shader_recompiler/frontend/ir/opcodes.inc | 19
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp | 18
-rw-r--r--  src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp | 49
-rw-r--r--  src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp | 20
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 47
-rw-r--r--  src/video_core/engines/engine_upload.cpp | 7
-rw-r--r--  src/video_core/engines/engine_upload.h | 8
-rw-r--r--  src/video_core/engines/kepler_compute.cpp | 1
-rw-r--r--  src/video_core/engines/kepler_memory.cpp | 4
-rw-r--r--  src/video_core/engines/kepler_memory.h | 7
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp | 1
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 3
-rw-r--r--  src/video_core/gpu.cpp | 8
-rw-r--r--  src/video_core/memory_manager.h | 4
-rw-r--r--  src/video_core/rasterizer_interface.h | 3
-rw-r--r--  src/video_core/renderer_base.h | 8
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 22
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 2
-rw-r--r--  src/video_core/renderer_opengl/gl_resource_manager.h | 50
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 22
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 4
-rw-r--r--  src/video_core/shader_cache.cpp | 4
-rw-r--r--  src/video_core/shader_cache.h | 3
-rw-r--r--  src/yuzu/configuration/configure_dialog.cpp | 2
-rw-r--r--  src/yuzu/configuration/configure_filesystem.cpp | 16
-rw-r--r--  src/yuzu/configuration/configure_filesystem.h | 8
-rw-r--r--  src/yuzu/configuration/configure_hotkeys.cpp | 32
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp | 27
-rw-r--r--  src/yuzu/configuration/configure_motion_touch.cpp | 38
-rw-r--r--  src/yuzu/game_list.cpp | 2
-rw-r--r--  src/yuzu/main.cpp | 1
66 files changed, 1110 insertions(+), 275 deletions(-)
diff --git a/src/common/common_types.h b/src/common/common_types.h
index 4cec89fbd..99bffc460 100644
--- a/src/common/common_types.h
+++ b/src/common/common_types.h
@@ -46,13 +46,3 @@ using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space
 
 using u128 = std::array<std::uint64_t, 2>;
 static_assert(sizeof(u128) == 16, "u128 must be 128 bits wide");
-
-// An inheritable class to disallow the copy constructor and operator= functions
-class NonCopyable {
-protected:
-    constexpr NonCopyable() = default;
-    ~NonCopyable() = default;
-
-    NonCopyable(const NonCopyable&) = delete;
-    NonCopyable& operator=(const NonCopyable&) = delete;
-};
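The YUZU_NON_COPYABLE and YUZU_NON_MOVEABLE macros that replace the deleted NonCopyable base throughout this change are declared in "common/common_funcs.h"; a minimal sketch of what they presumably expand to:

    // Presumed expansion (sketch): explicitly deleted copy/move members,
    // replacing inheritance from the removed NonCopyable base class.
    #define YUZU_NON_COPYABLE(cls) \
        cls(const cls&) = delete; \
        cls& operator=(const cls&) = delete

    #define YUZU_NON_MOVEABLE(cls) \
        cls(cls&&) = delete; \
        cls& operator=(cls&&) = delete

Opting in per class keeps each type a plain non-polymorphic class instead of dragging in an empty base, and lets a type delete copies while still defaulting its moves.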
diff --git a/src/common/fs/file.h b/src/common/fs/file.h
index 2c4ab4332..a4f7944cd 100644
--- a/src/common/fs/file.h
+++ b/src/common/fs/file.h
@@ -188,9 +188,8 @@ public:
 
 #ifdef _WIN32
     template <typename Path>
-    [[nodiscard]] void Open(const Path& path, FileAccessMode mode,
-                            FileType type = FileType::BinaryFile,
-                            FileShareFlag flag = FileShareFlag::ShareReadOnly) {
+    void Open(const Path& path, FileAccessMode mode, FileType type = FileType::BinaryFile,
+              FileShareFlag flag = FileShareFlag::ShareReadOnly) {
         using ValueType = typename Path::value_type;
         if constexpr (IsChar<ValueType>) {
             Open(ToU8String(path), mode, type, flag);
diff --git a/src/common/input.h b/src/common/input.h
index f4f9eb30a..54fcb24b0 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -209,6 +209,8 @@ enum class ButtonNames {
     Triangle,
     Share,
     Options,
+    Home,
+    Touch,
 
     // Mouse buttons
     ButtonMouseWheel,
diff --git a/src/common/telemetry.h b/src/common/telemetry.h
index 49186e848..d38aeac99 100644
--- a/src/common/telemetry.h
+++ b/src/common/telemetry.h
@@ -8,6 +8,7 @@
 #include <map>
 #include <memory>
 #include <string>
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 
 namespace Common::Telemetry {
@@ -28,7 +29,7 @@ struct VisitorInterface;
 /**
  * Interface class for telemetry data fields.
  */
-class FieldInterface : NonCopyable {
+class FieldInterface {
 public:
     virtual ~FieldInterface() = default;
 
@@ -52,14 +53,15 @@ public:
 template <typename T>
 class Field : public FieldInterface {
 public:
+    YUZU_NON_COPYABLE(Field);
+
     Field(FieldType type_, std::string name_, T value_)
         : name(std::move(name_)), type(type_), value(std::move(value_)) {}
 
-    Field(const Field&) = default;
-    Field& operator=(const Field&) = default;
+    ~Field() override = default;
 
-    Field(Field&&) = default;
-    Field& operator=(Field&& other) = default;
+    Field(Field&&) noexcept = default;
+    Field& operator=(Field&& other) noexcept = default;
 
     void Accept(VisitorInterface& visitor) const override;
 
@@ -98,9 +100,15 @@ private:
 /**
  * Collection of data fields that have been logged.
  */
-class FieldCollection final : NonCopyable {
+class FieldCollection final {
 public:
+    YUZU_NON_COPYABLE(FieldCollection);
+
     FieldCollection() = default;
+    ~FieldCollection() = default;
+
+    FieldCollection(FieldCollection&&) noexcept = default;
+    FieldCollection& operator=(FieldCollection&&) noexcept = default;
 
     /**
      * Accept method for the visitor pattern, visits each field in the collection.
@@ -133,7 +141,7 @@ private:
  * Telemetry fields visitor interface class. A backend to log to a web service should implement
  * this interface.
  */
-struct VisitorInterface : NonCopyable {
+struct VisitorInterface {
     virtual ~VisitorInterface() = default;
 
     virtual void Visit(const Field<bool>& field) = 0;
@@ -160,8 +168,8 @@ struct VisitorInterface : NonCopyable {
  * Empty implementation of VisitorInterface that drops all fields. Used when a functional
  * backend implementation is not available.
 */
-struct NullVisitor : public VisitorInterface {
-    ~NullVisitor() = default;
+struct NullVisitor final : public VisitorInterface {
+    YUZU_NON_COPYABLE(NullVisitor);
 
     void Visit(const Field<bool>& /*field*/) override {}
     void Visit(const Field<double>& /*field*/) override {}
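With the copies deleted via YUZU_NON_COPYABLE and the moves defaulted noexcept, Field becomes a move-only type. A hedged usage sketch (the field name is illustrative and FieldType::Performance is assumed to be one of the existing enumerators):

    #include <utility>
    #include "common/telemetry.h"

    void Example() {
        using namespace Common::Telemetry;
        Field<u64> field(FieldType::Performance, "Frametime", u64{16}); // assumed enumerator
        Field<u64> moved = std::move(field); // OK: moves are defaulted and noexcept
        // Field<u64> copied = moved;        // no longer compiles: copy is deleted
    }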
diff --git a/src/common/wall_clock.cpp b/src/common/wall_clock.cpp
index ffa282e88..9acf7551e 100644
--- a/src/common/wall_clock.cpp
+++ b/src/common/wall_clock.cpp
@@ -65,16 +65,20 @@ private:
 
 #ifdef ARCHITECTURE_x86_64
 
-std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
-                                                   u32 emulated_clock_frequency) {
+std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
+                                                   u64 emulated_clock_frequency) {
     const auto& caps = GetCPUCaps();
     u64 rtsc_frequency = 0;
     if (caps.invariant_tsc) {
         rtsc_frequency = EstimateRDTSCFrequency();
     }
 
-    // Fallback to StandardWallClock if rtsc period is higher than a nano second
-    if (rtsc_frequency <= 1000000000) {
+    // Fall back to StandardWallClock if the hardware TSC frequency is not greater than:
+    // - 1 GHz (one tick per nanosecond)
+    // - The emulated CPU frequency
+    // - The emulated clock counter frequency (CNTFRQ)
+    if (rtsc_frequency <= WallClock::NS_RATIO || rtsc_frequency <= emulated_cpu_frequency ||
+        rtsc_frequency <= emulated_clock_frequency) {
         return std::make_unique<StandardWallClock>(emulated_cpu_frequency,
                                                    emulated_clock_frequency);
     } else {
@@ -85,8 +89,8 @@ std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
 
 #else
 
-std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
-                                                   u32 emulated_clock_frequency) {
+std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
+                                                   u64 emulated_clock_frequency) {
     return std::make_unique<StandardWallClock>(emulated_cpu_frequency, emulated_clock_frequency);
 }
 
diff --git a/src/common/wall_clock.h b/src/common/wall_clock.h
index cef3e9499..874448c27 100644
--- a/src/common/wall_clock.h
+++ b/src/common/wall_clock.h
@@ -13,6 +13,10 @@ namespace Common {
 
 class WallClock {
 public:
+    static constexpr u64 NS_RATIO = 1'000'000'000;
+    static constexpr u64 US_RATIO = 1'000'000;
+    static constexpr u64 MS_RATIO = 1'000;
+
     virtual ~WallClock() = default;
 
     /// Returns current wall time in nanoseconds
@@ -49,7 +53,7 @@ private:
     bool is_native;
 };
 
-[[nodiscard]] std::unique_ptr<WallClock> CreateBestMatchingClock(u32 emulated_cpu_frequency,
-                                                                 u32 emulated_clock_frequency);
+[[nodiscard]] std::unique_ptr<WallClock> CreateBestMatchingClock(u64 emulated_cpu_frequency,
+                                                                 u64 emulated_clock_frequency);
 
 } // namespace Common
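A hedged sketch of a call site for the widened u64 interface. The frequencies below are the Switch's nominal 1020 MHz CPU clock and 19.2 MHz CNTFRQ, chosen for illustration; the accessor name is assumed from the "Returns current wall time in nanoseconds" comment above:

    #include "common/wall_clock.h"

    void Example() {
        // Illustrative values; the real constants live elsewhere in core.
        auto clock = Common::CreateBestMatchingClock(1'020'000'000, 19'200'000);
        [[maybe_unused]] const auto now_ns = clock->GetTimeNS(); // assumed accessor
    }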
diff --git a/src/common/x64/native_clock.cpp b/src/common/x64/native_clock.cpp
index 82ee2c8a1..91b842829 100644
--- a/src/common/x64/native_clock.cpp
+++ b/src/common/x64/native_clock.cpp
@@ -47,9 +47,9 @@ NativeClock::NativeClock(u64 emulated_cpu_frequency_, u64 emulated_clock_frequen
     _mm_mfence();
     time_point.inner.last_measure = __rdtsc();
     time_point.inner.accumulated_ticks = 0U;
-    ns_rtsc_factor = GetFixedPoint64Factor(1000000000, rtsc_frequency);
-    us_rtsc_factor = GetFixedPoint64Factor(1000000, rtsc_frequency);
-    ms_rtsc_factor = GetFixedPoint64Factor(1000, rtsc_frequency);
+    ns_rtsc_factor = GetFixedPoint64Factor(NS_RATIO, rtsc_frequency);
+    us_rtsc_factor = GetFixedPoint64Factor(US_RATIO, rtsc_frequency);
+    ms_rtsc_factor = GetFixedPoint64Factor(MS_RATIO, rtsc_frequency);
     clock_rtsc_factor = GetFixedPoint64Factor(emulated_clock_frequency, rtsc_frequency);
     cpu_rtsc_factor = GetFixedPoint64Factor(emulated_cpu_frequency, rtsc_frequency);
 }
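For context, GetFixedPoint64Factor presumably packs the ratio numerator/rtsc_frequency into a 0.64 fixed-point multiplier, so each tick conversion is a multiply-high instead of a divide; a sketch under that assumption:

    #include <cstdint>

    // Sketch (assumes GCC/Clang unsigned __int128). Requires numerator < divisor,
    // which holds here because rtsc_frequency > NS_RATIO after the fallback check.
    constexpr uint64_t GetFixedPoint64Factor(uint64_t numerator, uint64_t divisor) {
        return static_cast<uint64_t>((static_cast<unsigned __int128>(numerator) << 64) / divisor);
    }

    constexpr uint64_t Scale(uint64_t ticks, uint64_t factor) {
        // High 64 bits of ticks * factor, i.e. roughly ticks * numerator / divisor.
        return static_cast<uint64_t>((static_cast<unsigned __int128>(ticks) * factor) >> 64);
    }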
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 689e3ceb5..c60322442 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -6,6 +6,7 @@
 
 #include <array>
 #include <vector>
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hardware_properties.h"
 
@@ -24,8 +25,11 @@ class CPUInterruptHandler;
 using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
 
 /// Generic ARMv8 CPU interface
-class ARM_Interface : NonCopyable {
+class ARM_Interface {
 public:
+    YUZU_NON_COPYABLE(ARM_Interface);
+    YUZU_NON_MOVEABLE(ARM_Interface);
+
     explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
                            bool uses_wall_clock_)
         : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
diff --git a/src/core/file_sys/vfs.h b/src/core/file_sys/vfs.h
index 3e625fad6..1b9365853 100644
--- a/src/core/file_sys/vfs.h
+++ b/src/core/file_sys/vfs.h
@@ -12,6 +12,7 @@
 #include <type_traits>
 #include <vector>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/file_sys/vfs_types.h"
 
@@ -29,8 +30,11 @@ enum class VfsEntryType {
 // A class representing an abstract filesystem. A default implementation given the root VirtualDir
 // is provided for convenience, but if the Vfs implementation has any additional state or
 // functionality, they will need to override.
-class VfsFilesystem : NonCopyable {
+class VfsFilesystem {
 public:
+    YUZU_NON_COPYABLE(VfsFilesystem);
+    YUZU_NON_MOVEABLE(VfsFilesystem);
+
     explicit VfsFilesystem(VirtualDir root);
     virtual ~VfsFilesystem();
 
@@ -77,8 +81,12 @@ protected:
 };
 
 // A class representing a file in an abstract filesystem.
-class VfsFile : NonCopyable {
+class VfsFile {
 public:
+    YUZU_NON_COPYABLE(VfsFile);
+    YUZU_NON_MOVEABLE(VfsFile);
+
+    VfsFile() = default;
     virtual ~VfsFile();
 
     // Retrieves the file name.
@@ -176,8 +184,12 @@ public:
 };
 
 // A class representing a directory in an abstract filesystem.
-class VfsDirectory : NonCopyable {
+class VfsDirectory {
 public:
+    YUZU_NON_COPYABLE(VfsDirectory);
+    YUZU_NON_MOVEABLE(VfsDirectory);
+
+    VfsDirectory() = default;
     virtual ~VfsDirectory();
 
     // Retrieves the file located at path as if the current directory was root. Returns nullptr if
diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h
index 707419102..5eb170823 100644
--- a/src/core/hid/emulated_console.h
+++ b/src/core/hid/emulated_console.h
@@ -10,6 +10,7 @@
 #include <mutex>
 #include <unordered_map>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/input.h"
 #include "common/param_package.h"
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index a63a83cce..d8642c5b3 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -13,8 +13,6 @@
 #include "common/common_types.h"
 #include "common/input.h"
 #include "common/param_package.h"
-#include "common/point.h"
-#include "common/quaternion.h"
 #include "common/settings.h"
 #include "common/vector_math.h"
 #include "core/hid/hid_types.h"
diff --git a/src/core/hid/hid_core.h b/src/core/hid/hid_core.h
index 837f7de49..717f605e7 100644
--- a/src/core/hid/hid_core.h
+++ b/src/core/hid/hid_core.h
@@ -6,6 +6,7 @@
 
 #include <memory>
 
+#include "common/common_funcs.h"
 #include "core/hid/hid_types.h"
 
 namespace Core::HID {
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 165b76747..05779f2d5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -20,8 +20,6 @@ class KernelCore;
20class KProcess; 20class KProcess;
21 21
22#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ 22#define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
23 YUZU_NON_COPYABLE(CLASS); \
24 YUZU_NON_MOVEABLE(CLASS); \
25 \ 23 \
26private: \ 24private: \
27 friend class ::Kernel::KClassTokenGenerator; \ 25 friend class ::Kernel::KClassTokenGenerator; \
@@ -32,6 +30,9 @@ private:
32 } \ 30 } \
33 \ 31 \
34public: \ 32public: \
33 YUZU_NON_COPYABLE(CLASS); \
34 YUZU_NON_MOVEABLE(CLASS); \
35 \
35 using BaseClass = BASE_CLASS; \ 36 using BaseClass = BASE_CLASS; \
36 static constexpr TypeObj GetStaticTypeObj() { \ 37 static constexpr TypeObj GetStaticTypeObj() { \
37 constexpr ClassTokenType Token = ClassToken(); \ 38 constexpr ClassTokenType Token = ClassToken(); \
@@ -224,9 +225,9 @@ private:
224 225
225template <typename T> 226template <typename T>
226class KScopedAutoObject { 227class KScopedAutoObject {
228public:
227 YUZU_NON_COPYABLE(KScopedAutoObject); 229 YUZU_NON_COPYABLE(KScopedAutoObject);
228 230
229public:
230 constexpr KScopedAutoObject() = default; 231 constexpr KScopedAutoObject() = default;
231 232
232 constexpr KScopedAutoObject(T* o) : m_obj(o) { 233 constexpr KScopedAutoObject(T* o) : m_obj(o) {
diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h
index 4eadfe99d..697cc4289 100644
--- a/src/core/hle/kernel/k_auto_object_container.h
+++ b/src/core/hle/kernel/k_auto_object_container.h
@@ -16,13 +16,12 @@ class KernelCore;
16class KProcess; 16class KProcess;
17 17
18class KAutoObjectWithListContainer { 18class KAutoObjectWithListContainer {
19public:
19 YUZU_NON_COPYABLE(KAutoObjectWithListContainer); 20 YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
20 YUZU_NON_MOVEABLE(KAutoObjectWithListContainer); 21 YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
21 22
22public:
23 using ListType = boost::intrusive::rbtree<KAutoObjectWithList>; 23 using ListType = boost::intrusive::rbtree<KAutoObjectWithList>;
24 24
25public:
26 class ListAccessor : public KScopedLightLock { 25 class ListAccessor : public KScopedLightLock {
27 public: 26 public:
28 explicit ListAccessor(KAutoObjectWithListContainer* container) 27 explicit ListAccessor(KAutoObjectWithListContainer* container)
@@ -48,7 +47,6 @@ public:
48 47
49 friend class ListAccessor; 48 friend class ListAccessor;
50 49
51public:
52 KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {} 50 KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {}
53 51
54 void Initialize() {} 52 void Initialize() {}
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 4b114ec2f..87004a0f9 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -22,13 +22,12 @@ namespace Kernel {
22class KernelCore; 22class KernelCore;
23 23
24class KHandleTable { 24class KHandleTable {
25public:
25 YUZU_NON_COPYABLE(KHandleTable); 26 YUZU_NON_COPYABLE(KHandleTable);
26 YUZU_NON_MOVEABLE(KHandleTable); 27 YUZU_NON_MOVEABLE(KHandleTable);
27 28
28public:
29 static constexpr size_t MaxTableSize = 1024; 29 static constexpr size_t MaxTableSize = 1024;
30 30
31public:
32 explicit KHandleTable(KernelCore& kernel_); 31 explicit KHandleTable(KernelCore& kernel_);
33 ~KHandleTable(); 32 ~KHandleTable();
34 33
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index abd6c8ace..17c7690f1 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -8,6 +8,7 @@
 #include <mutex>
 #include <tuple>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_heap.h"
 #include "core/hle/result.h"
@@ -20,8 +21,11 @@ namespace Kernel {
 
 class KPageLinkedList;
 
-class KMemoryManager final : NonCopyable {
+class KMemoryManager final {
 public:
+    YUZU_NON_COPYABLE(KMemoryManager);
+    YUZU_NON_MOVEABLE(KMemoryManager);
+
     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -88,26 +92,13 @@ public:
     }
 
 private:
-    class Impl final : NonCopyable {
-    private:
-        using RefCount = u16;
-
-    private:
-        KPageHeap heap;
-        Pool pool{};
-
+    class Impl final {
     public:
-        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
-
-        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
-            std::size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
+        YUZU_NON_COPYABLE(Impl);
+        YUZU_NON_MOVEABLE(Impl);
 
-    public:
         Impl() = default;
+        ~Impl() = default;
 
         std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
 
@@ -130,6 +121,21 @@ private:
         constexpr VAddr GetEndAddress() const {
            return heap.GetEndAddress();
         }
+
+        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+
+        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
+            std::size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
+
+    private:
+        using RefCount = u16;
+
+        KPageHeap heap;
+        Pool pool{};
     };
 
 private:
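As a sanity check of the relocated CalculateOptimizedProcessOverheadSize: it reserves one bit per page, rounded up to whole u64 words. With illustrative numbers (not from this diff), a 64 MiB region of 4 KiB pages needs a 2 KiB bitmap:

    #include <cstdint>

    // 67108864 / 4096 = 16384 pages; AlignUp(16384, 64) / 64 = 256 words; 256 * 8 = 2048 bytes.
    static_assert(((16384 + 63) / 64) * sizeof(uint64_t) == 2048,
                  "64 MiB of 4 KiB pages needs a 2 KiB page bitmap");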
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index 90ab8fd62..e9bdf4e59 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "common/assert.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/k_memory_region_type.h"
@@ -13,11 +14,13 @@ namespace Kernel {
 
 class KMemoryRegionAllocator;
 
-class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>,
-                            NonCopyable {
+class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion> {
     friend class KMemoryRegionTree;
 
 public:
+    YUZU_NON_COPYABLE(KMemoryRegion);
+    YUZU_NON_MOVEABLE(KMemoryRegion);
+
     constexpr KMemoryRegion() = default;
     constexpr KMemoryRegion(u64 address_, u64 last_address_)
         : address{address_}, last_address{last_address_} {}
@@ -29,6 +32,8 @@ public:
         : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
                         type_id_) {}
 
+    ~KMemoryRegion() = default;
+
     static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) {
         if (lhs.GetAddress() < rhs.GetAddress()) {
             return -1;
@@ -39,16 +44,6 @@ public:
         }
     }
 
-private:
-    constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
-        address = a;
-        pair_address = p;
-        last_address = la;
-        attributes = r;
-        type_id = t;
-    }
-
-public:
     constexpr u64 GetAddress() const {
         return address;
     }
@@ -108,6 +103,14 @@ public:
     }
 
 private:
+    constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
+        address = a;
+        pair_address = p;
+        last_address = la;
+        attributes = r;
+        type_id = t;
+    }
+
     u64 address{};
     u64 last_address{};
     u64 pair_address{};
@@ -115,8 +118,25 @@ private:
     u32 type_id{};
 };
 
-class KMemoryRegionTree final : NonCopyable {
+class KMemoryRegionTree final {
+private:
+    using TreeType =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
+
 public:
+    YUZU_NON_COPYABLE(KMemoryRegionTree);
+    YUZU_NON_MOVEABLE(KMemoryRegionTree);
+
+    using value_type = TreeType::value_type;
+    using size_type = TreeType::size_type;
+    using difference_type = TreeType::difference_type;
+    using pointer = TreeType::pointer;
+    using const_pointer = TreeType::const_pointer;
+    using reference = TreeType::reference;
+    using const_reference = TreeType::const_reference;
+    using iterator = TreeType::iterator;
+    using const_iterator = TreeType::const_iterator;
+
     struct DerivedRegionExtents {
         const KMemoryRegion* first_region{};
         const KMemoryRegion* last_region{};
@@ -140,29 +160,9 @@ public:
         }
     };
 
-private:
-    using TreeType =
-        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
-
-public:
-    using value_type = TreeType::value_type;
-    using size_type = TreeType::size_type;
-    using difference_type = TreeType::difference_type;
-    using pointer = TreeType::pointer;
-    using const_pointer = TreeType::const_pointer;
-    using reference = TreeType::reference;
-    using const_reference = TreeType::const_reference;
-    using iterator = TreeType::iterator;
-    using const_iterator = TreeType::const_iterator;
-
-private:
-    TreeType m_tree{};
-    KMemoryRegionAllocator& memory_region_allocator;
-
-public:
     explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_);
+    ~KMemoryRegionTree() = default;
 
-public:
     KMemoryRegion* FindModifiable(u64 address) {
         if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) {
             return std::addressof(*it);
@@ -241,7 +241,6 @@ public:
         return GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
     }
 
-public:
     void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
     bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
 
@@ -252,7 +251,6 @@ public:
         return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
     }
 
-public:
     // Iterator accessors.
     iterator begin() {
         return m_tree.begin();
@@ -322,13 +320,21 @@ public:
     iterator nfind(const_reference ref) const {
         return m_tree.nfind(ref);
     }
+
+private:
+    TreeType m_tree{};
+    KMemoryRegionAllocator& memory_region_allocator;
 };
 
-class KMemoryRegionAllocator final : NonCopyable {
+class KMemoryRegionAllocator final {
 public:
+    YUZU_NON_COPYABLE(KMemoryRegionAllocator);
+    YUZU_NON_MOVEABLE(KMemoryRegionAllocator);
+
     static constexpr size_t MaxMemoryRegions = 200;
 
     constexpr KMemoryRegionAllocator() = default;
+    constexpr ~KMemoryRegionAllocator() = default;
 
     template <typename... Args>
     KMemoryRegion* Allocate(Args&&... args) {
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 8d9f30523..a65aa28a0 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -8,14 +8,44 @@
 #include <vector>
 
 #include "common/alignment.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
 #include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-class KPageHeap final : NonCopyable {
+class KPageHeap final {
 public:
+    YUZU_NON_COPYABLE(KPageHeap);
+    YUZU_NON_MOVEABLE(KPageHeap);
+
+    KPageHeap() = default;
+    ~KPageHeap() = default;
+
+    constexpr VAddr GetAddress() const {
+        return heap_address;
+    }
+    constexpr std::size_t GetSize() const {
+        return heap_size;
+    }
+    constexpr VAddr GetEndAddress() const {
+        return GetAddress() + GetSize();
+    }
+    constexpr std::size_t GetPageOffset(VAddr block) const {
+        return (block - GetAddress()) / PageSize;
+    }
+
+    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
+    VAddr AllocateBlock(s32 index, bool random);
+    void Free(VAddr addr, std::size_t num_pages);
+
+    void UpdateUsedSize() {
+        used_size = heap_size - (GetNumFreePages() * PageSize);
+    }
+
+    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+
     static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
         const auto target_pages{std::max(num_pages, align_pages)};
         for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
@@ -45,21 +75,13 @@ public:
     }
 
 private:
-    static constexpr std::size_t NumMemoryBlockPageShifts{7};
-    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
-        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
-    };
-
-    class Block final : NonCopyable {
-    private:
-        KPageBitmap bitmap;
-        VAddr heap_address{};
-        uintptr_t end_offset{};
-        std::size_t block_shift{};
-        std::size_t next_block_shift{};
-
+    class Block final {
     public:
+        YUZU_NON_COPYABLE(Block);
+        YUZU_NON_MOVEABLE(Block);
+
         Block() = default;
+        ~Block() = default;
 
         constexpr std::size_t GetShift() const {
             return block_shift;
@@ -129,7 +151,6 @@ private:
             return heap_address + (offset << GetShift());
         }
 
-    public:
         static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
                                                                      std::size_t cur_block_shift,
                                                                      std::size_t next_block_shift) {
@@ -139,35 +160,15 @@ private:
             return KPageBitmap::CalculateManagementOverheadSize(
                 (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
         }
-    };
-
-public:
-    KPageHeap() = default;
-
-    constexpr VAddr GetAddress() const {
-        return heap_address;
-    }
-    constexpr std::size_t GetSize() const {
-        return heap_size;
-    }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
-    }
-    constexpr std::size_t GetPageOffset(VAddr block) const {
-        return (block - GetAddress()) / PageSize;
-    }
 
-    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
-    VAddr AllocateBlock(s32 index, bool random);
-    void Free(VAddr addr, std::size_t num_pages);
-
-    void UpdateUsedSize() {
-        used_size = heap_size - (GetNumFreePages() * PageSize);
-    }
-
-    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+    private:
+        KPageBitmap bitmap;
+        VAddr heap_address{};
+        uintptr_t end_offset{};
+        std::size_t block_shift{};
+        std::size_t next_block_shift{};
+    };
 
-private:
     constexpr std::size_t GetNumFreePages() const {
         std::size_t num_free{};
 
@@ -180,6 +181,11 @@ private:
 
     void FreeBlock(VAddr block, s32 index);
 
+    static constexpr std::size_t NumMemoryBlockPageShifts{7};
+    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
+    };
+
     VAddr heap_address{};
     std::size_t heap_size{};
    std::size_t used_size{};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 2ebbc0819..912853e5c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -61,7 +61,10 @@ constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr
 
 } // namespace
 
-KPageTable::KPageTable(Core::System& system_) : system{system_} {}
+KPageTable::KPageTable(Core::System& system_)
+    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+
+KPageTable::~KPageTable() = default;
 
 ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
                                             bool enable_aslr, VAddr code_addr,
@@ -282,7 +285,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify that the destination memory is unmapped.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
@@ -300,7 +303,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
 }
 
 ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     const std::size_t num_pages{size / PageSize};
 
@@ -337,7 +340,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t
 }
 
 ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     if (!size) {
         return ResultSuccess;
@@ -371,7 +374,7 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size
 
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     const std::size_t num_pages{size / PageSize};
 
@@ -399,10 +402,10 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
 
 ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     // Lock the physical memory lock.
-    std::lock_guard phys_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     std::size_t mapped_size{};
     const VAddr end_addr{addr + size};
@@ -478,7 +481,11 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    // Lock the physical memory lock.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
 
     const VAddr end_addr{addr + size};
     ResultCode result{ResultSuccess};
@@ -540,7 +547,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -579,7 +586,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
 }
 
 ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -622,6 +629,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
 
 ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
                                 KMemoryPermission perm) {
+    ASSERT(this->IsLockedByCurrentThread());
+
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
@@ -650,7 +659,7 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
@@ -667,6 +676,8 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
 }
 
 ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+    ASSERT(this->IsLockedByCurrentThread());
+
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
@@ -691,7 +702,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
@@ -712,7 +723,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -766,7 +777,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     return block_manager->FindBlock(addr).GetMemoryInfo();
 }
@@ -781,7 +792,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
 }
 
 ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState state{};
     KMemoryAttribute attribute{};
@@ -799,7 +810,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
 }
 
 ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState state{};
 
@@ -818,7 +829,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -847,7 +858,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
                                KMemoryAttribute::SetMask);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -878,7 +889,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
 
 ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
@@ -889,15 +900,15 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
 }
 
 ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
-    // Lock the physical memory lock.
-    std::lock_guard phys_lk(map_physical_memory_lock);
+    // Lock the physical memory mutex.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
     std::size_t allocation_size{};
     {
         // Lock the table.
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
 
         // Validate that setting heap size is possible at all.
         R_UNLESS(!is_kernel, ResultOutOfMemory);
@@ -962,7 +973,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
     // Map the pages.
     {
         // Lock the table.
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
 
         // Ensure that the heap hasn't changed since we began executing.
         ASSERT(cur_address == current_heap_end);
@@ -1004,7 +1015,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
                                                   bool is_map_only, VAddr region_start,
                                                   std::size_t region_num_pages, KMemoryState state,
                                                   KMemoryPermission perm, PAddr map_addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     if (!CanContain(region_start, region_num_pages * PageSize, state)) {
         return ResultInvalidCurrentMemory;
@@ -1035,7 +1046,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
 }
 
 ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -1058,7 +1069,7 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -1081,7 +1092,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
 }
 
 ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
 
@@ -1108,7 +1119,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
 
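Every std::lock_guard above becomes a KScopedLightLock, the kernel's RAII guard over KLightLock, which (unlike std::mutex) tracks its owning thread so the new IsLockedByCurrentThread assertions can work. A minimal sketch of the guard's assumed shape:

    // Assumed shape, mirroring "core/hle/kernel/k_light_lock.h"; details may differ.
    class KScopedLightLock {
    public:
        explicit KScopedLightLock(KLightLock& l) : lock{l} {
            lock.Lock();   // acquired on construction
        }
        ~KScopedLightLock() {
            lock.Unlock(); // released on scope exit, even on early return
        }
    private:
        KLightLock& lock;
    };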
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 60ae9b9e8..c98887d34 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -5,11 +5,12 @@
 #pragma once
 
 #include <memory>
-#include <mutex>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/page_table.h"
 #include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"
@@ -22,9 +23,13 @@ namespace Kernel {
 
 class KMemoryBlockManager;
 
-class KPageTable final : NonCopyable {
+class KPageTable final {
 public:
+    YUZU_NON_COPYABLE(KPageTable);
+    YUZU_NON_MOVEABLE(KPageTable);
+
     explicit KPageTable(Core::System& system_);
+    ~KPageTable();
 
     ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                     VAddr code_addr, std::size_t code_size,
@@ -142,11 +147,12 @@ private:
     }
 
     bool IsLockedByCurrentThread() const {
-        return true;
+        return general_lock.IsLockedByCurrentThread();
     }
 
-    std::recursive_mutex page_table_lock;
-    std::mutex map_physical_memory_lock;
+    mutable KLightLock general_lock;
+    mutable KLightLock map_physical_memory_lock;
+
     std::unique_ptr<KMemoryBlockManager> block_manager;
 
 public:
@@ -205,7 +211,7 @@ public:
         return alias_code_region_end - alias_code_region_start;
     }
     size_t GetNormalMemorySize() {
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
         return GetHeapSize() + mapped_physical_memory_size;
     }
     constexpr std::size_t GetAddressSpaceWidth() const {
@@ -247,7 +253,9 @@ public:
     constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-    constexpr PAddr GetPhysicalAddr(VAddr addr) {
+
+    PAddr GetPhysicalAddr(VAddr addr) {
+        ASSERT(IsLockedByCurrentThread());
         const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 0ad74b0a0..05c0bec9c 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -7,6 +7,7 @@
7#include <atomic> 7#include <atomic>
8 8
9#include "common/assert.h" 9#include "common/assert.h"
10#include "common/common_funcs.h"
10#include "common/common_types.h" 11#include "common/common_types.h"
11 12
12namespace Kernel { 13namespace Kernel {
@@ -15,13 +16,17 @@ class KernelCore;
15 16
16namespace impl { 17namespace impl {
17 18
18class KSlabHeapImpl final : NonCopyable { 19class KSlabHeapImpl final {
19public: 20public:
21 YUZU_NON_COPYABLE(KSlabHeapImpl);
22 YUZU_NON_MOVEABLE(KSlabHeapImpl);
23
20 struct Node { 24 struct Node {
21 Node* next{}; 25 Node* next{};
22 }; 26 };
23 27
24 constexpr KSlabHeapImpl() = default; 28 constexpr KSlabHeapImpl() = default;
29 constexpr ~KSlabHeapImpl() = default;
25 30
26 void Initialize(std::size_t size) { 31 void Initialize(std::size_t size) {
27 ASSERT(head == nullptr); 32 ASSERT(head == nullptr);
@@ -64,9 +69,13 @@ private:
64 69
65} // namespace impl 70} // namespace impl
66 71
67class KSlabHeapBase : NonCopyable { 72class KSlabHeapBase {
68public: 73public:
74 YUZU_NON_COPYABLE(KSlabHeapBase);
75 YUZU_NON_MOVEABLE(KSlabHeapBase);
76
69 constexpr KSlabHeapBase() = default; 77 constexpr KSlabHeapBase() = default;
78 constexpr ~KSlabHeapBase() = default;
70 79
71 constexpr bool Contains(uintptr_t addr) const { 80 constexpr bool Contains(uintptr_t addr) const {
72 return start <= addr && addr < end; 81 return start <= addr && addr < end;
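
The NonCopyable base class is being replaced throughout with the YUZU_NON_COPYABLE/YUZU_NON_MOVEABLE macros from common/common_funcs.h. A plausible expansion, assuming the standard deleted-special-member idiom (the exact definitions are not shown in this diff):

#define YUZU_NON_COPYABLE(cls)                                                 \
    cls(const cls&) = delete;                                                  \
    cls& operator=(const cls&) = delete

#define YUZU_NON_MOVEABLE(cls)                                                 \
    cls(cls&&) = delete;                                                       \
    cls& operator=(cls&&) = delete

Unlike inheriting from NonCopyable, the macros state per class whether copies and moves are deleted, without pulling in a base class.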
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 40bb893ac..4f7aebf3f 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2613,7 +2613,7 @@ static const FunctionDef SVC_Table_32[] = {
2613 {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"}, 2613 {0x33, SvcWrap32<GetThreadContext32>, "GetThreadContext32"},
2614 {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"}, 2614 {0x34, SvcWrap32<WaitForAddress32>, "WaitForAddress32"},
2615 {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"}, 2615 {0x35, SvcWrap32<SignalToAddress32>, "SignalToAddress32"},
2616 {0x36, nullptr, "Unknown"}, 2616 {0x36, SvcWrap32<SynchronizePreemptionState>, "SynchronizePreemptionState32"},
2617 {0x37, nullptr, "Unknown"}, 2617 {0x37, nullptr, "Unknown"},
2618 {0x38, nullptr, "Unknown"}, 2618 {0x38, nullptr, "Unknown"},
2619 {0x39, nullptr, "Unknown"}, 2619 {0x39, nullptr, "Unknown"},
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 0979fc421..329f4ba86 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -28,10 +28,10 @@ class Layer;
28 28
29/// Represents a single display type 29/// Represents a single display type
30class Display { 30class Display {
31public:
31 YUZU_NON_COPYABLE(Display); 32 YUZU_NON_COPYABLE(Display);
32 YUZU_NON_MOVEABLE(Display); 33 YUZU_NON_MOVEABLE(Display);
33 34
34public:
35 /// Constructs a display with a given unique ID and name. 35 /// Constructs a display with a given unique ID and name.
36 /// 36 ///
37 /// @param id The unique ID for this display. 37 /// @param id The unique ID for this display.
diff --git a/src/core/loader/loader.h b/src/core/loader/loader.h
index 7b1bac3f7..8b6b3b68f 100644
--- a/src/core/loader/loader.h
+++ b/src/core/loader/loader.h
@@ -11,6 +11,7 @@
11#include <utility> 11#include <utility>
12#include <vector> 12#include <vector>
13 13
14#include "common/common_funcs.h"
14#include "common/common_types.h" 15#include "common/common_types.h"
15#include "core/file_sys/control_metadata.h" 16#include "core/file_sys/control_metadata.h"
16#include "core/file_sys/vfs.h" 17#include "core/file_sys/vfs.h"
@@ -139,8 +140,11 @@ std::string GetResultStatusString(ResultStatus status);
139std::ostream& operator<<(std::ostream& os, ResultStatus status); 140std::ostream& operator<<(std::ostream& os, ResultStatus status);
140 141
141/// Interface for loading an application 142/// Interface for loading an application
142class AppLoader : NonCopyable { 143class AppLoader {
143public: 144public:
145 YUZU_NON_COPYABLE(AppLoader);
146 YUZU_NON_MOVEABLE(AppLoader);
147
144 struct LoadParameters { 148 struct LoadParameters {
145 s32 main_thread_priority; 149 s32 main_thread_priority;
146 u64 main_thread_stack_size; 150 u64 main_thread_stack_size;
diff --git a/src/input_common/drivers/udp_client.cpp b/src/input_common/drivers/udp_client.cpp
index d1cdb1ab2..333173e3d 100644
--- a/src/input_common/drivers/udp_client.cpp
+++ b/src/input_common/drivers/udp_client.cpp
@@ -271,7 +271,7 @@ void UDPClient::OnPadData(Response::PadData data, std::size_t client) {
271 const auto touch_axis_y_id = 271 const auto touch_axis_y_id =
272 static_cast<int>(id == 0 ? PadAxes::Touch1Y : PadAxes::Touch2Y); 272 static_cast<int>(id == 0 ? PadAxes::Touch1Y : PadAxes::Touch2Y);
273 const auto touch_button_id = 273 const auto touch_button_id =
274 static_cast<int>(id == 0 ? PadButton::Touch1 : PadButton::touch2); 274 static_cast<int>(id == 0 ? PadButton::Touch1 : PadButton::Touch2);
275 275
276 // TODO: Use custom calibration per device 276 // TODO: Use custom calibration per device
277 const Common::ParamPackage touch_param(Settings::values.touch_device.GetValue()); 277 const Common::ParamPackage touch_param(Settings::values.touch_device.GetValue());
@@ -319,6 +319,9 @@ void UDPClient::OnPadData(Response::PadData data, std::size_t client) {
319 SetButton(identifier, button, button_status); 319 SetButton(identifier, button, button_status);
320 } 320 }
321 321
322 SetButton(identifier, static_cast<int>(PadButton::Home), data.home != 0);
323 SetButton(identifier, static_cast<int>(PadButton::TouchHardPress), data.touch_hard_press != 0);
324
322 SetBattery(identifier, GetBatteryLevel(data.info.battery)); 325 SetBattery(identifier, GetBatteryLevel(data.info.battery));
323} 326}
324 327
@@ -393,7 +396,7 @@ std::vector<Common::ParamPackage> UDPClient::GetInputDevices() const {
393 396
394ButtonMapping UDPClient::GetButtonMappingForDevice(const Common::ParamPackage& params) { 397ButtonMapping UDPClient::GetButtonMappingForDevice(const Common::ParamPackage& params) {
395 // This list excludes any button that can't really be mapped 398 // This list excludes any button that can't really be mapped
396 static constexpr std::array<std::pair<Settings::NativeButton::Values, PadButton>, 18> 399 static constexpr std::array<std::pair<Settings::NativeButton::Values, PadButton>, 20>
397 switch_to_dsu_button = { 400 switch_to_dsu_button = {
398 std::pair{Settings::NativeButton::A, PadButton::Circle}, 401 std::pair{Settings::NativeButton::A, PadButton::Circle},
399 {Settings::NativeButton::B, PadButton::Cross}, 402 {Settings::NativeButton::B, PadButton::Cross},
@@ -413,6 +416,8 @@ ButtonMapping UDPClient::GetButtonMappingForDevice(const Common::ParamPackage& p
413 {Settings::NativeButton::SR, PadButton::R2}, 416 {Settings::NativeButton::SR, PadButton::R2},
414 {Settings::NativeButton::LStick, PadButton::L3}, 417 {Settings::NativeButton::LStick, PadButton::L3},
415 {Settings::NativeButton::RStick, PadButton::R3}, 418 {Settings::NativeButton::RStick, PadButton::R3},
419 {Settings::NativeButton::Home, PadButton::Home},
420 {Settings::NativeButton::Screenshot, PadButton::TouchHardPress},
416 }; 421 };
417 if (!params.Has("guid") || !params.Has("port") || !params.Has("pad")) { 422 if (!params.Has("guid") || !params.Has("port") || !params.Has("pad")) {
418 return {}; 423 return {};
@@ -517,6 +522,12 @@ Common::Input::ButtonNames UDPClient::GetUIButtonName(const Common::ParamPackage
517 return Common::Input::ButtonNames::Share; 522 return Common::Input::ButtonNames::Share;
518 case PadButton::Options: 523 case PadButton::Options:
519 return Common::Input::ButtonNames::Options; 524 return Common::Input::ButtonNames::Options;
525 case PadButton::Home:
526 return Common::Input::ButtonNames::Home;
527 case PadButton::Touch1:
528 case PadButton::Touch2:
529 case PadButton::TouchHardPress:
530 return Common::Input::ButtonNames::Touch;
520 default: 531 default:
521 return Common::Input::ButtonNames::Undefined; 532 return Common::Input::ButtonNames::Undefined;
522 } 533 }
diff --git a/src/input_common/drivers/udp_client.h b/src/input_common/drivers/udp_client.h
index 30d7c2682..e9c178139 100644
--- a/src/input_common/drivers/udp_client.h
+++ b/src/input_common/drivers/udp_client.h
@@ -84,7 +84,9 @@ private:
84 Cross = 0x4000, 84 Cross = 0x4000,
85 Square = 0x8000, 85 Square = 0x8000,
86 Touch1 = 0x10000, 86 Touch1 = 0x10000,
87 touch2 = 0x20000, 87 Touch2 = 0x20000,
88 Home = 0x40000,
89 TouchHardPress = 0x80000,
88 }; 90 };
89 91
90 enum class PadAxes : u8 { 92 enum class PadAxes : u8 {
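
Each PadButton enumerator above is a distinct power of two, so a DSU packet's button bitfield can be tested per button with a mask. A small sketch (the IsPressed helper and the buttons parameter are illustrative, not part of the driver):

constexpr bool IsPressed(u32 buttons, PadButton button) {
    // Single-bit masks: the new Home = 0x40000 and TouchHardPress = 0x80000
    // extend the field without colliding with the existing Touch1/Touch2 bits.
    return (buttons & static_cast<u32>(button)) != 0;
}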
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
index b48007856..5efbe4e6f 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
@@ -372,6 +372,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
372 ScalarU32 value); 372 ScalarU32 value);
373void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset, 373void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
374 Register value); 374 Register value);
375void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
376 Register value);
375void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 377void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
376 ScalarU32 offset, ScalarU32 value); 378 ScalarU32 offset, ScalarU32 value);
377void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 379void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -412,6 +414,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
412 ScalarU32 offset, Register value); 414 ScalarU32 offset, Register value);
413void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 415void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
414 ScalarU32 offset, Register value); 416 ScalarU32 offset, Register value);
417void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
418 ScalarU32 offset, Register value);
419void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
420 ScalarU32 offset, Register value);
421void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
422 ScalarU32 offset, Register value);
423void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
424 ScalarU32 offset, Register value);
425void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
426 ScalarU32 offset, Register value);
427void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
428 ScalarU32 offset, Register value);
429void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
430 ScalarU32 offset, Register value);
431void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
432 ScalarU32 offset, Register value);
433void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
434 ScalarU32 offset, Register value);
415void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 435void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
416 ScalarU32 offset, ScalarF32 value); 436 ScalarU32 offset, ScalarF32 value);
417void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 437void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -448,6 +468,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
448void EmitGlobalAtomicOr64(EmitContext& ctx); 468void EmitGlobalAtomicOr64(EmitContext& ctx);
449void EmitGlobalAtomicXor64(EmitContext& ctx); 469void EmitGlobalAtomicXor64(EmitContext& ctx);
450void EmitGlobalAtomicExchange64(EmitContext& ctx); 470void EmitGlobalAtomicExchange64(EmitContext& ctx);
471void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
472void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
473void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
474void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
475void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
476void EmitGlobalAtomicInc32x2(EmitContext& ctx);
477void EmitGlobalAtomicDec32x2(EmitContext& ctx);
478void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
479void EmitGlobalAtomicOr32x2(EmitContext& ctx);
480void EmitGlobalAtomicXor32x2(EmitContext& ctx);
481void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
451void EmitGlobalAtomicAddF32(EmitContext& ctx); 482void EmitGlobalAtomicAddF32(EmitContext& ctx);
452void EmitGlobalAtomicAddF16x2(EmitContext& ctx); 483void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
453void EmitGlobalAtomicAddF32x2(EmitContext& ctx); 484void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
index f135b67f5..f0fd94a28 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
@@ -311,6 +311,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
311 ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset); 311 ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset);
312} 312}
313 313
314void EmitSharedAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
315 [[maybe_unused]] IR::Inst& inst,
316 [[maybe_unused]] ScalarU32 pointer_offset,
317 [[maybe_unused]] Register value) {
318 throw NotImplementedException("GLASM instruction");
319}
320
314void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 321void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
315 ScalarU32 offset, ScalarU32 value) { 322 ScalarU32 offset, ScalarU32 value) {
316 Atom(ctx, inst, binding, offset, value, "ADD", "U32"); 323 Atom(ctx, inst, binding, offset, value, "ADD", "U32");
@@ -411,6 +418,62 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
411 Atom(ctx, inst, binding, offset, value, "EXCH", "U64"); 418 Atom(ctx, inst, binding, offset, value, "EXCH", "U64");
412} 419}
413 420
421void EmitStorageAtomicIAdd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
422 [[maybe_unused]] const IR::Value& binding,
423 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
424 throw NotImplementedException("GLASM instruction");
425}
426
427void EmitStorageAtomicSMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
428 [[maybe_unused]] const IR::Value& binding,
429 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
430 throw NotImplementedException("GLASM instruction");
431}
432
433void EmitStorageAtomicUMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
434 [[maybe_unused]] const IR::Value& binding,
435 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
436 throw NotImplementedException("GLASM instruction");
437}
438
439void EmitStorageAtomicSMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
440 [[maybe_unused]] const IR::Value& binding,
441 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
442 throw NotImplementedException("GLASM instruction");
443}
444
445void EmitStorageAtomicUMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
446 [[maybe_unused]] const IR::Value& binding,
447 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
448 throw NotImplementedException("GLASM instruction");
449}
450
451void EmitStorageAtomicAnd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
452 [[maybe_unused]] const IR::Value& binding,
453 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
454 throw NotImplementedException("GLASM instruction");
455}
456
457void EmitStorageAtomicOr32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
458 [[maybe_unused]] const IR::Value& binding,
459 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
460 throw NotImplementedException("GLASM instruction");
461}
462
463void EmitStorageAtomicXor32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
464 [[maybe_unused]] const IR::Value& binding,
465 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
466 throw NotImplementedException("GLASM instruction");
467}
468
469void EmitStorageAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
470 [[maybe_unused]] IR::Inst& inst,
471 [[maybe_unused]] const IR::Value& binding,
472 [[maybe_unused]] ScalarU32 offset,
473 [[maybe_unused]] Register value) {
474 throw NotImplementedException("GLASM instruction");
475}
476
414void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 477void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
415 ScalarU32 offset, ScalarF32 value) { 478 ScalarU32 offset, ScalarF32 value) {
416 Atom(ctx, inst, binding, offset, value, "ADD", "F32"); 479 Atom(ctx, inst, binding, offset, value, "ADD", "F32");
@@ -537,6 +600,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
537 throw NotImplementedException("GLASM instruction"); 600 throw NotImplementedException("GLASM instruction");
538} 601}
539 602
603void EmitGlobalAtomicIAdd32x2(EmitContext&) {
604 throw NotImplementedException("GLASM instruction");
605}
606
607void EmitGlobalAtomicSMin32x2(EmitContext&) {
608 throw NotImplementedException("GLASM instruction");
609}
610
611void EmitGlobalAtomicUMin32x2(EmitContext&) {
612 throw NotImplementedException("GLASM instruction");
613}
614
615void EmitGlobalAtomicSMax32x2(EmitContext&) {
616 throw NotImplementedException("GLASM instruction");
617}
618
619void EmitGlobalAtomicUMax32x2(EmitContext&) {
620 throw NotImplementedException("GLASM instruction");
621}
622
623void EmitGlobalAtomicInc32x2(EmitContext&) {
624 throw NotImplementedException("GLASM instruction");
625}
626
627void EmitGlobalAtomicDec32x2(EmitContext&) {
628 throw NotImplementedException("GLASM instruction");
629}
630
631void EmitGlobalAtomicAnd32x2(EmitContext&) {
632 throw NotImplementedException("GLASM instruction");
633}
634
635void EmitGlobalAtomicOr32x2(EmitContext&) {
636 throw NotImplementedException("GLASM instruction");
637}
638
639void EmitGlobalAtomicXor32x2(EmitContext&) {
640 throw NotImplementedException("GLASM instruction");
641}
642
643void EmitGlobalAtomicExchange32x2(EmitContext&) {
644 throw NotImplementedException("GLASM instruction");
645}
646
540void EmitGlobalAtomicAddF32(EmitContext&) { 647void EmitGlobalAtomicAddF32(EmitContext&) {
541 throw NotImplementedException("GLASM instruction"); 648 throw NotImplementedException("GLASM instruction");
542} 649}
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index dc377b053..a409a7ab3 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -105,6 +105,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_vi
105 pointer_offset, value, pointer_offset, value); 105 pointer_offset, value, pointer_offset, value);
106} 106}
107 107
108void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
109 std::string_view value) {
110 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
111 ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, pointer_offset, pointer_offset);
112 ctx.Add("smem[{}>>2]={}.x;smem[({}+4)>>2]={}.y;", pointer_offset, value, pointer_offset, value);
113}
114
108void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 115void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
109 const IR::Value& offset, std::string_view value) { 116 const IR::Value& offset, std::string_view value) {
110 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(), 117 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
@@ -265,6 +272,97 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
265 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value); 272 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
266} 273}
267 274
275void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
276 const IR::Value& offset, std::string_view value) {
277 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
278 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
279 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
280 ctx.var_alloc.Consume(offset));
281 ctx.Add("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;", ctx.stage_name, binding.U32(),
282 ctx.var_alloc.Consume(offset), value, ctx.stage_name, binding.U32(),
283 ctx.var_alloc.Consume(offset), value);
284}
285
286void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
287 const IR::Value& offset, std::string_view value) {
288 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
289 ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
290 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
291 ctx.var_alloc.Consume(offset));
292 ctx.Add("for(int "
293 "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
294 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
295 binding.U32(), ctx.var_alloc.Consume(offset), value);
296}
297
298void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
299 const IR::Value& offset, std::string_view value) {
300 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
301 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
302 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
303 ctx.var_alloc.Consume(offset));
304 ctx.Add("for(int i=0;i<2;++i){{ "
305 "{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],{}[i]);}}",
306 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
307 binding.U32(), ctx.var_alloc.Consume(offset), value);
308}
309
310void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
311 const IR::Value& offset, std::string_view value) {
312 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
313 ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
314 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
315 ctx.var_alloc.Consume(offset));
316 ctx.Add("for(int "
317 "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
318 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
319 binding.U32(), ctx.var_alloc.Consume(offset), value);
320}
321
322void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
323 const IR::Value& offset, std::string_view value) {
324 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
325 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
326 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
327 ctx.var_alloc.Consume(offset));
328 ctx.Add("for(int i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],{}[i]);}}",
329 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
330 binding.U32(), ctx.var_alloc.Consume(offset), value);
331}
332
333void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
334 const IR::Value& offset, std::string_view value) {
335 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
336 ctx.AddU32x2("{}=uvec2(atomicAnd({}_ssbo{}[{}>>2],{}.x),atomicAnd({}_ssbo{}[({}>>2)+1],{}.y));",
337 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
338 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
339}
340
341void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
342 const IR::Value& offset, std::string_view value) {
343 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
344 ctx.AddU32x2("{}=uvec2(atomicOr({}_ssbo{}[{}>>2],{}.x),atomicOr({}_ssbo{}[({}>>2)+1],{}.y));",
345 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
346 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
347}
348
349void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
350 const IR::Value& offset, std::string_view value) {
351 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
352 ctx.AddU32x2("{}=uvec2(atomicXor({}_ssbo{}[{}>>2],{}.x),atomicXor({}_ssbo{}[({}>>2)+1],{}.y));",
353 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
354 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
355}
356
357void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
358 const IR::Value& offset, std::string_view value) {
359 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
360 ctx.AddU32x2("{}=uvec2(atomicExchange({}_ssbo{}[{}>>2],{}.x),atomicExchange({}_ssbo{}[({}>>2)+"
361 "1],{}.y));",
362 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
363 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
364}
365
268void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 366void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
269 const IR::Value& offset, std::string_view value) { 367 const IR::Value& offset, std::string_view value) {
270 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd"); 368 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");
@@ -388,6 +486,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
388 throw NotImplementedException("GLSL Instruction"); 486 throw NotImplementedException("GLSL Instruction");
389} 487}
390 488
489void EmitGlobalAtomicIAdd32x2(EmitContext&) {
490 throw NotImplementedException("GLSL Instruction");
491}
492
493void EmitGlobalAtomicSMin32x2(EmitContext&) {
494 throw NotImplementedException("GLSL Instruction");
495}
496
497void EmitGlobalAtomicUMin32x2(EmitContext&) {
498 throw NotImplementedException("GLSL Instruction");
499}
500
501void EmitGlobalAtomicSMax32x2(EmitContext&) {
502 throw NotImplementedException("GLSL Instruction");
503}
504
505void EmitGlobalAtomicUMax32x2(EmitContext&) {
506 throw NotImplementedException("GLSL Instruction");
507}
508
509void EmitGlobalAtomicInc32x2(EmitContext&) {
510 throw NotImplementedException("GLSL Instruction");
511}
512
513void EmitGlobalAtomicDec32x2(EmitContext&) {
514 throw NotImplementedException("GLSL Instruction");
515}
516
517void EmitGlobalAtomicAnd32x2(EmitContext&) {
518 throw NotImplementedException("GLSL Instruction");
519}
520
521void EmitGlobalAtomicOr32x2(EmitContext&) {
522 throw NotImplementedException("GLSL Instruction");
523}
524
525void EmitGlobalAtomicXor32x2(EmitContext&) {
526 throw NotImplementedException("GLSL Instruction");
527}
528
529void EmitGlobalAtomicExchange32x2(EmitContext&) {
530 throw NotImplementedException("GLSL Instruction");
531}
532
391void EmitGlobalAtomicAddF32(EmitContext&) { 533void EmitGlobalAtomicAddF32(EmitContext&) {
392 throw NotImplementedException("GLSL Instruction"); 534 throw NotImplementedException("GLSL Instruction");
393} 535}
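
The new 32x2 storage atomics above are deliberately non-atomic: the AddU32x2 call reads both 32-bit halves as the instruction's result, then a second statement writes the halves back. As a rough illustration, here is what the two format strings of EmitStorageAtomicIAdd32x2 expand to for made-up inputs (the stage name "cs", binding 0, offset "off", value "val", and result "ret" are assumptions for the example):

#include <fmt/format.h>
#include <string>

std::string ExpandIAdd32x2Example() {
    const std::string stage{"cs"};
    const int binding{0};
    const std::string offset{"off"};
    const std::string value{"val"};
    // First statement: load both halves; this becomes the instruction result.
    std::string glsl{fmt::format("ret=uvec2({0}_ssbo{1}[{2}>>2],{0}_ssbo{1}[({2}>>2)+1]);",
                                 stage, binding, offset)};
    // Second statement: plain read-modify-write, half by half.
    glsl += fmt::format("{0}_ssbo{1}[{2}>>2]+={3}.x;{0}_ssbo{1}[({2}>>2)+1]+={3}.y;",
                        stage, binding, offset, value);
    // Result:
    //   ret=uvec2(cs_ssbo0[off>>2],cs_ssbo0[(off>>2)+1]);
    //   cs_ssbo0[off>>2]+=val.x;cs_ssbo0[(off>>2)+1]+=val.y;
    return glsl;
}

Another invocation can interleave between the two component writes, which is why each of these emitters logs the "fallback to non-atomic" warning.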
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
index 6cabbc717..704baddc9 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
@@ -442,6 +442,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
442 std::string_view value); 442 std::string_view value);
443void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset, 443void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
444 std::string_view value); 444 std::string_view value);
445void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
446 std::string_view value);
445void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 447void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
446 const IR::Value& offset, std::string_view value); 448 const IR::Value& offset, std::string_view value);
447void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 449void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -482,6 +484,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
482 const IR::Value& offset, std::string_view value); 484 const IR::Value& offset, std::string_view value);
483void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 485void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
484 const IR::Value& offset, std::string_view value); 486 const IR::Value& offset, std::string_view value);
487void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
488 const IR::Value& offset, std::string_view value);
489void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
490 const IR::Value& offset, std::string_view value);
491void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
492 const IR::Value& offset, std::string_view value);
493void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
494 const IR::Value& offset, std::string_view value);
495void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
496 const IR::Value& offset, std::string_view value);
497void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
498 const IR::Value& offset, std::string_view value);
499void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
500 const IR::Value& offset, std::string_view value);
501void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
502 const IR::Value& offset, std::string_view value);
503void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
504 const IR::Value& offset, std::string_view value);
485void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 505void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
486 const IR::Value& offset, std::string_view value); 506 const IR::Value& offset, std::string_view value);
487void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 507void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -518,6 +538,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
518void EmitGlobalAtomicOr64(EmitContext& ctx); 538void EmitGlobalAtomicOr64(EmitContext& ctx);
519void EmitGlobalAtomicXor64(EmitContext& ctx); 539void EmitGlobalAtomicXor64(EmitContext& ctx);
520void EmitGlobalAtomicExchange64(EmitContext& ctx); 540void EmitGlobalAtomicExchange64(EmitContext& ctx);
541void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
542void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
543void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
544void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
545void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
546void EmitGlobalAtomicInc32x2(EmitContext& ctx);
547void EmitGlobalAtomicDec32x2(EmitContext& ctx);
548void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
549void EmitGlobalAtomicOr32x2(EmitContext& ctx);
550void EmitGlobalAtomicXor32x2(EmitContext& ctx);
551void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
521void EmitGlobalAtomicAddF32(EmitContext& ctx); 552void EmitGlobalAtomicAddF32(EmitContext& ctx);
522void EmitGlobalAtomicAddF16x2(EmitContext& ctx); 553void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
523void EmitGlobalAtomicAddF32x2(EmitContext& ctx); 554void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 46ba52a25..d3cbb14a9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -82,6 +82,17 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
82 ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result)); 82 ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
83 return original_value; 83 return original_value;
84} 84}
85
86Id StorageAtomicU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
87 Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
88 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
89 const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
90 binding, offset, sizeof(u32[2]))};
91 const Id original_value{ctx.OpLoad(ctx.U32[2], pointer)};
92 const Id result{(ctx.*non_atomic_func)(ctx.U32[2], value, original_value)};
93 ctx.OpStore(pointer, result);
94 return original_value;
95}
85} // Anonymous namespace 96} // Anonymous namespace
86 97
87Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) { 98Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
@@ -141,7 +152,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
141 const auto [scope, semantics]{AtomicArgs(ctx)}; 152 const auto [scope, semantics]{AtomicArgs(ctx)};
142 return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value); 153 return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
143 } 154 }
144 LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic"); 155 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
145 const Id pointer_1{SharedPointer(ctx, offset, 0)}; 156 const Id pointer_1{SharedPointer(ctx, offset, 0)};
146 const Id pointer_2{SharedPointer(ctx, offset, 1)}; 157 const Id pointer_2{SharedPointer(ctx, offset, 1)};
147 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)}; 158 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -152,6 +163,18 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
152 return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2)); 163 return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
153} 164}
154 165
166Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id offset, Id value) {
167 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
168 const Id pointer_1{SharedPointer(ctx, offset, 0)};
169 const Id pointer_2{SharedPointer(ctx, offset, 1)};
170 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
171 const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
172 const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
173 ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
174 ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
175 return ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2);
176}
177
155Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 178Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
156 Id value) { 179 Id value) {
157 return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd); 180 return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
@@ -275,6 +298,56 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
275 return original; 298 return original;
276} 299}
277 300
301Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
302 Id value) {
303 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpIAdd);
304}
305
306Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
307 Id value) {
308 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMin);
309}
310
311Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
312 Id value) {
313 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMin);
314}
315
316Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
317 Id value) {
318 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMax);
319}
320
321Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
322 Id value) {
323 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMax);
324}
325
326Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
327 Id value) {
328 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseAnd);
329}
330
331Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
332 Id value) {
333 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseOr);
334}
335
336Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
337 Id value) {
338 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseXor);
339}
340
341Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
342 const IR::Value& offset, Id value) {
343 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
344 const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
345 binding, offset, sizeof(u32[2]))};
346 const Id original{ctx.OpLoad(ctx.U32[2], pointer)};
347 ctx.OpStore(pointer, value);
348 return original;
349}
350
278Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 351Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
279 Id value) { 352 Id value) {
280 const Id ssbo{ctx.ssbos[binding.U32()].U32}; 353 const Id ssbo{ctx.ssbos[binding.U32()].U32};
@@ -418,6 +491,50 @@ Id EmitGlobalAtomicExchange64(EmitContext&) {
418 throw NotImplementedException("SPIR-V Instruction"); 491 throw NotImplementedException("SPIR-V Instruction");
419} 492}
420 493
494Id EmitGlobalAtomicIAdd32x2(EmitContext&) {
495 throw NotImplementedException("SPIR-V Instruction");
496}
497
498Id EmitGlobalAtomicSMin32x2(EmitContext&) {
499 throw NotImplementedException("SPIR-V Instruction");
500}
501
502Id EmitGlobalAtomicUMin32x2(EmitContext&) {
503 throw NotImplementedException("SPIR-V Instruction");
504}
505
506Id EmitGlobalAtomicSMax32x2(EmitContext&) {
507 throw NotImplementedException("SPIR-V Instruction");
508}
509
510Id EmitGlobalAtomicUMax32x2(EmitContext&) {
511 throw NotImplementedException("SPIR-V Instruction");
512}
513
514Id EmitGlobalAtomicInc32x2(EmitContext&) {
515 throw NotImplementedException("SPIR-V Instruction");
516}
517
518Id EmitGlobalAtomicDec32x2(EmitContext&) {
519 throw NotImplementedException("SPIR-V Instruction");
520}
521
522Id EmitGlobalAtomicAnd32x2(EmitContext&) {
523 throw NotImplementedException("SPIR-V Instruction");
524}
525
526Id EmitGlobalAtomicOr32x2(EmitContext&) {
527 throw NotImplementedException("SPIR-V Instruction");
528}
529
530Id EmitGlobalAtomicXor32x2(EmitContext&) {
531 throw NotImplementedException("SPIR-V Instruction");
532}
533
534Id EmitGlobalAtomicExchange32x2(EmitContext&) {
535 throw NotImplementedException("SPIR-V Instruction");
536}
537
421Id EmitGlobalAtomicAddF32(EmitContext&) { 538Id EmitGlobalAtomicAddF32(EmitContext&) {
422 throw NotImplementedException("SPIR-V Instruction"); 539 throw NotImplementedException("SPIR-V Instruction");
423} 540}
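
StorageAtomicU32x2 above folds all eight non-atomic 32x2 fallbacks into one load/apply/store helper by taking a pointer to a Sirit::Module member function. A stripped-down sketch of that dispatch mechanism (the Module and Id types here are stand-ins, not the real Sirit API):

#include <cstdint>

using Id = std::uint32_t;

struct Module {
    Id OpIAdd([[maybe_unused]] Id type, Id a, Id b) { return a + b; } // stand-in behavior
    Id OpUMin([[maybe_unused]] Id type, Id a, Id b) { return a < b ? a : b; }
    // ...one member per binary op, all sharing this shape.
};

using BinaryOp = Id (Module::*)(Id, Id, Id);

// Mirrors the (ctx.*non_atomic_func)(...) call inside StorageAtomicU32x2.
Id ApplyNonAtomic(Module& module, BinaryOp op, Id type, Id value, Id original) {
    return (module.*op)(type, value, original); // e.g. op == &Module::OpIAdd
}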
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 887112deb..f263b41b0 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -335,6 +335,7 @@ Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value);
335Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value); 335Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value);
336Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value); 336Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value);
337Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value); 337Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value);
338Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id pointer_offset, Id value);
338Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 339Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
339 Id value); 340 Id value);
340Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 341Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -375,6 +376,24 @@ Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::
375 Id value); 376 Id value);
376Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 377Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
377 Id value); 378 Id value);
379Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
380 Id value);
381Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
382 Id value);
383Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
384 Id value);
385Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
386 Id value);
387Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
388 Id value);
389Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
390 Id value);
391Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
392 Id value);
393Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
394 Id value);
395Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
396 const IR::Value& offset, Id value);
378Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 397Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
379 Id value); 398 Id value);
380Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 399Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -411,6 +430,17 @@ Id EmitGlobalAtomicAnd64(EmitContext& ctx);
411Id EmitGlobalAtomicOr64(EmitContext& ctx); 430Id EmitGlobalAtomicOr64(EmitContext& ctx);
412Id EmitGlobalAtomicXor64(EmitContext& ctx); 431Id EmitGlobalAtomicXor64(EmitContext& ctx);
413Id EmitGlobalAtomicExchange64(EmitContext& ctx); 432Id EmitGlobalAtomicExchange64(EmitContext& ctx);
433Id EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
434Id EmitGlobalAtomicSMin32x2(EmitContext& ctx);
435Id EmitGlobalAtomicUMin32x2(EmitContext& ctx);
436Id EmitGlobalAtomicSMax32x2(EmitContext& ctx);
437Id EmitGlobalAtomicUMax32x2(EmitContext& ctx);
438Id EmitGlobalAtomicInc32x2(EmitContext& ctx);
439Id EmitGlobalAtomicDec32x2(EmitContext& ctx);
440Id EmitGlobalAtomicAnd32x2(EmitContext& ctx);
441Id EmitGlobalAtomicOr32x2(EmitContext& ctx);
442Id EmitGlobalAtomicXor32x2(EmitContext& ctx);
443Id EmitGlobalAtomicExchange32x2(EmitContext& ctx);
414Id EmitGlobalAtomicAddF32(EmitContext& ctx); 444Id EmitGlobalAtomicAddF32(EmitContext& ctx);
415Id EmitGlobalAtomicAddF16x2(EmitContext& ctx); 445Id EmitGlobalAtomicAddF16x2(EmitContext& ctx);
416Id EmitGlobalAtomicAddF32x2(EmitContext& ctx); 446Id EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 97e2bf6af..631446cf7 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -118,6 +118,7 @@ bool Inst::MayHaveSideEffects() const noexcept {
118 case Opcode::SharedAtomicXor32: 118 case Opcode::SharedAtomicXor32:
119 case Opcode::SharedAtomicExchange32: 119 case Opcode::SharedAtomicExchange32:
120 case Opcode::SharedAtomicExchange64: 120 case Opcode::SharedAtomicExchange64:
121 case Opcode::SharedAtomicExchange32x2:
121 case Opcode::GlobalAtomicIAdd32: 122 case Opcode::GlobalAtomicIAdd32:
122 case Opcode::GlobalAtomicSMin32: 123 case Opcode::GlobalAtomicSMin32:
123 case Opcode::GlobalAtomicUMin32: 124 case Opcode::GlobalAtomicUMin32:
@@ -138,6 +139,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
138 case Opcode::GlobalAtomicOr64: 139 case Opcode::GlobalAtomicOr64:
139 case Opcode::GlobalAtomicXor64: 140 case Opcode::GlobalAtomicXor64:
140 case Opcode::GlobalAtomicExchange64: 141 case Opcode::GlobalAtomicExchange64:
142 case Opcode::GlobalAtomicIAdd32x2:
143 case Opcode::GlobalAtomicSMin32x2:
144 case Opcode::GlobalAtomicUMin32x2:
145 case Opcode::GlobalAtomicSMax32x2:
146 case Opcode::GlobalAtomicUMax32x2:
147 case Opcode::GlobalAtomicAnd32x2:
148 case Opcode::GlobalAtomicOr32x2:
149 case Opcode::GlobalAtomicXor32x2:
150 case Opcode::GlobalAtomicExchange32x2:
141 case Opcode::GlobalAtomicAddF32: 151 case Opcode::GlobalAtomicAddF32:
142 case Opcode::GlobalAtomicAddF16x2: 152 case Opcode::GlobalAtomicAddF16x2:
143 case Opcode::GlobalAtomicAddF32x2: 153 case Opcode::GlobalAtomicAddF32x2:
@@ -165,6 +175,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
165 case Opcode::StorageAtomicOr64: 175 case Opcode::StorageAtomicOr64:
166 case Opcode::StorageAtomicXor64: 176 case Opcode::StorageAtomicXor64:
167 case Opcode::StorageAtomicExchange64: 177 case Opcode::StorageAtomicExchange64:
178 case Opcode::StorageAtomicIAdd32x2:
179 case Opcode::StorageAtomicSMin32x2:
180 case Opcode::StorageAtomicUMin32x2:
181 case Opcode::StorageAtomicSMax32x2:
182 case Opcode::StorageAtomicUMax32x2:
183 case Opcode::StorageAtomicAnd32x2:
184 case Opcode::StorageAtomicOr32x2:
185 case Opcode::StorageAtomicXor32x2:
186 case Opcode::StorageAtomicExchange32x2:
168 case Opcode::StorageAtomicAddF32: 187 case Opcode::StorageAtomicAddF32:
169 case Opcode::StorageAtomicAddF16x2: 188 case Opcode::StorageAtomicAddF16x2:
170 case Opcode::StorageAtomicAddF32x2: 189 case Opcode::StorageAtomicAddF32x2:
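
MayHaveSideEffects is what keeps these instructions alive through dead-code elimination: an atomic whose return value is never read has no uses, and without the new 32x2 cases a DCE pass would discard it along with its memory write. Roughly, as a sketch of the consuming pass rather than its exact code:

void DeadCodeEliminationPass(IR::Block& block) {
    for (IR::Inst& inst : block) {
        if (!inst.HasUses() && !inst.MayHaveSideEffects()) {
            inst.Invalidate(); // safe only because the atomics above opt out
        }
    }
}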
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index b94ce7406..efb6bfac3 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -341,6 +341,7 @@ OPCODE(SharedAtomicOr32, U32, U32,
341OPCODE(SharedAtomicXor32, U32, U32, U32, ) 341OPCODE(SharedAtomicXor32, U32, U32, U32, )
342OPCODE(SharedAtomicExchange32, U32, U32, U32, ) 342OPCODE(SharedAtomicExchange32, U32, U32, U32, )
343OPCODE(SharedAtomicExchange64, U64, U32, U64, ) 343OPCODE(SharedAtomicExchange64, U64, U32, U64, )
344OPCODE(SharedAtomicExchange32x2, U32x2, U32, U32x2, )
344 345
345OPCODE(GlobalAtomicIAdd32, U32, U64, U32, ) 346OPCODE(GlobalAtomicIAdd32, U32, U64, U32, )
346OPCODE(GlobalAtomicSMin32, U32, U64, U32, ) 347OPCODE(GlobalAtomicSMin32, U32, U64, U32, )
@@ -362,6 +363,15 @@ OPCODE(GlobalAtomicAnd64, U64, U64,
362OPCODE(GlobalAtomicOr64, U64, U64, U64, ) 363OPCODE(GlobalAtomicOr64, U64, U64, U64, )
363OPCODE(GlobalAtomicXor64, U64, U64, U64, ) 364OPCODE(GlobalAtomicXor64, U64, U64, U64, )
364OPCODE(GlobalAtomicExchange64, U64, U64, U64, ) 365OPCODE(GlobalAtomicExchange64, U64, U64, U64, )
366OPCODE(GlobalAtomicIAdd32x2, U32x2, U32x2, U32x2, )
367OPCODE(GlobalAtomicSMin32x2, U32x2, U32x2, U32x2, )
368OPCODE(GlobalAtomicUMin32x2, U32x2, U32x2, U32x2, )
369OPCODE(GlobalAtomicSMax32x2, U32x2, U32x2, U32x2, )
370OPCODE(GlobalAtomicUMax32x2, U32x2, U32x2, U32x2, )
371OPCODE(GlobalAtomicAnd32x2, U32x2, U32x2, U32x2, )
372OPCODE(GlobalAtomicOr32x2, U32x2, U32x2, U32x2, )
373OPCODE(GlobalAtomicXor32x2, U32x2, U32x2, U32x2, )
374OPCODE(GlobalAtomicExchange32x2, U32x2, U32x2, U32x2, )
365OPCODE(GlobalAtomicAddF32, F32, U64, F32, ) 375OPCODE(GlobalAtomicAddF32, F32, U64, F32, )
366OPCODE(GlobalAtomicAddF16x2, U32, U64, F16x2, ) 376OPCODE(GlobalAtomicAddF16x2, U32, U64, F16x2, )
367OPCODE(GlobalAtomicAddF32x2, U32, U64, F32x2, ) 377OPCODE(GlobalAtomicAddF32x2, U32, U64, F32x2, )
@@ -390,6 +400,15 @@ OPCODE(StorageAtomicAnd64, U64, U32,
390OPCODE(StorageAtomicOr64, U64, U32, U32, U64, ) 400OPCODE(StorageAtomicOr64, U64, U32, U32, U64, )
391OPCODE(StorageAtomicXor64, U64, U32, U32, U64, ) 401OPCODE(StorageAtomicXor64, U64, U32, U32, U64, )
392OPCODE(StorageAtomicExchange64, U64, U32, U32, U64, ) 402OPCODE(StorageAtomicExchange64, U64, U32, U32, U64, )
403OPCODE(StorageAtomicIAdd32x2, U32x2, U32, U32, U32x2, )
404OPCODE(StorageAtomicSMin32x2, U32x2, U32, U32, U32x2, )
405OPCODE(StorageAtomicUMin32x2, U32x2, U32, U32, U32x2, )
406OPCODE(StorageAtomicSMax32x2, U32x2, U32, U32, U32x2, )
407OPCODE(StorageAtomicUMax32x2, U32x2, U32, U32, U32x2, )
408OPCODE(StorageAtomicAnd32x2, U32x2, U32, U32, U32x2, )
409OPCODE(StorageAtomicOr32x2, U32x2, U32, U32, U32x2, )
410OPCODE(StorageAtomicXor32x2, U32x2, U32, U32, U32x2, )
411OPCODE(StorageAtomicExchange32x2, U32x2, U32, U32, U32x2, )
393OPCODE(StorageAtomicAddF32, F32, U32, U32, F32, ) 412OPCODE(StorageAtomicAddF32, F32, U32, U32, F32, )
394OPCODE(StorageAtomicAddF16x2, U32, U32, U32, F16x2, ) 413OPCODE(StorageAtomicAddF16x2, U32, U32, U32, F16x2, )
395OPCODE(StorageAtomicAddF32x2, U32, U32, U32, F32x2, ) 414OPCODE(StorageAtomicAddF32x2, U32, U32, U32, F32x2, )
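
opcodes.inc is an X-macro table: each OPCODE(name, result type, argument types...) row is expanded by whatever definition of OPCODE is in scope at the include site, so the new 32x2 rows propagate automatically to the opcode enum, type tables, and so on. The usual consumption pattern, sketched (the surrounding enum is illustrative):

enum class Opcode {
#define OPCODE(name, result_type, ...) name,
#include "opcodes.inc"
#undef OPCODE
};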
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index b6a20f904..bfd2ae650 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -360,6 +360,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
360 case IR::Opcode::GlobalAtomicOr64: 360 case IR::Opcode::GlobalAtomicOr64:
361 case IR::Opcode::GlobalAtomicXor64: 361 case IR::Opcode::GlobalAtomicXor64:
362 case IR::Opcode::GlobalAtomicExchange64: 362 case IR::Opcode::GlobalAtomicExchange64:
363 case IR::Opcode::GlobalAtomicIAdd32x2:
364 case IR::Opcode::GlobalAtomicSMin32x2:
365 case IR::Opcode::GlobalAtomicUMin32x2:
366 case IR::Opcode::GlobalAtomicSMax32x2:
367 case IR::Opcode::GlobalAtomicUMax32x2:
368 case IR::Opcode::GlobalAtomicAnd32x2:
369 case IR::Opcode::GlobalAtomicOr32x2:
370 case IR::Opcode::GlobalAtomicXor32x2:
371 case IR::Opcode::GlobalAtomicExchange32x2:
363 case IR::Opcode::GlobalAtomicAddF32: 372 case IR::Opcode::GlobalAtomicAddF32:
364 case IR::Opcode::GlobalAtomicAddF16x2: 373 case IR::Opcode::GlobalAtomicAddF16x2:
365 case IR::Opcode::GlobalAtomicAddF32x2: 374 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -597,6 +606,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
597 break; 606 break;
598 case IR::Opcode::LoadStorage64: 607 case IR::Opcode::LoadStorage64:
599 case IR::Opcode::WriteStorage64: 608 case IR::Opcode::WriteStorage64:
609 case IR::Opcode::StorageAtomicIAdd32x2:
610 case IR::Opcode::StorageAtomicSMin32x2:
611 case IR::Opcode::StorageAtomicUMin32x2:
612 case IR::Opcode::StorageAtomicSMax32x2:
613 case IR::Opcode::StorageAtomicUMax32x2:
614 case IR::Opcode::StorageAtomicAnd32x2:
615 case IR::Opcode::StorageAtomicOr32x2:
616 case IR::Opcode::StorageAtomicXor32x2:
617 case IR::Opcode::StorageAtomicExchange32x2:
600 info.used_storage_buffer_types |= IR::Type::U32x2; 618 info.used_storage_buffer_types |= IR::Type::U32x2;
601 break; 619 break;
602 case IR::Opcode::LoadStorage128: 620 case IR::Opcode::LoadStorage128:
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 4197b0095..38592afd0 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -92,6 +92,15 @@ bool IsGlobalMemory(const IR::Inst& inst) {
92 case IR::Opcode::GlobalAtomicOr64: 92 case IR::Opcode::GlobalAtomicOr64:
93 case IR::Opcode::GlobalAtomicXor64: 93 case IR::Opcode::GlobalAtomicXor64:
94 case IR::Opcode::GlobalAtomicExchange64: 94 case IR::Opcode::GlobalAtomicExchange64:
95 case IR::Opcode::GlobalAtomicIAdd32x2:
96 case IR::Opcode::GlobalAtomicSMin32x2:
97 case IR::Opcode::GlobalAtomicUMin32x2:
98 case IR::Opcode::GlobalAtomicSMax32x2:
99 case IR::Opcode::GlobalAtomicUMax32x2:
100 case IR::Opcode::GlobalAtomicAnd32x2:
101 case IR::Opcode::GlobalAtomicOr32x2:
102 case IR::Opcode::GlobalAtomicXor32x2:
103 case IR::Opcode::GlobalAtomicExchange32x2:
95 case IR::Opcode::GlobalAtomicAddF32: 104 case IR::Opcode::GlobalAtomicAddF32:
96 case IR::Opcode::GlobalAtomicAddF16x2: 105 case IR::Opcode::GlobalAtomicAddF16x2:
97 case IR::Opcode::GlobalAtomicAddF32x2: 106 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -135,6 +144,15 @@ bool IsGlobalMemoryWrite(const IR::Inst& inst) {
135 case IR::Opcode::GlobalAtomicOr64: 144 case IR::Opcode::GlobalAtomicOr64:
136 case IR::Opcode::GlobalAtomicXor64: 145 case IR::Opcode::GlobalAtomicXor64:
137 case IR::Opcode::GlobalAtomicExchange64: 146 case IR::Opcode::GlobalAtomicExchange64:
147 case IR::Opcode::GlobalAtomicIAdd32x2:
148 case IR::Opcode::GlobalAtomicSMin32x2:
149 case IR::Opcode::GlobalAtomicUMin32x2:
150 case IR::Opcode::GlobalAtomicSMax32x2:
151 case IR::Opcode::GlobalAtomicUMax32x2:
152 case IR::Opcode::GlobalAtomicAnd32x2:
153 case IR::Opcode::GlobalAtomicOr32x2:
154 case IR::Opcode::GlobalAtomicXor32x2:
155 case IR::Opcode::GlobalAtomicExchange32x2:
138 case IR::Opcode::GlobalAtomicAddF32: 156 case IR::Opcode::GlobalAtomicAddF32:
139 case IR::Opcode::GlobalAtomicAddF16x2: 157 case IR::Opcode::GlobalAtomicAddF16x2:
140 case IR::Opcode::GlobalAtomicAddF32x2: 158 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -199,6 +217,8 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
199 return IR::Opcode::StorageAtomicOr32; 217 return IR::Opcode::StorageAtomicOr32;
200 case IR::Opcode::GlobalAtomicXor32: 218 case IR::Opcode::GlobalAtomicXor32:
201 return IR::Opcode::StorageAtomicXor32; 219 return IR::Opcode::StorageAtomicXor32;
220 case IR::Opcode::GlobalAtomicExchange32:
221 return IR::Opcode::StorageAtomicExchange32;
202 case IR::Opcode::GlobalAtomicIAdd64: 222 case IR::Opcode::GlobalAtomicIAdd64:
203 return IR::Opcode::StorageAtomicIAdd64; 223 return IR::Opcode::StorageAtomicIAdd64;
204 case IR::Opcode::GlobalAtomicSMin64: 224 case IR::Opcode::GlobalAtomicSMin64:
@@ -215,10 +235,26 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
215 return IR::Opcode::StorageAtomicOr64; 235 return IR::Opcode::StorageAtomicOr64;
216 case IR::Opcode::GlobalAtomicXor64: 236 case IR::Opcode::GlobalAtomicXor64:
217 return IR::Opcode::StorageAtomicXor64; 237 return IR::Opcode::StorageAtomicXor64;
218 case IR::Opcode::GlobalAtomicExchange32:
219 return IR::Opcode::StorageAtomicExchange32;
220 case IR::Opcode::GlobalAtomicExchange64: 238 case IR::Opcode::GlobalAtomicExchange64:
221 return IR::Opcode::StorageAtomicExchange64; 239 return IR::Opcode::StorageAtomicExchange64;
240 case IR::Opcode::GlobalAtomicIAdd32x2:
241 return IR::Opcode::StorageAtomicIAdd32x2;
242 case IR::Opcode::GlobalAtomicSMin32x2:
243 return IR::Opcode::StorageAtomicSMin32x2;
244 case IR::Opcode::GlobalAtomicUMin32x2:
245 return IR::Opcode::StorageAtomicUMin32x2;
246 case IR::Opcode::GlobalAtomicSMax32x2:
247 return IR::Opcode::StorageAtomicSMax32x2;
248 case IR::Opcode::GlobalAtomicUMax32x2:
249 return IR::Opcode::StorageAtomicUMax32x2;
250 case IR::Opcode::GlobalAtomicAnd32x2:
251 return IR::Opcode::StorageAtomicAnd32x2;
252 case IR::Opcode::GlobalAtomicOr32x2:
253 return IR::Opcode::StorageAtomicOr32x2;
254 case IR::Opcode::GlobalAtomicXor32x2:
255 return IR::Opcode::StorageAtomicXor32x2;
256 case IR::Opcode::GlobalAtomicExchange32x2:
257 return IR::Opcode::StorageAtomicExchange32x2;
222 case IR::Opcode::GlobalAtomicAddF32: 258 case IR::Opcode::GlobalAtomicAddF32:
223 return IR::Opcode::StorageAtomicAddF32; 259 return IR::Opcode::StorageAtomicAddF32;
224 case IR::Opcode::GlobalAtomicAddF16x2: 260 case IR::Opcode::GlobalAtomicAddF16x2:
@@ -454,6 +490,15 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
454 case IR::Opcode::GlobalAtomicOr64: 490 case IR::Opcode::GlobalAtomicOr64:
455 case IR::Opcode::GlobalAtomicXor64: 491 case IR::Opcode::GlobalAtomicXor64:
456 case IR::Opcode::GlobalAtomicExchange64: 492 case IR::Opcode::GlobalAtomicExchange64:
493 case IR::Opcode::GlobalAtomicIAdd32x2:
494 case IR::Opcode::GlobalAtomicSMin32x2:
495 case IR::Opcode::GlobalAtomicUMin32x2:
496 case IR::Opcode::GlobalAtomicSMax32x2:
497 case IR::Opcode::GlobalAtomicUMax32x2:
498 case IR::Opcode::GlobalAtomicAnd32x2:
499 case IR::Opcode::GlobalAtomicOr32x2:
500 case IR::Opcode::GlobalAtomicXor32x2:
501 case IR::Opcode::GlobalAtomicExchange32x2:
457 case IR::Opcode::GlobalAtomicAddF32: 502 case IR::Opcode::GlobalAtomicAddF32:
458 case IR::Opcode::GlobalAtomicAddF16x2: 503 case IR::Opcode::GlobalAtomicAddF16x2:
459 case IR::Opcode::GlobalAtomicAddF32x2: 504 case IR::Opcode::GlobalAtomicAddF32x2:
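
The pattern in these hunks is uniform: every place the pass already recognized the scalar and 64-bit global atomics now also recognizes the 32x2 vector forms, and GlobalToStorage pairs each one with its storage-buffer counterpart. A condensed sketch of that one-to-one mapping; the standalone helper and its default case are illustrative only, while the opcode names come from the diff:

    // Illustrative condensation of the GlobalToStorage additions: each
    // GlobalAtomic*32x2 opcode maps to the StorageAtomic*32x2 opcode of
    // the same operation. The real mapping is the switch inside
    // GlobalToStorage in this pass, not a separate helper.
    IR::Opcode GlobalToStorage32x2(IR::Opcode opcode) {
        switch (opcode) {
        case IR::Opcode::GlobalAtomicIAdd32x2:
            return IR::Opcode::StorageAtomicIAdd32x2;
        case IR::Opcode::GlobalAtomicExchange32x2:
            return IR::Opcode::StorageAtomicExchange32x2;
        // SMin/UMin/SMax/UMax/And/Or/Xor follow the identical pattern.
        default:
            throw LogicError("Not a 32x2 global atomic");
        }
    }
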
diff --git a/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp b/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
index e80d3d1d9..c2654cd9b 100644
--- a/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
+++ b/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
@@ -199,6 +199,26 @@ void Lower(IR::Block& block, IR::Inst& inst) {
199 return ShiftRightLogical64To32(block, inst); 199 return ShiftRightLogical64To32(block, inst);
200 case IR::Opcode::ShiftRightArithmetic64: 200 case IR::Opcode::ShiftRightArithmetic64:
201 return ShiftRightArithmetic64To32(block, inst); 201 return ShiftRightArithmetic64To32(block, inst);
202 case IR::Opcode::SharedAtomicExchange64:
203 return inst.ReplaceOpcode(IR::Opcode::SharedAtomicExchange32x2);
204 case IR::Opcode::GlobalAtomicIAdd64:
205 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicIAdd32x2);
206 case IR::Opcode::GlobalAtomicSMin64:
207 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMin32x2);
208 case IR::Opcode::GlobalAtomicUMin64:
209 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMin32x2);
210 case IR::Opcode::GlobalAtomicSMax64:
211 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMax32x2);
212 case IR::Opcode::GlobalAtomicUMax64:
213 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMax32x2);
214 case IR::Opcode::GlobalAtomicAnd64:
215 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicAnd32x2);
216 case IR::Opcode::GlobalAtomicOr64:
217 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicOr32x2);
218 case IR::Opcode::GlobalAtomicXor64:
219 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicXor32x2);
220 case IR::Opcode::GlobalAtomicExchange64:
221 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicExchange32x2);
202 default: 222 default:
203 break; 223 break;
204 } 224 }
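
Note that the lowering here does not touch operands or uses; it only re-tags the instruction, so later passes and the backends treat the 64-bit atomic as an operation on a pair of 32-bit words. A minimal sketch of the pattern, using the same GetOpcode/ReplaceOpcode members as the pass (the free-standing helper is hypothetical):

    // Opcode-swap lowering: the instruction keeps its operands and result
    // value; only its opcode changes, steering backends to the 32x2 path.
    void LowerGlobalAtomic64(IR::Inst& inst) {
        switch (inst.GetOpcode()) {
        case IR::Opcode::GlobalAtomicIAdd64:
            inst.ReplaceOpcode(IR::Opcode::GlobalAtomicIAdd32x2);
            break;
        case IR::Opcode::GlobalAtomicExchange64:
            inst.ReplaceOpcode(IR::Opcode::GlobalAtomicExchange32x2);
            break;
        default:
            break; // remaining cases mirror the switch above
        }
    }
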
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 048dba4f3..fa26eb8b0 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -131,6 +131,8 @@ public:
131 131
132 void DownloadMemory(VAddr cpu_addr, u64 size); 132 void DownloadMemory(VAddr cpu_addr, u64 size);
133 133
134 bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer);
135
134 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); 136 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
135 137
136 void DisableGraphicsUniformBuffer(size_t stage, u32 index); 138 void DisableGraphicsUniformBuffer(size_t stage, u32 index);
@@ -808,6 +810,8 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
808 return; 810 return;
809 } 811 }
810 MICROPROFILE_SCOPE(GPU_DownloadMemory); 812 MICROPROFILE_SCOPE(GPU_DownloadMemory);
813 const bool is_accuracy_normal =
814 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
811 815
812 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; 816 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
813 u64 total_size_bytes = 0; 817 u64 total_size_bytes = 0;
@@ -819,6 +823,9 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
819 ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) { 823 ForEachBufferInRange(cpu_addr, size, [&](BufferId buffer_id, Buffer& buffer) {
820 buffer.ForEachDownloadRangeAndClear( 824 buffer.ForEachDownloadRangeAndClear(
821 cpu_addr, size, [&](u64 range_offset, u64 range_size) { 825 cpu_addr, size, [&](u64 range_offset, u64 range_size) {
826 if (is_accuracy_normal) {
827 return;
828 }
822 const VAddr buffer_addr = buffer.CpuAddr(); 829 const VAddr buffer_addr = buffer.CpuAddr();
823 const auto add_download = [&](VAddr start, VAddr end) { 830 const auto add_download = [&](VAddr start, VAddr end) {
824 const u64 new_offset = start - buffer_addr; 831 const u64 new_offset = start - buffer_addr;
@@ -1417,10 +1424,8 @@ void BufferCache<P>::MarkWrittenBuffer(BufferId buffer_id, VAddr cpu_addr, u32 s
1417 const IntervalType base_interval{cpu_addr, cpu_addr + size}; 1424 const IntervalType base_interval{cpu_addr, cpu_addr + size};
1418 common_ranges.add(base_interval); 1425 common_ranges.add(base_interval);
1419 1426
1420 const bool is_accuracy_high =
1421 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::High;
1422 const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue(); 1427 const bool is_async = Settings::values.use_asynchronous_gpu_emulation.GetValue();
1423 if (!is_async && !is_accuracy_high) { 1428 if (!is_async) {
1424 return; 1429 return;
1425 } 1430 }
1426 uncommitted_ranges.add(base_interval); 1431 uncommitted_ranges.add(base_interval);
@@ -1644,6 +1649,42 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
1644} 1649}
1645 1650
1646template <class P> 1651template <class P>
1652bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
1653 std::span<u8> inlined_buffer) {
1654 const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
1655 if (!is_dirty) {
1656 return false;
1657 }
1658 if (!IsRegionGpuModified(dest_address, copy_size)) {
1659 return false;
1660 }
1661
1662 const IntervalType subtract_interval{dest_address, dest_address + copy_size};
1663 ClearDownload(subtract_interval);
1664 common_ranges.subtract(subtract_interval);
1665
1666 BufferId buffer_id = FindBuffer(dest_address, static_cast<u32>(copy_size));
1667 auto& buffer = slot_buffers[buffer_id];
1668 SynchronizeBuffer(buffer, dest_address, static_cast<u32>(copy_size));
1669
1670 if constexpr (USE_MEMORY_MAPS) {
1671 std::array copies{BufferCopy{
1672 .src_offset = 0,
1673 .dst_offset = buffer.Offset(dest_address),
1674 .size = copy_size,
1675 }};
1676 auto upload_staging = runtime.UploadStagingBuffer(copy_size);
1677 u8* const src_pointer = upload_staging.mapped_span.data();
1678 std::memcpy(src_pointer, inlined_buffer.data(), copy_size);
1679 runtime.CopyBuffer(buffer, upload_staging.buffer, copies);
1680 } else {
1681 buffer.ImmediateUpload(buffer.Offset(dest_address), inlined_buffer.first(copy_size));
1682 }
1683
1684 return true;
1685}
1686
1687template <class P>
1647void BufferCache<P>::DownloadBufferMemory(Buffer& buffer) { 1688void BufferCache<P>::DownloadBufferMemory(Buffer& buffer) {
1648 DownloadBufferMemory(buffer, buffer.CpuAddr(), buffer.SizeBytes()); 1689 DownloadBufferMemory(buffer, buffer.CpuAddr(), buffer.SizeBytes());
1649} 1690}
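
InlineMemory deliberately declines regions that are not registered or not GPU-modified and returns false, leaving the caller to fall back to a plain write plus cache notification; the rasterizer changes later in this diff rely on exactly that contract. A caller-side sketch under those assumptions (the helper itself is hypothetical):

    // InlineMemory contract:
    //   true  -> the cache uploaded the data straight into its GPU buffer;
    //   false -> region untracked or not GPU-modified; mark it written so
    //            the next use resynchronizes from CPU memory.
    template <class P>
    void UploadInline(BufferCache<P>& buffer_cache, VAddr dest_address,
                      std::span<u8> data) {
        std::unique_lock lock{buffer_cache.mutex};
        if (!buffer_cache.InlineMemory(dest_address, data.size(), data)) {
            buffer_cache.WriteMemory(dest_address, data.size());
        }
    }
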
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 71d7e1473..351b110fe 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -7,6 +7,7 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "video_core/engines/engine_upload.h" 8#include "video_core/engines/engine_upload.h"
9#include "video_core/memory_manager.h" 9#include "video_core/memory_manager.h"
10#include "video_core/rasterizer_interface.h"
10#include "video_core/textures/decoders.h" 11#include "video_core/textures/decoders.h"
11 12
12namespace Tegra::Engines::Upload { 13namespace Tegra::Engines::Upload {
@@ -16,6 +17,10 @@ State::State(MemoryManager& memory_manager_, Registers& regs_)
16 17
17State::~State() = default; 18State::~State() = default;
18 19
20void State::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
21 rasterizer = rasterizer_;
22}
23
19void State::ProcessExec(const bool is_linear_) { 24void State::ProcessExec(const bool is_linear_) {
20 write_offset = 0; 25 write_offset = 0;
21 copy_size = regs.line_length_in * regs.line_count; 26 copy_size = regs.line_length_in * regs.line_count;
@@ -32,7 +37,7 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
32 } 37 }
33 const GPUVAddr address{regs.dest.Address()}; 38 const GPUVAddr address{regs.dest.Address()};
34 if (is_linear) { 39 if (is_linear) {
35 memory_manager.WriteBlock(address, inner_buffer.data(), copy_size); 40 rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer);
36 } else { 41 } else {
37 UNIMPLEMENTED_IF(regs.dest.z != 0); 42 UNIMPLEMENTED_IF(regs.dest.z != 0);
38 UNIMPLEMENTED_IF(regs.dest.depth != 1); 43 UNIMPLEMENTED_IF(regs.dest.depth != 1);
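
Because the GPU constructs its engines before any renderer exists, the upload state cannot take the rasterizer in its constructor; it is injected afterwards through BindRasterizer, the same late-binding pattern the other engines use. The shape of that pattern, with a hypothetical stand-in for the ProcessData path:

    // Non-owning late binding: GPU::Impl fills in the pointer once the
    // renderer is constructed, before any upload can run.
    class UploadState {
    public:
        void BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
            rasterizer = rasterizer_;
        }
        void ProcessLinearWrite(GPUVAddr address, std::span<u8> buffer) {
            // Routing through the rasterizer (instead of writing guest
            // memory directly) lets the buffer/texture caches intercept
            // the inlined data.
            rasterizer->AccelerateInlineToMemory(address, buffer.size(), buffer);
        }
    private:
        VideoCore::RasterizerInterface* rasterizer = nullptr;
    };
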
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 1c7f1effa..c9c5ec8c3 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -12,6 +12,10 @@ namespace Tegra {
12class MemoryManager; 12class MemoryManager;
13} 13}
14 14
15namespace VideoCore {
16class RasterizerInterface;
17}
18
15namespace Tegra::Engines::Upload { 19namespace Tegra::Engines::Upload {
16 20
17struct Registers { 21struct Registers {
@@ -60,6 +64,9 @@ public:
60 void ProcessExec(bool is_linear_); 64 void ProcessExec(bool is_linear_);
61 void ProcessData(u32 data, bool is_last_call); 65 void ProcessData(u32 data, bool is_last_call);
62 66
67 /// Binds a rasterizer to this engine.
68 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
69
63private: 70private:
64 u32 write_offset = 0; 71 u32 write_offset = 0;
65 u32 copy_size = 0; 72 u32 copy_size = 0;
@@ -68,6 +75,7 @@ private:
68 bool is_linear = false; 75 bool is_linear = false;
69 Registers& regs; 76 Registers& regs;
70 MemoryManager& memory_manager; 77 MemoryManager& memory_manager;
78 VideoCore::RasterizerInterface* rasterizer = nullptr;
71}; 79};
72 80
73} // namespace Tegra::Engines::Upload 81} // namespace Tegra::Engines::Upload
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 492b4c5a3..5a1c12076 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -22,6 +22,7 @@ KeplerCompute::~KeplerCompute() = default;
22 22
23void KeplerCompute::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { 23void KeplerCompute::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
24 rasterizer = rasterizer_; 24 rasterizer = rasterizer_;
25 upload_state.BindRasterizer(rasterizer);
25} 26}
26 27
27void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_call) { 28void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 560551157..8aed16caa 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -19,6 +19,10 @@ KeplerMemory::KeplerMemory(Core::System& system_, MemoryManager& memory_manager)
19 19
20KeplerMemory::~KeplerMemory() = default; 20KeplerMemory::~KeplerMemory() = default;
21 21
22void KeplerMemory::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
23 upload_state.BindRasterizer(rasterizer_);
24}
25
22void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call) { 26void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
23 ASSERT_MSG(method < Regs::NUM_REGS, 27 ASSERT_MSG(method < Regs::NUM_REGS,
24 "Invalid KeplerMemory register, increase the size of the Regs structure"); 28 "Invalid KeplerMemory register, increase the size of the Regs structure");
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 0d8ea09a9..949e2fae1 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -22,6 +22,10 @@ namespace Tegra {
22class MemoryManager; 22class MemoryManager;
23} 23}
24 24
25namespace VideoCore {
26class RasterizerInterface;
27}
28
25namespace Tegra::Engines { 29namespace Tegra::Engines {
26 30
27/** 31/**
@@ -38,6 +42,9 @@ public:
38 explicit KeplerMemory(Core::System& system_, MemoryManager& memory_manager); 42 explicit KeplerMemory(Core::System& system_, MemoryManager& memory_manager);
39 ~KeplerMemory() override; 43 ~KeplerMemory() override;
40 44
45 /// Binds a rasterizer to this engine.
46 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
47
41 /// Write the value to the register identified by method. 48 /// Write the value to the register identified by method.
42 void CallMethod(u32 method, u32 method_argument, bool is_last_call) override; 49 void CallMethod(u32 method, u32 method_argument, bool is_last_call) override;
43 50
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index c38ebd670..5d6d217bb 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -31,6 +31,7 @@ Maxwell3D::~Maxwell3D() = default;
31 31
32void Maxwell3D::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { 32void Maxwell3D::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
33 rasterizer = rasterizer_; 33 rasterizer = rasterizer_;
34 upload_state.BindRasterizer(rasterizer_);
34} 35}
35 36
36void Maxwell3D::InitializeRegisterDefaults() { 37void Maxwell3D::InitializeRegisterDefaults() {
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index f22342dfb..dc9df6c8b 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -1557,7 +1557,8 @@ private:
1557 1557
1558 static constexpr u32 null_cb_data = 0xFFFFFFFF; 1558 static constexpr u32 null_cb_data = 0xFFFFFFFF;
1559 struct CBDataState { 1559 struct CBDataState {
1560 std::array<std::array<u32, 0x4000>, 16> buffer; 1560 static constexpr size_t inline_size = 0x4000;
1561 std::array<std::array<u32, inline_size>, 16> buffer;
1561 u32 current{null_cb_data}; 1562 u32 current{null_cb_data};
1562 u32 id{null_cb_data}; 1563 u32 id{null_cb_data};
1563 u32 start_pos{}; 1564 u32 start_pos{};
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 705765c99..ba9ba082f 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -59,6 +59,7 @@ struct GPU::Impl {
59 maxwell_3d->BindRasterizer(rasterizer); 59 maxwell_3d->BindRasterizer(rasterizer);
60 fermi_2d->BindRasterizer(rasterizer); 60 fermi_2d->BindRasterizer(rasterizer);
61 kepler_compute->BindRasterizer(rasterizer); 61 kepler_compute->BindRasterizer(rasterizer);
62 kepler_memory->BindRasterizer(rasterizer);
62 maxwell_dma->BindRasterizer(rasterizer); 63 maxwell_dma->BindRasterizer(rasterizer);
63 } 64 }
64 65
@@ -502,8 +503,13 @@ struct GPU::Impl {
502 case BufferMethods::SemaphoreAddressHigh: 503 case BufferMethods::SemaphoreAddressHigh:
503 case BufferMethods::SemaphoreAddressLow: 504 case BufferMethods::SemaphoreAddressLow:
504 case BufferMethods::SemaphoreSequence: 505 case BufferMethods::SemaphoreSequence:
506 break;
505 case BufferMethods::UnkCacheFlush: 507 case BufferMethods::UnkCacheFlush:
508 rasterizer->SyncGuestHost();
509 break;
506 case BufferMethods::WrcacheFlush: 510 case BufferMethods::WrcacheFlush:
511 rasterizer->SignalReference();
512 break;
507 case BufferMethods::FenceValue: 513 case BufferMethods::FenceValue:
508 break; 514 break;
509 case BufferMethods::RefCnt: 515 case BufferMethods::RefCnt:
@@ -513,7 +519,7 @@ struct GPU::Impl {
513 ProcessFenceActionMethod(); 519 ProcessFenceActionMethod();
514 break; 520 break;
515 case BufferMethods::WaitForInterrupt: 521 case BufferMethods::WaitForInterrupt:
516 ProcessWaitForInterruptMethod(); 522 rasterizer->WaitForIdle();
517 break; 523 break;
518 case BufferMethods::SemaphoreTrigger: { 524 case BufferMethods::SemaphoreTrigger: {
519 ProcessSemaphoreTriggerMethod(); 525 ProcessSemaphoreTriggerMethod();
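
The puller changes give three previously pass-through methods real effects. A condensed sketch of the dispatch as it stands after this hunk (hypothetical free function; the real switch lives in GPU::Impl):

    void DispatchBufferMethod(VideoCore::RasterizerInterface& rasterizer,
                              BufferMethods method) {
        switch (method) {
        case BufferMethods::UnkCacheFlush:
            rasterizer.SyncGuestHost();   // reconcile guest/host memory views
            break;
        case BufferMethods::WrcacheFlush:
            rasterizer.SignalReference(); // handled as a fence-style reference
            break;
        case BufferMethods::WaitForInterrupt:
            rasterizer.WaitForIdle();     // drain outstanding GPU work
            break;
        default:
            break;
        }
    }
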
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 38d8d9d74..61bfe47c7 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -143,6 +143,8 @@ public:
143 [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align); 143 [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
144 void Unmap(GPUVAddr gpu_addr, std::size_t size); 144 void Unmap(GPUVAddr gpu_addr, std::size_t size);
145 145
146 void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
147
146private: 148private:
147 [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; 149 [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const;
148 void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); 150 void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size);
@@ -153,8 +155,6 @@ private:
153 void TryLockPage(PageEntry page_entry, std::size_t size); 155 void TryLockPage(PageEntry page_entry, std::size_t size);
154 void TryUnlockPage(PageEntry page_entry, std::size_t size); 156 void TryUnlockPage(PageEntry page_entry, std::size_t size);
155 157
156 void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
157
158 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, 158 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size,
159 bool is_safe) const; 159 bool is_safe) const;
160 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, 160 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size,
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index b094fc064..1f1f12291 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -123,6 +123,9 @@ public:
123 123
124 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; 124 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
125 125
126 virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
127 std::span<u8> memory) = 0;
128
126 /// Attempt to use a faster method to display the framebuffer to screen 129 /// Attempt to use a faster method to display the framebuffer to screen
127 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, 130 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
128 VAddr framebuffer_addr, u32 pixel_stride) { 131 VAddr framebuffer_addr, u32 pixel_stride) {
diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h
index bb204454e..c5f974080 100644
--- a/src/video_core/renderer_base.h
+++ b/src/video_core/renderer_base.h
@@ -5,9 +5,10 @@
5#pragma once 5#pragma once
6 6
7#include <atomic> 7#include <atomic>
8#include <functional>
8#include <memory> 9#include <memory>
9#include <optional>
10 10
11#include "common/common_funcs.h"
11#include "common/common_types.h" 12#include "common/common_types.h"
12#include "core/frontend/emu_window.h" 13#include "core/frontend/emu_window.h"
13#include "video_core/gpu.h" 14#include "video_core/gpu.h"
@@ -28,8 +29,11 @@ struct RendererSettings {
28 Layout::FramebufferLayout screenshot_framebuffer_layout; 29 Layout::FramebufferLayout screenshot_framebuffer_layout;
29}; 30};
30 31
31class RendererBase : NonCopyable { 32class RendererBase {
32public: 33public:
34 YUZU_NON_COPYABLE(RendererBase);
35 YUZU_NON_MOVEABLE(RendererBase);
36
33 explicit RendererBase(Core::Frontend::EmuWindow& window, 37 explicit RendererBase(Core::Frontend::EmuWindow& window,
34 std::unique_ptr<Core::Frontend::GraphicsContext> context); 38 std::unique_ptr<Core::Frontend::GraphicsContext> context);
35 virtual ~RendererBase(); 39 virtual ~RendererBase();
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 9b516c64f..142412a8e 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -484,6 +484,28 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
484 return accelerate_dma; 484 return accelerate_dma;
485} 485}
486 486
487void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
488 std::span<u8> memory) {
489 auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
490 if (!cpu_addr) [[unlikely]] {
491 gpu_memory.WriteBlock(address, memory.data(), copy_size);
492 return;
493 }
494 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
495 {
496 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
497 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
498 buffer_cache.WriteMemory(*cpu_addr, copy_size);
499 }
500 }
501 {
502 std::scoped_lock lock_texture{texture_cache.mutex};
503 texture_cache.WriteMemory(*cpu_addr, copy_size);
504 }
505 shader_cache.InvalidateRegion(*cpu_addr, copy_size);
506 query_cache.InvalidateRegion(*cpu_addr, copy_size);
507}
508
487bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config, 509bool RasterizerOpenGL::AccelerateDisplay(const Tegra::FramebufferConfig& config,
488 VAddr framebuffer_addr, u32 pixel_stride) { 510 VAddr framebuffer_addr, u32 pixel_stride) {
489 if (framebuffer_addr == 0) { 511 if (framebuffer_addr == 0) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index d0397b745..98f6fd342 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -106,6 +106,8 @@ public:
106 const Tegra::Engines::Fermi2D::Surface& dst, 106 const Tegra::Engines::Fermi2D::Surface& dst,
107 const Tegra::Engines::Fermi2D::Config& copy_config) override; 107 const Tegra::Engines::Fermi2D::Config& copy_config) override;
108 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; 108 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
109 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
110 std::span<u8> memory) override;
109 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 111 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
110 u32 pixel_stride) override; 112 u32 pixel_stride) override;
111 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 113 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index b2d5bfd3b..84e07f8bd 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -7,12 +7,14 @@
7#include <string_view> 7#include <string_view>
8#include <utility> 8#include <utility>
9#include <glad/glad.h> 9#include <glad/glad.h>
10#include "common/common_types.h" 10#include "common/common_funcs.h"
11 11
12namespace OpenGL { 12namespace OpenGL {
13 13
14class OGLRenderbuffer : private NonCopyable { 14class OGLRenderbuffer final {
15public: 15public:
16 YUZU_NON_COPYABLE(OGLRenderbuffer);
17
16 OGLRenderbuffer() = default; 18 OGLRenderbuffer() = default;
17 19
18 OGLRenderbuffer(OGLRenderbuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 20 OGLRenderbuffer(OGLRenderbuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -36,8 +38,10 @@ public:
36 GLuint handle = 0; 38 GLuint handle = 0;
37}; 39};
38 40
39class OGLTexture : private NonCopyable { 41class OGLTexture final {
40public: 42public:
43 YUZU_NON_COPYABLE(OGLTexture);
44
41 OGLTexture() = default; 45 OGLTexture() = default;
42 46
43 OGLTexture(OGLTexture&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 47 OGLTexture(OGLTexture&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -61,8 +65,10 @@ public:
61 GLuint handle = 0; 65 GLuint handle = 0;
62}; 66};
63 67
64class OGLTextureView : private NonCopyable { 68class OGLTextureView final {
65public: 69public:
70 YUZU_NON_COPYABLE(OGLTextureView);
71
66 OGLTextureView() = default; 72 OGLTextureView() = default;
67 73
68 OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 74 OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -86,8 +92,10 @@ public:
86 GLuint handle = 0; 92 GLuint handle = 0;
87}; 93};
88 94
89class OGLSampler : private NonCopyable { 95class OGLSampler final {
90public: 96public:
97 YUZU_NON_COPYABLE(OGLSampler);
98
91 OGLSampler() = default; 99 OGLSampler() = default;
92 100
93 OGLSampler(OGLSampler&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 101 OGLSampler(OGLSampler&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -111,8 +119,10 @@ public:
111 GLuint handle = 0; 119 GLuint handle = 0;
112}; 120};
113 121
114class OGLShader : private NonCopyable { 122class OGLShader final {
115public: 123public:
124 YUZU_NON_COPYABLE(OGLShader);
125
116 OGLShader() = default; 126 OGLShader() = default;
117 127
118 OGLShader(OGLShader&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 128 OGLShader(OGLShader&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -132,8 +142,10 @@ public:
132 GLuint handle = 0; 142 GLuint handle = 0;
133}; 143};
134 144
135class OGLProgram : private NonCopyable { 145class OGLProgram final {
136public: 146public:
147 YUZU_NON_COPYABLE(OGLProgram);
148
137 OGLProgram() = default; 149 OGLProgram() = default;
138 150
139 OGLProgram(OGLProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 151 OGLProgram(OGLProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -154,8 +166,10 @@ public:
154 GLuint handle = 0; 166 GLuint handle = 0;
155}; 167};
156 168
157class OGLAssemblyProgram : private NonCopyable { 169class OGLAssemblyProgram final {
158public: 170public:
171 YUZU_NON_COPYABLE(OGLAssemblyProgram);
172
159 OGLAssemblyProgram() = default; 173 OGLAssemblyProgram() = default;
160 174
161 OGLAssemblyProgram(OGLAssemblyProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 175 OGLAssemblyProgram(OGLAssemblyProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -176,8 +190,10 @@ public:
176 GLuint handle = 0; 190 GLuint handle = 0;
177}; 191};
178 192
179class OGLPipeline : private NonCopyable { 193class OGLPipeline final {
180public: 194public:
195 YUZU_NON_COPYABLE(OGLPipeline);
196
181 OGLPipeline() = default; 197 OGLPipeline() = default;
182 OGLPipeline(OGLPipeline&& o) noexcept : handle{std::exchange<GLuint>(o.handle, 0)} {} 198 OGLPipeline(OGLPipeline&& o) noexcept : handle{std::exchange<GLuint>(o.handle, 0)} {}
183 199
@@ -198,8 +214,10 @@ public:
198 GLuint handle = 0; 214 GLuint handle = 0;
199}; 215};
200 216
201class OGLBuffer : private NonCopyable { 217class OGLBuffer final {
202public: 218public:
219 YUZU_NON_COPYABLE(OGLBuffer);
220
203 OGLBuffer() = default; 221 OGLBuffer() = default;
204 222
205 OGLBuffer(OGLBuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 223 OGLBuffer(OGLBuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -223,8 +241,10 @@ public:
223 GLuint handle = 0; 241 GLuint handle = 0;
224}; 242};
225 243
226class OGLSync : private NonCopyable { 244class OGLSync final {
227public: 245public:
246 YUZU_NON_COPYABLE(OGLSync);
247
228 OGLSync() = default; 248 OGLSync() = default;
229 249
230 OGLSync(OGLSync&& o) noexcept : handle(std::exchange(o.handle, nullptr)) {} 250 OGLSync(OGLSync&& o) noexcept : handle(std::exchange(o.handle, nullptr)) {}
@@ -247,8 +267,10 @@ public:
247 GLsync handle = 0; 267 GLsync handle = 0;
248}; 268};
249 269
250class OGLFramebuffer : private NonCopyable { 270class OGLFramebuffer final {
251public: 271public:
272 YUZU_NON_COPYABLE(OGLFramebuffer);
273
252 OGLFramebuffer() = default; 274 OGLFramebuffer() = default;
253 275
254 OGLFramebuffer(OGLFramebuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 276 OGLFramebuffer(OGLFramebuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -272,8 +294,10 @@ public:
272 GLuint handle = 0; 294 GLuint handle = 0;
273}; 295};
274 296
275class OGLQuery : private NonCopyable { 297class OGLQuery final {
276public: 298public:
299 YUZU_NON_COPYABLE(OGLQuery);
300
277 OGLQuery() = default; 301 OGLQuery() = default;
278 302
279 OGLQuery(OGLQuery&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 303 OGLQuery(OGLQuery&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
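
All of these wrappers share one shape: copying is deleted by the macro, while moves transfer ownership of the GL handle with std::exchange so the moved-from object is left holding 0 and releases nothing on destruction. A generic sketch of the pattern (the class name and Release body are hypothetical; each real wrapper calls its matching glDelete* function):

    class OGLHandle final {
    public:
        YUZU_NON_COPYABLE(OGLHandle);

        OGLHandle() = default;
        OGLHandle(OGLHandle&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
        OGLHandle& operator=(OGLHandle&& o) noexcept {
            Release();                            // drop our old handle first
            handle = std::exchange(o.handle, 0);  // steal o's handle, zero it
            return *this;
        }
        ~OGLHandle() {
            Release();
        }

        GLuint handle = 0;

    private:
        void Release() {
            // glDeleteBuffers/glDeleteTextures/... on `handle` when non-zero.
        }
    };
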
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index fd334a146..2227d9197 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -548,6 +548,28 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
548 return accelerate_dma; 548 return accelerate_dma;
549} 549}
550 550
551void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
552 std::span<u8> memory) {
553 auto cpu_addr = gpu_memory.GpuToCpuAddress(address);
554 if (!cpu_addr) [[unlikely]] {
555 gpu_memory.WriteBlock(address, memory.data(), copy_size);
556 return;
557 }
558 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size);
559 {
560 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
561 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
562 buffer_cache.WriteMemory(*cpu_addr, copy_size);
563 }
564 }
565 {
566 std::scoped_lock lock_texture{texture_cache.mutex};
567 texture_cache.WriteMemory(*cpu_addr, copy_size);
568 }
569 pipeline_cache.InvalidateRegion(*cpu_addr, copy_size);
570 query_cache.InvalidateRegion(*cpu_addr, copy_size);
571}
572
551bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config, 573bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
552 VAddr framebuffer_addr, u32 pixel_stride) { 574 VAddr framebuffer_addr, u32 pixel_stride) {
553 if (!framebuffer_addr) { 575 if (!framebuffer_addr) {
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 866827247..5af2e275b 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -99,6 +99,8 @@ public:
99 const Tegra::Engines::Fermi2D::Surface& dst, 99 const Tegra::Engines::Fermi2D::Surface& dst,
100 const Tegra::Engines::Fermi2D::Config& copy_config) override; 100 const Tegra::Engines::Fermi2D::Config& copy_config) override;
101 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; 101 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
102 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
103 std::span<u8> memory) override;
102 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 104 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
103 u32 pixel_stride) override; 105 u32 pixel_stride) override;
104 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 106 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 3bfdf41ba..7d9d4f7ba 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -140,12 +140,12 @@ bool VKScheduler::UpdateRescaling(bool is_rescaling) {
140void VKScheduler::WorkerThread(std::stop_token stop_token) { 140void VKScheduler::WorkerThread(std::stop_token stop_token) {
141 Common::SetCurrentThreadName("yuzu:VulkanWorker"); 141 Common::SetCurrentThreadName("yuzu:VulkanWorker");
142 do { 142 do {
143 if (work_queue.empty()) {
144 wait_cv.notify_all();
145 }
146 std::unique_ptr<CommandChunk> work; 143 std::unique_ptr<CommandChunk> work;
147 { 144 {
148 std::unique_lock lock{work_mutex}; 145 std::unique_lock lock{work_mutex};
146 if (work_queue.empty()) {
147 wait_cv.notify_all();
148 }
149 work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); }); 149 work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); });
150 if (stop_token.stop_requested()) { 150 if (stop_token.stop_requested()) {
151 continue; 151 continue;
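
This reorders a racy check: previously the worker tested work_queue.empty() before acquiring work_mutex, so a producer could enqueue between the check and the wait, and threads blocked on wait_cv could observe a notification that no longer matched the queue's state. Moving the test under the lock makes the emptiness check consistent with what waiters see. The shape of the fixed loop, condensed, assuming a std::queue work_queue as the surrounding members suggest:

    do {
        std::unique_ptr<CommandChunk> work;
        {
            std::unique_lock lock{work_mutex};
            if (work_queue.empty()) {
                wait_cv.notify_all(); // idle waiters wake on a settled queue
            }
            work_cv.wait(lock, stop_token, [&] { return !work_queue.empty(); });
            if (stop_token.stop_requested()) {
                continue; // re-evaluates the loop condition below
            }
            work = std::move(work_queue.front());
            work_queue.pop();
        }
        // ... execute the chunk outside the lock, then recycle it ...
    } while (!stop_token.stop_requested());
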
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 1b06c9296..e69aa136b 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -146,6 +146,7 @@ private:
146 using FuncType = TypedCommand<T>; 146 using FuncType = TypedCommand<T>;
147 static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large"); 147 static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large");
148 148
149 recorded_counts++;
149 command_offset = Common::AlignUp(command_offset, alignof(FuncType)); 150 command_offset = Common::AlignUp(command_offset, alignof(FuncType));
150 if (command_offset > sizeof(data) - sizeof(FuncType)) { 151 if (command_offset > sizeof(data) - sizeof(FuncType)) {
151 return false; 152 return false;
@@ -167,7 +168,7 @@ private:
167 } 168 }
168 169
169 bool Empty() const { 170 bool Empty() const {
170 return command_offset == 0; 171 return recorded_counts == 0;
171 } 172 }
172 173
173 bool HasSubmit() const { 174 bool HasSubmit() const {
@@ -178,6 +179,7 @@ private:
178 Command* first = nullptr; 179 Command* first = nullptr;
179 Command* last = nullptr; 180 Command* last = nullptr;
180 181
182 size_t recorded_counts = 0;
181 size_t command_offset = 0; 183 size_t command_offset = 0;
182 bool submit = false; 184 bool submit = false;
183 alignas(std::max_align_t) std::array<u8, 0x8000> data{}; 185 alignas(std::max_align_t) std::array<u8, 0x8000> data{};
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 78bf90c48..87636857d 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -170,7 +170,7 @@ void ShaderCache::RemovePendingShaders() {
170 marked_for_removal.clear(); 170 marked_for_removal.clear();
171 171
172 if (!removed_shaders.empty()) { 172 if (!removed_shaders.empty()) {
173 RemoveShadersFromStorage(std::move(removed_shaders)); 173 RemoveShadersFromStorage(removed_shaders);
174 } 174 }
175} 175}
176 176
@@ -213,7 +213,7 @@ void ShaderCache::UnmarkMemory(Entry* entry) {
213 rasterizer.UpdatePagesCachedCount(addr, size, -1); 213 rasterizer.UpdatePagesCachedCount(addr, size, -1);
214} 214}
215 215
216void ShaderCache::RemoveShadersFromStorage(std::vector<ShaderInfo*> removed_shaders) { 216void ShaderCache::RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders) {
217 // Remove them from the cache 217 // Remove them from the cache
218 std::erase_if(storage, [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) { 218 std::erase_if(storage, [&removed_shaders](const std::unique_ptr<ShaderInfo>& shader) {
219 return std::ranges::find(removed_shaders, shader.get()) != removed_shaders.end(); 219 return std::ranges::find(removed_shaders, shader.get()) != removed_shaders.end();
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index 136fe294c..8836bc8c6 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <algorithm>
8#include <array> 7#include <array>
9#include <memory> 8#include <memory>
10#include <mutex> 9#include <mutex>
@@ -138,7 +137,7 @@ private:
138 /// @param removed_shaders Shaders to be removed from the storage 137 /// @param removed_shaders Shaders to be removed from the storage
139 /// @pre invalidation_mutex is locked 138 /// @pre invalidation_mutex is locked
140 /// @pre lookup_mutex is locked 139 /// @pre lookup_mutex is locked
141 void RemoveShadersFromStorage(std::vector<ShaderInfo*> removed_shaders); 140 void RemoveShadersFromStorage(std::span<ShaderInfo*> removed_shaders);
142 141
143 /// @brief Creates a new entry in the lookup cache and returns its pointer 142 /// @brief Creates a new entry in the lookup cache and returns its pointer
144 /// @pre lookup_mutex is locked 143 /// @pre lookup_mutex is locked
diff --git a/src/yuzu/configuration/configure_dialog.cpp b/src/yuzu/configuration/configure_dialog.cpp
index 464e7a489..19133ccf5 100644
--- a/src/yuzu/configuration/configure_dialog.cpp
+++ b/src/yuzu/configuration/configure_dialog.cpp
@@ -109,7 +109,7 @@ void ConfigureDialog::ApplyConfiguration() {
109 ui_tab->ApplyConfiguration(); 109 ui_tab->ApplyConfiguration();
110 system_tab->ApplyConfiguration(); 110 system_tab->ApplyConfiguration();
111 profile_tab->ApplyConfiguration(); 111 profile_tab->ApplyConfiguration();
112 filesystem_tab->applyConfiguration(); 112 filesystem_tab->ApplyConfiguration();
113 input_tab->ApplyConfiguration(); 113 input_tab->ApplyConfiguration();
114 hotkeys_tab->ApplyConfiguration(registry); 114 hotkeys_tab->ApplyConfiguration(registry);
115 cpu_tab->ApplyConfiguration(); 115 cpu_tab->ApplyConfiguration();
diff --git a/src/yuzu/configuration/configure_filesystem.cpp b/src/yuzu/configuration/configure_filesystem.cpp
index 9cb317822..d6fb43f8b 100644
--- a/src/yuzu/configuration/configure_filesystem.cpp
+++ b/src/yuzu/configuration/configure_filesystem.cpp
@@ -14,7 +14,7 @@
14ConfigureFilesystem::ConfigureFilesystem(QWidget* parent) 14ConfigureFilesystem::ConfigureFilesystem(QWidget* parent)
15 : QWidget(parent), ui(std::make_unique<Ui::ConfigureFilesystem>()) { 15 : QWidget(parent), ui(std::make_unique<Ui::ConfigureFilesystem>()) {
16 ui->setupUi(this); 16 ui->setupUi(this);
17 this->setConfiguration(); 17 SetConfiguration();
18 18
19 connect(ui->nand_directory_button, &QToolButton::pressed, this, 19 connect(ui->nand_directory_button, &QToolButton::pressed, this,
20 [this] { SetDirectory(DirectoryTarget::NAND, ui->nand_directory_edit); }); 20 [this] { SetDirectory(DirectoryTarget::NAND, ui->nand_directory_edit); });
@@ -38,7 +38,15 @@ ConfigureFilesystem::ConfigureFilesystem(QWidget* parent)
38 38
39ConfigureFilesystem::~ConfigureFilesystem() = default; 39ConfigureFilesystem::~ConfigureFilesystem() = default;
40 40
41void ConfigureFilesystem::setConfiguration() { 41void ConfigureFilesystem::changeEvent(QEvent* event) {
42 if (event->type() == QEvent::LanguageChange) {
43 RetranslateUI();
44 }
45
46 QWidget::changeEvent(event);
47}
48
49void ConfigureFilesystem::SetConfiguration() {
42 ui->nand_directory_edit->setText( 50 ui->nand_directory_edit->setText(
43 QString::fromStdString(Common::FS::GetYuzuPathString(Common::FS::YuzuPath::NANDDir))); 51 QString::fromStdString(Common::FS::GetYuzuPathString(Common::FS::YuzuPath::NANDDir)));
44 ui->sdmc_directory_edit->setText( 52 ui->sdmc_directory_edit->setText(
@@ -60,7 +68,7 @@ void ConfigureFilesystem::setConfiguration() {
60 UpdateEnabledControls(); 68 UpdateEnabledControls();
61} 69}
62 70
63void ConfigureFilesystem::applyConfiguration() { 71void ConfigureFilesystem::ApplyConfiguration() {
64 Common::FS::SetYuzuPath(Common::FS::YuzuPath::NANDDir, 72 Common::FS::SetYuzuPath(Common::FS::YuzuPath::NANDDir,
65 ui->nand_directory_edit->text().toStdString()); 73 ui->nand_directory_edit->text().toStdString());
66 Common::FS::SetYuzuPath(Common::FS::YuzuPath::SDMCDir, 74 Common::FS::SetYuzuPath(Common::FS::YuzuPath::SDMCDir,
@@ -143,6 +151,6 @@ void ConfigureFilesystem::UpdateEnabledControls() {
143 !ui->gamecard_current_game->isChecked()); 151 !ui->gamecard_current_game->isChecked());
144} 152}
145 153
146void ConfigureFilesystem::retranslateUi() { 154void ConfigureFilesystem::RetranslateUI() {
147 ui->retranslateUi(this); 155 ui->retranslateUi(this);
148} 156}
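
This adopts the standard Qt pattern for runtime language switching: QEvent::LanguageChange is delivered to every widget when a new translator is installed, and the handler re-runs the uic-generated retranslateUi to refresh all tr() strings. The generic shape of the pattern (hypothetical widget):

    void MyWidget::changeEvent(QEvent* event) {
        if (event->type() == QEvent::LanguageChange) {
            ui->retranslateUi(this); // re-applies every tr() string from the .ui
        }
        QWidget::changeEvent(event); // preserve default handling for the rest
    }
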
diff --git a/src/yuzu/configuration/configure_filesystem.h b/src/yuzu/configuration/configure_filesystem.h
index 2147cd405..b4f9355eb 100644
--- a/src/yuzu/configuration/configure_filesystem.h
+++ b/src/yuzu/configuration/configure_filesystem.h
@@ -20,11 +20,13 @@ public:
20 explicit ConfigureFilesystem(QWidget* parent = nullptr); 20 explicit ConfigureFilesystem(QWidget* parent = nullptr);
21 ~ConfigureFilesystem() override; 21 ~ConfigureFilesystem() override;
22 22
23 void applyConfiguration(); 23 void ApplyConfiguration();
24 void retranslateUi();
25 24
26private: 25private:
27 void setConfiguration(); 26 void changeEvent(QEvent* event) override;
27
28 void RetranslateUI();
29 void SetConfiguration();
28 30
29 enum class DirectoryTarget { 31 enum class DirectoryTarget {
30 NAND, 32 NAND,
diff --git a/src/yuzu/configuration/configure_hotkeys.cpp b/src/yuzu/configuration/configure_hotkeys.cpp
index be10e0a31..53e629a5e 100644
--- a/src/yuzu/configuration/configure_hotkeys.cpp
+++ b/src/yuzu/configuration/configure_hotkeys.cpp
@@ -178,52 +178,52 @@ void ConfigureHotkeys::SetPollingResult(Core::HID::NpadButton button, const bool
178QString ConfigureHotkeys::GetButtonName(Core::HID::NpadButton button) const { 178QString ConfigureHotkeys::GetButtonName(Core::HID::NpadButton button) const {
179 Core::HID::NpadButtonState state{button}; 179 Core::HID::NpadButtonState state{button};
180 if (state.a) { 180 if (state.a) {
181 return tr("A"); 181 return QStringLiteral("A");
182 } 182 }
183 if (state.b) { 183 if (state.b) {
184 return tr("B"); 184 return QStringLiteral("B");
185 } 185 }
186 if (state.x) { 186 if (state.x) {
187 return tr("X"); 187 return QStringLiteral("X");
188 } 188 }
189 if (state.y) { 189 if (state.y) {
190 return tr("Y"); 190 return QStringLiteral("Y");
191 } 191 }
192 if (state.l || state.right_sl || state.left_sl) { 192 if (state.l || state.right_sl || state.left_sl) {
193 return tr("L"); 193 return QStringLiteral("L");
194 } 194 }
195 if (state.r || state.right_sr || state.left_sr) { 195 if (state.r || state.right_sr || state.left_sr) {
196 return tr("R"); 196 return QStringLiteral("R");
197 } 197 }
198 if (state.zl) { 198 if (state.zl) {
199 return tr("ZL"); 199 return QStringLiteral("ZL");
200 } 200 }
201 if (state.zr) { 201 if (state.zr) {
202 return tr("ZR"); 202 return QStringLiteral("ZR");
203 } 203 }
204 if (state.left) { 204 if (state.left) {
205 return tr("Dpad_Left"); 205 return QStringLiteral("Dpad_Left");
206 } 206 }
207 if (state.right) { 207 if (state.right) {
208 return tr("Dpad_Right"); 208 return QStringLiteral("Dpad_Right");
209 } 209 }
210 if (state.up) { 210 if (state.up) {
211 return tr("Dpad_Up"); 211 return QStringLiteral("Dpad_Up");
212 } 212 }
213 if (state.down) { 213 if (state.down) {
214 return tr("Dpad_Down"); 214 return QStringLiteral("Dpad_Down");
215 } 215 }
216 if (state.stick_l) { 216 if (state.stick_l) {
217 return tr("Left_Stick"); 217 return QStringLiteral("Left_Stick");
218 } 218 }
219 if (state.stick_r) { 219 if (state.stick_r) {
220 return tr("Right_Stick"); 220 return QStringLiteral("Right_Stick");
221 } 221 }
222 if (state.minus) { 222 if (state.minus) {
223 return tr("Minus"); 223 return QStringLiteral("Minus");
224 } 224 }
225 if (state.plus) { 225 if (state.plus) {
226 return tr("Plus"); 226 return QStringLiteral("Plus");
227 } 227 }
228 return tr("Invalid"); 228 return tr("Invalid");
229} 229}
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 752504236..cc0534907 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -102,6 +102,10 @@ QString GetButtonName(Common::Input::ButtonNames button_name) {
102 return QObject::tr("Share"); 102 return QObject::tr("Share");
103 case Common::Input::ButtonNames::Options: 103 case Common::Input::ButtonNames::Options:
104 return QObject::tr("Options"); 104 return QObject::tr("Options");
105 case Common::Input::ButtonNames::Home:
106 return QObject::tr("Home");
107 case Common::Input::ButtonNames::Touch:
108 return QObject::tr("Touch");
105 case Common::Input::ButtonNames::ButtonMouseWheel: 109 case Common::Input::ButtonNames::ButtonMouseWheel:
106 return QObject::tr("Wheel", "Indicates the mouse wheel"); 110 return QObject::tr("Wheel", "Indicates the mouse wheel");
107 case Common::Input::ButtonNames::ButtonBackward: 111 case Common::Input::ButtonNames::ButtonBackward:
@@ -326,7 +330,7 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
326 connect(button, &QPushButton::clicked, [=, this] { 330 connect(button, &QPushButton::clicked, [=, this] {
327 HandleClick( 331 HandleClick(
328 button, button_id, 332 button, button_id,
329 [=, this](Common::ParamPackage params) { 333 [=, this](const Common::ParamPackage& params) {
330 emulated_controller->SetButtonParam(button_id, params); 334 emulated_controller->SetButtonParam(button_id, params);
331 }, 335 },
332 InputCommon::Polling::InputType::Button); 336 InputCommon::Polling::InputType::Button);
@@ -392,7 +396,7 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
392 connect(button, &QPushButton::clicked, [=, this] { 396 connect(button, &QPushButton::clicked, [=, this] {
393 HandleClick( 397 HandleClick(
394 button, motion_id, 398 button, motion_id,
395 [=, this](Common::ParamPackage params) { 399 [=, this](const Common::ParamPackage& params) {
396 emulated_controller->SetMotionParam(motion_id, params); 400 emulated_controller->SetMotionParam(motion_id, params);
397 }, 401 },
398 InputCommon::Polling::InputType::Motion); 402 InputCommon::Polling::InputType::Motion);
@@ -497,10 +501,11 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
497 param.Set("invert_y", invert_str); 501 param.Set("invert_y", invert_str);
498 emulated_controller->SetStickParam(analog_id, param); 502 emulated_controller->SetStickParam(analog_id, param);
499 } 503 }
500 for (int sub_button_id = 0; sub_button_id < ANALOG_SUB_BUTTONS_NUM; 504 for (int analog_sub_button_id = 0;
501 ++sub_button_id) { 505 analog_sub_button_id < ANALOG_SUB_BUTTONS_NUM;
502 analog_map_buttons[analog_id][sub_button_id]->setText( 506 ++analog_sub_button_id) {
503 AnalogToText(param, analog_sub_buttons[sub_button_id])); 507 analog_map_buttons[analog_id][analog_sub_button_id]->setText(
508 AnalogToText(param, analog_sub_buttons[analog_sub_button_id]));
504 } 509 }
505 }); 510 });
506 context_menu.exec(analog_map_buttons[analog_id][sub_button_id]->mapToGlobal( 511 context_menu.exec(analog_map_buttons[analog_id][sub_button_id]->mapToGlobal(
@@ -783,7 +788,7 @@ void ConfigureInputPlayer::UpdateInputDeviceCombobox() {
783 if (devices.size() == 1) { 788 if (devices.size() == 1) {
784 const auto devices_it = std::find_if( 789 const auto devices_it = std::find_if(
785 input_devices.begin(), input_devices.end(), 790 input_devices.begin(), input_devices.end(),
786 [first_engine, first_guid, first_port, first_pad](const Common::ParamPackage param) { 791 [first_engine, first_guid, first_port, first_pad](const Common::ParamPackage& param) {
787 return param.Get("engine", "") == first_engine && 792 return param.Get("engine", "") == first_engine &&
788 param.Get("guid", "") == first_guid && param.Get("port", 0) == first_port && 793 param.Get("guid", "") == first_guid && param.Get("port", 0) == first_port &&
789 param.Get("pad", 0) == first_pad; 794 param.Get("pad", 0) == first_pad;
@@ -814,7 +819,7 @@ void ConfigureInputPlayer::UpdateInputDeviceCombobox() {
814 if (is_engine_equal && is_port_equal) { 819 if (is_engine_equal && is_port_equal) {
815 const auto devices_it = std::find_if( 820 const auto devices_it = std::find_if(
816 input_devices.begin(), input_devices.end(), 821 input_devices.begin(), input_devices.end(),
817 [first_engine, first_guid, second_guid, first_port](const Common::ParamPackage param) { 822 [first_engine, first_guid, second_guid, first_port](const Common::ParamPackage& param) {
818 const bool is_guid_valid = 823 const bool is_guid_valid =
819 (param.Get("guid", "") == first_guid && 824 (param.Get("guid", "") == first_guid &&
820 param.Get("guid2", "") == second_guid) || 825 param.Get("guid2", "") == second_guid) ||
@@ -1026,7 +1031,7 @@ int ConfigureInputPlayer::GetIndexFromControllerType(Core::HID::NpadStyleIndex t
1026void ConfigureInputPlayer::UpdateInputDevices() { 1031void ConfigureInputPlayer::UpdateInputDevices() {
1027 input_devices = input_subsystem->GetInputDevices(); 1032 input_devices = input_subsystem->GetInputDevices();
1028 ui->comboDevices->clear(); 1033 ui->comboDevices->clear();
1029 for (auto device : input_devices) { 1034 for (const auto& device : input_devices) {
1030 ui->comboDevices->addItem(QString::fromStdString(device.Get("display", "Unknown")), {}); 1035 ui->comboDevices->addItem(QString::fromStdString(device.Get("display", "Unknown")), {});
1031 } 1036 }
1032} 1037}
@@ -1308,7 +1313,7 @@ void ConfigureInputPlayer::HandleClick(
1308 } 1313 }
1309 button->setFocus(); 1314 button->setFocus();
1310 1315
1311 input_setter = new_input_setter; 1316 input_setter = std::move(new_input_setter);
1312 1317
1313 input_subsystem->BeginMapping(type); 1318 input_subsystem->BeginMapping(type);
1314 1319
@@ -1358,7 +1363,7 @@ bool ConfigureInputPlayer::IsInputAcceptable(const Common::ParamPackage& params)
1358 return params.Get("engine", "") == "keyboard" || params.Get("engine", "") == "mouse"; 1363 return params.Get("engine", "") == "keyboard" || params.Get("engine", "") == "mouse";
1359 } 1364 }
1360 1365
1361 const auto current_input_device = input_devices[ui->comboDevices->currentIndex()]; 1366 const auto& current_input_device = input_devices[ui->comboDevices->currentIndex()];
1362 return params.Get("engine", "") == current_input_device.Get("engine", "") && 1367 return params.Get("engine", "") == current_input_device.Get("engine", "") &&
1363 (params.Get("guid", "") == current_input_device.Get("guid", "") || 1368 (params.Get("guid", "") == current_input_device.Get("guid", "") ||
1364 params.Get("guid", "") == current_input_device.Get("guid2", "")) && 1369 params.Get("guid", "") == current_input_device.Get("guid2", "")) &&
diff --git a/src/yuzu/configuration/configure_motion_touch.cpp b/src/yuzu/configuration/configure_motion_touch.cpp
index 8539a5c8b..4340de304 100644
--- a/src/yuzu/configuration/configure_motion_touch.cpp
+++ b/src/yuzu/configuration/configure_motion_touch.cpp
@@ -42,23 +42,25 @@ CalibrationConfigurationDialog::CalibrationConfigurationDialog(QWidget* parent,
42 job = std::make_unique<CalibrationConfigurationJob>( 42 job = std::make_unique<CalibrationConfigurationJob>(
43 host, port, 43 host, port,
44 [this](CalibrationConfigurationJob::Status status) { 44 [this](CalibrationConfigurationJob::Status status) {
45 QString text; 45 QMetaObject::invokeMethod(this, [status, this] {
46 switch (status) { 46 QString text;
47 case CalibrationConfigurationJob::Status::Ready: 47 switch (status) {
48 text = tr("Touch the top left corner <br>of your touchpad."); 48 case CalibrationConfigurationJob::Status::Ready:
49 break; 49 text = tr("Touch the top left corner <br>of your touchpad.");
50 case CalibrationConfigurationJob::Status::Stage1Completed: 50 break;
51 text = tr("Now touch the bottom right corner <br>of your touchpad."); 51 case CalibrationConfigurationJob::Status::Stage1Completed:
52 break; 52 text = tr("Now touch the bottom right corner <br>of your touchpad.");
53 case CalibrationConfigurationJob::Status::Completed: 53 break;
54 text = tr("Configuration completed!"); 54 case CalibrationConfigurationJob::Status::Completed:
55 break; 55 text = tr("Configuration completed!");
56 default: 56 break;
57 break; 57 default:
58 } 58 break;
59 QMetaObject::invokeMethod(this, "UpdateLabelText", Q_ARG(QString, text)); 59 }
60 UpdateLabelText(text);
61 });
60 if (status == CalibrationConfigurationJob::Status::Completed) { 62 if (status == CalibrationConfigurationJob::Status::Completed) {
61 QMetaObject::invokeMethod(this, "UpdateButtonText", Q_ARG(QString, tr("OK"))); 63 QMetaObject::invokeMethod(this, [this] { UpdateButtonText(tr("OK")); });
62 } 64 }
63 }, 65 },
64 [this](u16 min_x_, u16 min_y_, u16 max_x_, u16 max_y_) { 66 [this](u16 min_x_, u16 min_y_, u16 max_x_, u16 max_y_) {
@@ -215,11 +217,11 @@ void ConfigureMotionTouch::OnCemuhookUDPTest() {
215 ui->udp_server->text().toStdString(), static_cast<u16>(ui->udp_port->text().toInt()), 217 ui->udp_server->text().toStdString(), static_cast<u16>(ui->udp_port->text().toInt()),
216 [this] { 218 [this] {
217 LOG_INFO(Frontend, "UDP input test success"); 219 LOG_INFO(Frontend, "UDP input test success");
218 QMetaObject::invokeMethod(this, "ShowUDPTestResult", Q_ARG(bool, true)); 220 QMetaObject::invokeMethod(this, [this] { ShowUDPTestResult(true); });
219 }, 221 },
220 [this] { 222 [this] {
221 LOG_ERROR(Frontend, "UDP input test failed"); 223 LOG_ERROR(Frontend, "UDP input test failed");
222 QMetaObject::invokeMethod(this, "ShowUDPTestResult", Q_ARG(bool, false)); 224 QMetaObject::invokeMethod(this, [this] { ShowUDPTestResult(false); });
223 }); 225 });
224} 226}
225 227
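
Replacing the string-based QMetaObject::invokeMethod calls with the functor overload (available since Qt 5.10) drops the requirement that the targets be invokable slots and gets compile-time checking of names and arguments; with `this` as the context object, the lambda is queued to that object's thread when the caller runs elsewhere. A minimal sketch (StatusToText is a hypothetical helper; UpdateLabelText is the method from the diff):

    void Dialog::OnWorkerStatus(Status status) {
        QMetaObject::invokeMethod(this, [this, status] {
            UpdateLabelText(StatusToText(status)); // executes on this's thread
        });
    }
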
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index 8b5c4a10a..e3661b390 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -161,7 +161,7 @@ GameListSearchField::GameListSearchField(GameList* parent) : QWidget{parent} {
161 * @return true if the haystack contains all words of userinput 161 * @return true if the haystack contains all words of userinput
162 */ 162 */
163static bool ContainsAllWords(const QString& haystack, const QString& userinput) { 163static bool ContainsAllWords(const QString& haystack, const QString& userinput) {
164 const QStringList userinput_split = userinput.split(QLatin1Char{' '}, QString::SkipEmptyParts); 164 const QStringList userinput_split = userinput.split(QLatin1Char{' '}, Qt::SkipEmptyParts);
165 165
166 return std::all_of(userinput_split.begin(), userinput_split.end(), 166 return std::all_of(userinput_split.begin(), userinput_split.end(),
167 [&haystack](const QString& s) { return haystack.contains(s); }); 167 [&haystack](const QString& s) { return haystack.contains(s); });
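
QString::SkipEmptyParts was deprecated when Qt moved the split flags into the Qt namespace (Qt 5.14 added Qt::SplitBehavior; the QString-scoped enum was deprecated in 5.15). Behavior is unchanged, only the spelling:

    const QStringList parts = input.split(QLatin1Char{' '}, Qt::SkipEmptyParts);
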
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index d9e689d14..556d2cdb3 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -965,6 +965,7 @@ void GMainWindow::LinkActionShortcut(QAction* action, const QString& action_name
965 static const QString main_window = QStringLiteral("Main Window"); 965 static const QString main_window = QStringLiteral("Main Window");
966 action->setShortcut(hotkey_registry.GetKeySequence(main_window, action_name)); 966 action->setShortcut(hotkey_registry.GetKeySequence(main_window, action_name));
967 action->setShortcutContext(hotkey_registry.GetShortcutContext(main_window, action_name)); 967 action->setShortcutContext(hotkey_registry.GetShortcutContext(main_window, action_name));
968 action->setAutoRepeat(false);
968 969
969 this->addAction(action); 970 this->addAction(action);
970 971