Diffstat (limited to 'src')
-rw-r--r-- src/common/common_types.h | 10
-rw-r--r-- src/common/input.h | 2
-rw-r--r-- src/common/telemetry.h | 26
-rw-r--r-- src/core/arm/arm_interface.h | 6
-rw-r--r-- src/core/file_sys/vfs.h | 18
-rw-r--r-- src/core/hid/emulated_console.h | 1
-rw-r--r-- src/core/hid/emulated_controller.h | 2
-rw-r--r-- src/core/hid/hid_core.h | 1
-rw-r--r-- src/core/hle/kernel/k_auto_object.h | 7
-rw-r--r-- src/core/hle/kernel/k_auto_object_container.h | 4
-rw-r--r-- src/core/hle/kernel/k_handle_table.h | 3
-rw-r--r-- src/core/hle/kernel/k_memory_manager.h | 42
-rw-r--r-- src/core/hle/kernel/k_memory_region.h | 80
-rw-r--r-- src/core/hle/kernel/k_page_heap.h | 90
-rw-r--r-- src/core/hle/kernel/k_page_table.cpp | 67
-rw-r--r-- src/core/hle/kernel/k_page_table.h | 22
-rw-r--r-- src/core/hle/kernel/k_slab_heap.h | 13
-rw-r--r-- src/core/hle/service/vi/display/vi_display.h | 2
-rw-r--r-- src/core/loader/loader.h | 6
-rw-r--r-- src/input_common/drivers/udp_client.cpp | 15
-rw-r--r-- src/input_common/drivers/udp_client.h | 4
-rw-r--r-- src/input_common/helpers/stick_from_buttons.cpp | 30
-rw-r--r-- src/input_common/helpers/touch_from_buttons.cpp | 1
-rw-r--r-- src/shader_recompiler/backend/glasm/emit_glasm_instructions.h | 31
-rw-r--r-- src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp | 107
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp | 142
-rw-r--r-- src/shader_recompiler/backend/glsl/emit_glsl_instructions.h | 31
-rw-r--r-- src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp | 119
-rw-r--r-- src/shader_recompiler/backend/spirv/emit_spirv_instructions.h | 30
-rw-r--r-- src/shader_recompiler/frontend/ir/microinstruction.cpp | 19
-rw-r--r-- src/shader_recompiler/frontend/ir/opcodes.inc | 19
-rw-r--r-- src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp | 18
-rw-r--r-- src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp | 49
-rw-r--r-- src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp | 20
-rw-r--r-- src/video_core/renderer_base.h | 8
-rw-r--r-- src/video_core/renderer_opengl/gl_resource_manager.h | 50
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.cpp | 6
-rw-r--r-- src/video_core/renderer_vulkan/vk_scheduler.h | 4
-rw-r--r-- src/yuzu/configuration/configure_input_player.cpp | 4
-rw-r--r-- src/yuzu/main.cpp | 1
40 files changed, 911 insertions, 199 deletions
diff --git a/src/common/common_types.h b/src/common/common_types.h
index 4cec89fbd..99bffc460 100644
--- a/src/common/common_types.h
+++ b/src/common/common_types.h
@@ -46,13 +46,3 @@ using GPUVAddr = u64; ///< Represents a pointer in the GPU virtual address space
 
 using u128 = std::array<std::uint64_t, 2>;
 static_assert(sizeof(u128) == 16, "u128 must be 128 bits wide");
-
-// An inheritable class to disallow the copy constructor and operator= functions
-class NonCopyable {
-protected:
-    constexpr NonCopyable() = default;
-    ~NonCopyable() = default;
-
-    NonCopyable(const NonCopyable&) = delete;
-    NonCopyable& operator=(const NonCopyable&) = delete;
-};
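
Note: the headers below gain an #include of "common/common_funcs.h" for the YUZU_NON_COPYABLE / YUZU_NON_MOVEABLE macros that replace the deleted NonCopyable base class. common_funcs.h itself is not part of this diff, so the following is only a minimal sketch, assuming the conventional definition of such macros:

    #define YUZU_NON_COPYABLE(cls)                                                                 \
        cls(const cls&) = delete;                                                                  \
        cls& operator=(const cls&) = delete

    #define YUZU_NON_MOVEABLE(cls)                                                                 \
        cls(cls&&) = delete;                                                                       \
        cls& operator=(cls&&) = delete

Unlike inheriting from NonCopyable (which only deleted copies), the macros let each class opt into deleting copies and moves independently; the hunks below show Field keeping its move operations while classes like ARM_Interface delete both.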
diff --git a/src/common/input.h b/src/common/input.h
index f4f9eb30a..54fcb24b0 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -209,6 +209,8 @@ enum class ButtonNames {
     Triangle,
     Share,
     Options,
+    Home,
+    Touch,
 
     // Mouse buttons
     ButtonMouseWheel,
diff --git a/src/common/telemetry.h b/src/common/telemetry.h
index 49186e848..d38aeac99 100644
--- a/src/common/telemetry.h
+++ b/src/common/telemetry.h
@@ -8,6 +8,7 @@
 #include <map>
 #include <memory>
 #include <string>
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 
 namespace Common::Telemetry {
@@ -28,7 +29,7 @@ struct VisitorInterface;
 /**
  * Interface class for telemetry data fields.
  */
-class FieldInterface : NonCopyable {
+class FieldInterface {
 public:
     virtual ~FieldInterface() = default;
 
@@ -52,14 +53,15 @@ public:
 template <typename T>
 class Field : public FieldInterface {
 public:
+    YUZU_NON_COPYABLE(Field);
+
     Field(FieldType type_, std::string name_, T value_)
         : name(std::move(name_)), type(type_), value(std::move(value_)) {}
 
-    Field(const Field&) = default;
-    Field& operator=(const Field&) = default;
+    ~Field() override = default;
 
-    Field(Field&&) = default;
-    Field& operator=(Field&& other) = default;
+    Field(Field&&) noexcept = default;
+    Field& operator=(Field&& other) noexcept = default;
 
     void Accept(VisitorInterface& visitor) const override;
 
@@ -98,9 +100,15 @@ private:
 /**
  * Collection of data fields that have been logged.
  */
-class FieldCollection final : NonCopyable {
+class FieldCollection final {
 public:
+    YUZU_NON_COPYABLE(FieldCollection);
+
     FieldCollection() = default;
+    ~FieldCollection() = default;
+
+    FieldCollection(FieldCollection&&) noexcept = default;
+    FieldCollection& operator=(FieldCollection&&) noexcept = default;
 
     /**
      * Accept method for the visitor pattern, visits each field in the collection.
@@ -133,7 +141,7 @@ private:
  * Telemetry fields visitor interface class. A backend to log to a web service should implement
  * this interface.
  */
-struct VisitorInterface : NonCopyable {
+struct VisitorInterface {
     virtual ~VisitorInterface() = default;
 
     virtual void Visit(const Field<bool>& field) = 0;
@@ -160,8 +168,8 @@ struct VisitorInterface : NonCopyable {
  * Empty implementation of VisitorInterface that drops all fields. Used when a functional
  * backend implementation is not available.
  */
-struct NullVisitor : public VisitorInterface {
-    ~NullVisitor() = default;
+struct NullVisitor final : public VisitorInterface {
+    YUZU_NON_COPYABLE(NullVisitor);
 
     void Visit(const Field<bool>& /*field*/) override {}
     void Visit(const Field<double>& /*field*/) override {}
diff --git a/src/core/arm/arm_interface.h b/src/core/arm/arm_interface.h
index 689e3ceb5..c60322442 100644
--- a/src/core/arm/arm_interface.h
+++ b/src/core/arm/arm_interface.h
@@ -6,6 +6,7 @@
 
 #include <array>
 #include <vector>
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hardware_properties.h"
 
@@ -24,8 +25,11 @@ class CPUInterruptHandler;
 using CPUInterrupts = std::array<CPUInterruptHandler, Core::Hardware::NUM_CPU_CORES>;
 
 /// Generic ARMv8 CPU interface
-class ARM_Interface : NonCopyable {
+class ARM_Interface {
 public:
+    YUZU_NON_COPYABLE(ARM_Interface);
+    YUZU_NON_MOVEABLE(ARM_Interface);
+
     explicit ARM_Interface(System& system_, CPUInterrupts& interrupt_handlers_,
                            bool uses_wall_clock_)
         : system{system_}, interrupt_handlers{interrupt_handlers_}, uses_wall_clock{
diff --git a/src/core/file_sys/vfs.h b/src/core/file_sys/vfs.h
index 3e625fad6..1b9365853 100644
--- a/src/core/file_sys/vfs.h
+++ b/src/core/file_sys/vfs.h
@@ -12,6 +12,7 @@
 #include <type_traits>
 #include <vector>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/file_sys/vfs_types.h"
 
@@ -29,8 +30,11 @@ enum class VfsEntryType {
 // A class representing an abstract filesystem. A default implementation given the root VirtualDir
 // is provided for convenience, but if the Vfs implementation has any additional state or
 // functionality, they will need to override.
-class VfsFilesystem : NonCopyable {
+class VfsFilesystem {
 public:
+    YUZU_NON_COPYABLE(VfsFilesystem);
+    YUZU_NON_MOVEABLE(VfsFilesystem);
+
     explicit VfsFilesystem(VirtualDir root);
     virtual ~VfsFilesystem();
 
@@ -77,8 +81,12 @@ protected:
 };
 
 // A class representing a file in an abstract filesystem.
-class VfsFile : NonCopyable {
+class VfsFile {
 public:
+    YUZU_NON_COPYABLE(VfsFile);
+    YUZU_NON_MOVEABLE(VfsFile);
+
+    VfsFile() = default;
     virtual ~VfsFile();
 
     // Retrieves the file name.
@@ -176,8 +184,12 @@ public:
 };
 
 // A class representing a directory in an abstract filesystem.
-class VfsDirectory : NonCopyable {
+class VfsDirectory {
 public:
+    YUZU_NON_COPYABLE(VfsDirectory);
+    YUZU_NON_MOVEABLE(VfsDirectory);
+
+    VfsDirectory() = default;
     virtual ~VfsDirectory();
 
     // Retrives the file located at path as if the current directory was root. Returns nullptr if
diff --git a/src/core/hid/emulated_console.h b/src/core/hid/emulated_console.h
index 707419102..5eb170823 100644
--- a/src/core/hid/emulated_console.h
+++ b/src/core/hid/emulated_console.h
@@ -10,6 +10,7 @@
 #include <mutex>
 #include <unordered_map>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/input.h"
 #include "common/param_package.h"
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index a63a83cce..d8642c5b3 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -13,8 +13,6 @@
 #include "common/common_types.h"
 #include "common/input.h"
 #include "common/param_package.h"
-#include "common/point.h"
-#include "common/quaternion.h"
 #include "common/settings.h"
 #include "common/vector_math.h"
 #include "core/hid/hid_types.h"
diff --git a/src/core/hid/hid_core.h b/src/core/hid/hid_core.h
index 837f7de49..717f605e7 100644
--- a/src/core/hid/hid_core.h
+++ b/src/core/hid/hid_core.h
@@ -6,6 +6,7 @@
 
 #include <memory>
 
+#include "common/common_funcs.h"
 #include "core/hid/hid_types.h"
 
 namespace Core::HID {
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 165b76747..05779f2d5 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -20,8 +20,6 @@ class KernelCore;
 class KProcess;
 
 #define KERNEL_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS)                                                \
-    YUZU_NON_COPYABLE(CLASS);                                                                      \
-    YUZU_NON_MOVEABLE(CLASS);                                                                      \
                                                                                                    \
 private:                                                                                           \
     friend class ::Kernel::KClassTokenGenerator;                                                   \
@@ -32,6 +30,9 @@ private:
     }                                                                                              \
                                                                                                    \
 public:                                                                                            \
+    YUZU_NON_COPYABLE(CLASS);                                                                      \
+    YUZU_NON_MOVEABLE(CLASS);                                                                      \
+                                                                                                   \
     using BaseClass = BASE_CLASS;                                                                  \
     static constexpr TypeObj GetStaticTypeObj() {                                                  \
         constexpr ClassTokenType Token = ClassToken();                                             \
@@ -224,9 +225,9 @@ private:
 
 template <typename T>
 class KScopedAutoObject {
+public:
     YUZU_NON_COPYABLE(KScopedAutoObject);
 
-public:
     constexpr KScopedAutoObject() = default;
 
     constexpr KScopedAutoObject(T* o) : m_obj(o) {
diff --git a/src/core/hle/kernel/k_auto_object_container.h b/src/core/hle/kernel/k_auto_object_container.h
index 4eadfe99d..697cc4289 100644
--- a/src/core/hle/kernel/k_auto_object_container.h
+++ b/src/core/hle/kernel/k_auto_object_container.h
@@ -16,13 +16,12 @@ class KernelCore;
 class KProcess;
 
 class KAutoObjectWithListContainer {
+public:
     YUZU_NON_COPYABLE(KAutoObjectWithListContainer);
     YUZU_NON_MOVEABLE(KAutoObjectWithListContainer);
 
-public:
     using ListType = boost::intrusive::rbtree<KAutoObjectWithList>;
 
-public:
     class ListAccessor : public KScopedLightLock {
     public:
         explicit ListAccessor(KAutoObjectWithListContainer* container)
@@ -48,7 +47,6 @@ public:
 
     friend class ListAccessor;
 
-public:
     KAutoObjectWithListContainer(KernelCore& kernel) : m_lock(kernel), m_object_list() {}
 
     void Initialize() {}
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 4b114ec2f..87004a0f9 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -22,13 +22,12 @@ namespace Kernel {
 class KernelCore;
 
 class KHandleTable {
+public:
     YUZU_NON_COPYABLE(KHandleTable);
     YUZU_NON_MOVEABLE(KHandleTable);
 
-public:
     static constexpr size_t MaxTableSize = 1024;
 
-public:
     explicit KHandleTable(KernelCore& kernel_);
     ~KHandleTable();
 
diff --git a/src/core/hle/kernel/k_memory_manager.h b/src/core/hle/kernel/k_memory_manager.h
index abd6c8ace..17c7690f1 100644
--- a/src/core/hle/kernel/k_memory_manager.h
+++ b/src/core/hle/kernel/k_memory_manager.h
@@ -8,6 +8,7 @@
 #include <mutex>
 #include <tuple>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_heap.h"
 #include "core/hle/result.h"
@@ -20,8 +21,11 @@ namespace Kernel {
 
 class KPageLinkedList;
 
-class KMemoryManager final : NonCopyable {
+class KMemoryManager final {
 public:
+    YUZU_NON_COPYABLE(KMemoryManager);
+    YUZU_NON_MOVEABLE(KMemoryManager);
+
     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -88,26 +92,13 @@ public:
     }
 
 private:
-    class Impl final : NonCopyable {
-    private:
-        using RefCount = u16;
-
-    private:
-        KPageHeap heap;
-        Pool pool{};
-
+    class Impl final {
     public:
-        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
-
-        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
-            std::size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
+        YUZU_NON_COPYABLE(Impl);
+        YUZU_NON_MOVEABLE(Impl);
 
-    public:
         Impl() = default;
+        ~Impl() = default;
 
         std::size_t Initialize(Pool new_pool, u64 start_address, u64 end_address);
 
@@ -130,6 +121,21 @@ private:
         constexpr VAddr GetEndAddress() const {
             return heap.GetEndAddress();
         }
+
+        static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+
+        static constexpr std::size_t CalculateOptimizedProcessOverheadSize(
+            std::size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
+
+    private:
+        using RefCount = u16;
+
+        KPageHeap heap;
+        Pool pool{};
     };
 
 private:
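
Note on the formula being moved here: CalculateOptimizedProcessOverheadSize reserves one tracking bit per page and rounds up to whole u64 words. A worked example (illustrative figures, not from the diff) for a 16 MiB region with 4 KiB pages:

    // pages = 16 MiB / 4 KiB          = 4096
    // words = AlignUp(4096, 64) / 64  = 64
    // bytes = words * sizeof(u64)     = 512
    static_assert((((16 * 1024 * 1024) / 4096 + 63) / 64) * 8 == 512);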
diff --git a/src/core/hle/kernel/k_memory_region.h b/src/core/hle/kernel/k_memory_region.h
index 90ab8fd62..e9bdf4e59 100644
--- a/src/core/hle/kernel/k_memory_region.h
+++ b/src/core/hle/kernel/k_memory_region.h
@@ -5,6 +5,7 @@
 #pragma once
 
 #include "common/assert.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/intrusive_red_black_tree.h"
 #include "core/hle/kernel/k_memory_region_type.h"
@@ -13,11 +14,13 @@ namespace Kernel {
 
 class KMemoryRegionAllocator;
 
-class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>,
-                            NonCopyable {
+class KMemoryRegion final : public Common::IntrusiveRedBlackTreeBaseNode<KMemoryRegion> {
     friend class KMemoryRegionTree;
 
 public:
+    YUZU_NON_COPYABLE(KMemoryRegion);
+    YUZU_NON_MOVEABLE(KMemoryRegion);
+
     constexpr KMemoryRegion() = default;
     constexpr KMemoryRegion(u64 address_, u64 last_address_)
         : address{address_}, last_address{last_address_} {}
@@ -29,6 +32,8 @@ public:
         : KMemoryRegion(address_, last_address_, std::numeric_limits<u64>::max(), attributes_,
                         type_id_) {}
 
+    ~KMemoryRegion() = default;
+
     static constexpr int Compare(const KMemoryRegion& lhs, const KMemoryRegion& rhs) {
         if (lhs.GetAddress() < rhs.GetAddress()) {
             return -1;
@@ -39,16 +44,6 @@ public:
         }
     }
 
-private:
-    constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
-        address = a;
-        pair_address = p;
-        last_address = la;
-        attributes = r;
-        type_id = t;
-    }
-
-public:
     constexpr u64 GetAddress() const {
         return address;
     }
@@ -108,6 +103,14 @@ public:
     }
 
 private:
+    constexpr void Reset(u64 a, u64 la, u64 p, u32 r, u32 t) {
+        address = a;
+        pair_address = p;
+        last_address = la;
+        attributes = r;
+        type_id = t;
+    }
+
     u64 address{};
     u64 last_address{};
     u64 pair_address{};
@@ -115,8 +118,25 @@ private:
     u32 type_id{};
 };
 
-class KMemoryRegionTree final : NonCopyable {
+class KMemoryRegionTree final {
+private:
+    using TreeType =
+        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
+
 public:
+    YUZU_NON_COPYABLE(KMemoryRegionTree);
+    YUZU_NON_MOVEABLE(KMemoryRegionTree);
+
+    using value_type = TreeType::value_type;
+    using size_type = TreeType::size_type;
+    using difference_type = TreeType::difference_type;
+    using pointer = TreeType::pointer;
+    using const_pointer = TreeType::const_pointer;
+    using reference = TreeType::reference;
+    using const_reference = TreeType::const_reference;
+    using iterator = TreeType::iterator;
+    using const_iterator = TreeType::const_iterator;
+
     struct DerivedRegionExtents {
         const KMemoryRegion* first_region{};
         const KMemoryRegion* last_region{};
@@ -140,29 +160,9 @@ public:
         }
     };
 
-private:
-    using TreeType =
-        Common::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
-
-public:
-    using value_type = TreeType::value_type;
-    using size_type = TreeType::size_type;
-    using difference_type = TreeType::difference_type;
-    using pointer = TreeType::pointer;
-    using const_pointer = TreeType::const_pointer;
-    using reference = TreeType::reference;
-    using const_reference = TreeType::const_reference;
-    using iterator = TreeType::iterator;
-    using const_iterator = TreeType::const_iterator;
-
-private:
-    TreeType m_tree{};
-    KMemoryRegionAllocator& memory_region_allocator;
-
-public:
     explicit KMemoryRegionTree(KMemoryRegionAllocator& memory_region_allocator_);
+    ~KMemoryRegionTree() = default;
 
-public:
     KMemoryRegion* FindModifiable(u64 address) {
         if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) {
             return std::addressof(*it);
@@ -241,7 +241,6 @@ public:
         return GetDerivedRegionExtents(static_cast<KMemoryRegionType>(type_id));
     }
 
-public:
     void InsertDirectly(u64 address, u64 last_address, u32 attr = 0, u32 type_id = 0);
     bool Insert(u64 address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
 
@@ -252,7 +251,6 @@ public:
         return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size;
     }
 
-public:
     // Iterator accessors.
     iterator begin() {
         return m_tree.begin();
@@ -322,13 +320,21 @@ public:
     iterator nfind(const_reference ref) const {
         return m_tree.nfind(ref);
     }
+
+private:
+    TreeType m_tree{};
+    KMemoryRegionAllocator& memory_region_allocator;
 };
 
-class KMemoryRegionAllocator final : NonCopyable {
+class KMemoryRegionAllocator final {
 public:
+    YUZU_NON_COPYABLE(KMemoryRegionAllocator);
+    YUZU_NON_MOVEABLE(KMemoryRegionAllocator);
+
     static constexpr size_t MaxMemoryRegions = 200;
 
     constexpr KMemoryRegionAllocator() = default;
+    constexpr ~KMemoryRegionAllocator() = default;
 
     template <typename... Args>
     KMemoryRegion* Allocate(Args&&... args) {
diff --git a/src/core/hle/kernel/k_page_heap.h b/src/core/hle/kernel/k_page_heap.h
index 8d9f30523..a65aa28a0 100644
--- a/src/core/hle/kernel/k_page_heap.h
+++ b/src/core/hle/kernel/k_page_heap.h
@@ -8,14 +8,44 @@
 #include <vector>
 
 #include "common/alignment.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/hle/kernel/k_page_bitmap.h"
 #include "core/hle/kernel/memory_types.h"
 
 namespace Kernel {
 
-class KPageHeap final : NonCopyable {
+class KPageHeap final {
 public:
+    YUZU_NON_COPYABLE(KPageHeap);
+    YUZU_NON_MOVEABLE(KPageHeap);
+
+    KPageHeap() = default;
+    ~KPageHeap() = default;
+
+    constexpr VAddr GetAddress() const {
+        return heap_address;
+    }
+    constexpr std::size_t GetSize() const {
+        return heap_size;
+    }
+    constexpr VAddr GetEndAddress() const {
+        return GetAddress() + GetSize();
+    }
+    constexpr std::size_t GetPageOffset(VAddr block) const {
+        return (block - GetAddress()) / PageSize;
+    }
+
+    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
+    VAddr AllocateBlock(s32 index, bool random);
+    void Free(VAddr addr, std::size_t num_pages);
+
+    void UpdateUsedSize() {
+        used_size = heap_size - (GetNumFreePages() * PageSize);
+    }
+
+    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+
     static constexpr s32 GetAlignedBlockIndex(std::size_t num_pages, std::size_t align_pages) {
         const auto target_pages{std::max(num_pages, align_pages)};
         for (std::size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
@@ -45,21 +75,13 @@ public:
     }
 
 private:
-    static constexpr std::size_t NumMemoryBlockPageShifts{7};
-    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
-        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
-    };
-
-    class Block final : NonCopyable {
-    private:
-        KPageBitmap bitmap;
-        VAddr heap_address{};
-        uintptr_t end_offset{};
-        std::size_t block_shift{};
-        std::size_t next_block_shift{};
-
+    class Block final {
     public:
+        YUZU_NON_COPYABLE(Block);
+        YUZU_NON_MOVEABLE(Block);
+
         Block() = default;
+        ~Block() = default;
 
         constexpr std::size_t GetShift() const {
             return block_shift;
@@ -129,7 +151,6 @@ private:
             return heap_address + (offset << GetShift());
         }
 
-    public:
         static constexpr std::size_t CalculateManagementOverheadSize(std::size_t region_size,
                                                                      std::size_t cur_block_shift,
                                                                      std::size_t next_block_shift) {
@@ -139,35 +160,15 @@ private:
             return KPageBitmap::CalculateManagementOverheadSize(
                 (align * 2 + Common::AlignUp(region_size, align)) / cur_block_size);
         }
-    };
-
-public:
-    KPageHeap() = default;
-
-    constexpr VAddr GetAddress() const {
-        return heap_address;
-    }
-    constexpr std::size_t GetSize() const {
-        return heap_size;
-    }
-    constexpr VAddr GetEndAddress() const {
-        return GetAddress() + GetSize();
-    }
-    constexpr std::size_t GetPageOffset(VAddr block) const {
-        return (block - GetAddress()) / PageSize;
-    }
 
-    void Initialize(VAddr heap_address, std::size_t heap_size, std::size_t metadata_size);
-    VAddr AllocateBlock(s32 index, bool random);
-    void Free(VAddr addr, std::size_t num_pages);
-
-    void UpdateUsedSize() {
-        used_size = heap_size - (GetNumFreePages() * PageSize);
-    }
-
-    static std::size_t CalculateManagementOverheadSize(std::size_t region_size);
+    private:
+        KPageBitmap bitmap;
+        VAddr heap_address{};
+        uintptr_t end_offset{};
+        std::size_t block_shift{};
+        std::size_t next_block_shift{};
+    };
 
-private:
     constexpr std::size_t GetNumFreePages() const {
         std::size_t num_free{};
 
@@ -180,6 +181,11 @@ private:
 
     void FreeBlock(VAddr block, s32 index);
 
+    static constexpr std::size_t NumMemoryBlockPageShifts{7};
+    static constexpr std::array<std::size_t, NumMemoryBlockPageShifts> MemoryBlockPageShifts{
+        0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E,
+    };
+
     VAddr heap_address{};
     std::size_t heap_size{};
     std::size_t used_size{};
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 2ebbc0819..912853e5c 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -61,7 +61,10 @@ constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr
 
 } // namespace
 
-KPageTable::KPageTable(Core::System& system_) : system{system_} {}
+KPageTable::KPageTable(Core::System& system_)
+    : general_lock{system_.Kernel()}, map_physical_memory_lock{system_.Kernel()}, system{system_} {}
+
+KPageTable::~KPageTable() = default;
 
 ResultCode KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type,
                                             bool enable_aslr, VAddr code_addr,
@@ -282,7 +285,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
     R_UNLESS(this->CanContain(addr, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify that the destination memory is unmapped.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, KMemoryState::Free,
@@ -300,7 +303,7 @@ ResultCode KPageTable::MapProcessCode(VAddr addr, std::size_t num_pages, KMemory
 }
 
 ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     const std::size_t num_pages{size / PageSize};
 
@@ -337,7 +340,7 @@ ResultCode KPageTable::MapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t
 }
 
 ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     if (!size) {
         return ResultSuccess;
@@ -371,7 +374,7 @@ ResultCode KPageTable::UnmapCodeMemory(VAddr dst_addr, VAddr src_addr, std::size
 
 ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
                                           KPageTable& src_page_table, VAddr src_addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     const std::size_t num_pages{size / PageSize};
 
@@ -399,10 +402,10 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
 
 ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
     // Lock the physical memory lock.
-    std::lock_guard phys_lk(map_physical_memory_lock);
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     std::size_t mapped_size{};
     const VAddr end_addr{addr + size};
@@ -478,7 +481,11 @@ ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    // Lock the physical memory lock.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+
+    // Lock the table.
+    KScopedLightLock lk(general_lock);
 
     const VAddr end_addr{addr + size};
     ResultCode result{ResultSuccess};
@@ -540,7 +547,7 @@ ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -579,7 +586,7 @@ ResultCode KPageTable::MapMemory(VAddr dst_addr, VAddr src_addr, std::size_t siz
 }
 
 ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState src_state{};
     CASCADE_CODE(CheckMemoryState(
@@ -622,6 +629,8 @@ ResultCode KPageTable::UnmapMemory(VAddr dst_addr, VAddr src_addr, std::size_t s
 
 ResultCode KPageTable::MapPages(VAddr addr, const KPageLinkedList& page_linked_list,
                                 KMemoryPermission perm) {
+    ASSERT(this->IsLockedByCurrentThread());
+
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
@@ -650,7 +659,7 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
     R_UNLESS(this->CanContain(address, size, state), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(address, size, KMemoryState::All, KMemoryState::Free,
@@ -667,6 +676,8 @@ ResultCode KPageTable::MapPages(VAddr address, KPageLinkedList& page_linked_list
 }
 
 ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked_list) {
+    ASSERT(this->IsLockedByCurrentThread());
+
     VAddr cur_addr{addr};
 
     for (const auto& node : page_linked_list.Nodes()) {
@@ -691,7 +702,7 @@ ResultCode KPageTable::UnmapPages(VAddr addr, KPageLinkedList& page_linked_list,
     R_UNLESS(this->Contains(addr, size), ResultInvalidCurrentMemory);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Check the memory state.
     R_TRY(this->CheckMemoryState(addr, size, KMemoryState::All, state, KMemoryPermission::None,
@@ -712,7 +723,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -766,7 +777,7 @@ ResultCode KPageTable::SetProcessMemoryPermission(VAddr addr, std::size_t size,
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     return block_manager->FindBlock(addr).GetMemoryInfo();
 }
@@ -781,7 +792,7 @@ KMemoryInfo KPageTable::QueryInfo(VAddr addr) {
 }
 
 ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemoryPermission perm) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState state{};
     KMemoryAttribute attribute{};
@@ -799,7 +810,7 @@ ResultCode KPageTable::ReserveTransferMemory(VAddr addr, std::size_t size, KMemo
 }
 
 ResultCode KPageTable::ResetTransferMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryState state{};
 
@@ -818,7 +829,7 @@ ResultCode KPageTable::SetMemoryPermission(VAddr addr, std::size_t size,
     const size_t num_pages = size / PageSize;
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory permission.
     KMemoryState old_state;
@@ -847,7 +858,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
                              KMemoryAttribute::SetMask);
 
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Verify we can change the memory attribute.
     KMemoryState old_state;
@@ -878,7 +889,7 @@ ResultCode KPageTable::SetMemoryAttribute(VAddr addr, std::size_t size, u32 mask
 
 ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
     // Lock the table.
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     // Only process page tables are allowed to set heap size.
     ASSERT(!this->IsKernel());
@@ -889,15 +900,15 @@ ResultCode KPageTable::SetMaxHeapSize(std::size_t size) {
 }
 
 ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
-    // Lock the physical memory lock.
-    std::lock_guard phys_lk(map_physical_memory_lock);
+    // Lock the physical memory mutex.
+    KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
 
     // Try to perform a reduction in heap, instead of an extension.
     VAddr cur_address{};
     std::size_t allocation_size{};
     {
         // Lock the table.
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
 
         // Validate that setting heap size is possible at all.
         R_UNLESS(!is_kernel, ResultOutOfMemory);
@@ -962,7 +973,7 @@ ResultCode KPageTable::SetHeapSize(VAddr* out, std::size_t size) {
     // Map the pages.
     {
         // Lock the table.
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
 
         // Ensure that the heap hasn't changed since we began executing.
         ASSERT(cur_address == current_heap_end);
@@ -1004,7 +1015,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
                                                  bool is_map_only, VAddr region_start,
                                                  std::size_t region_num_pages, KMemoryState state,
                                                  KMemoryPermission perm, PAddr map_addr) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     if (!CanContain(region_start, region_num_pages * PageSize, state)) {
         return ResultInvalidCurrentMemory;
@@ -1035,7 +1046,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(std::size_t needed_num_pages,
 }
 
 ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -1058,7 +1069,7 @@ ResultCode KPageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission perm{};
     if (const ResultCode result{CheckMemoryState(
@@ -1081,7 +1092,7 @@ ResultCode KPageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size)
 }
 
 ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission new_perm = KMemoryPermission::NotMapped | KMemoryPermission::KernelReadWrite;
 
@@ -1108,7 +1119,7 @@ ResultCode KPageTable::LockForCodeMemory(VAddr addr, std::size_t size) {
 }
 
 ResultCode KPageTable::UnlockForCodeMemory(VAddr addr, std::size_t size) {
-    std::lock_guard lock{page_table_lock};
+    KScopedLightLock lk(general_lock);
 
     KMemoryPermission new_perm = KMemoryPermission::UserReadWrite;
 
diff --git a/src/core/hle/kernel/k_page_table.h b/src/core/hle/kernel/k_page_table.h
index 60ae9b9e8..c98887d34 100644
--- a/src/core/hle/kernel/k_page_table.h
+++ b/src/core/hle/kernel/k_page_table.h
@@ -5,11 +5,12 @@
 #pragma once
 
 #include <memory>
-#include <mutex>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/page_table.h"
 #include "core/file_sys/program_metadata.h"
+#include "core/hle/kernel/k_light_lock.h"
 #include "core/hle/kernel/k_memory_block.h"
 #include "core/hle/kernel/k_memory_manager.h"
 #include "core/hle/result.h"
@@ -22,9 +23,13 @@ namespace Kernel {
 
 class KMemoryBlockManager;
 
-class KPageTable final : NonCopyable {
+class KPageTable final {
 public:
+    YUZU_NON_COPYABLE(KPageTable);
+    YUZU_NON_MOVEABLE(KPageTable);
+
     explicit KPageTable(Core::System& system_);
+    ~KPageTable();
 
     ResultCode InitializeForProcess(FileSys::ProgramAddressSpaceType as_type, bool enable_aslr,
                                     VAddr code_addr, std::size_t code_size,
@@ -142,11 +147,12 @@ private:
     }
 
     bool IsLockedByCurrentThread() const {
-        return true;
+        return general_lock.IsLockedByCurrentThread();
     }
 
-    std::recursive_mutex page_table_lock;
-    std::mutex map_physical_memory_lock;
+    mutable KLightLock general_lock;
+    mutable KLightLock map_physical_memory_lock;
+
     std::unique_ptr<KMemoryBlockManager> block_manager;
 
 public:
@@ -205,7 +211,7 @@ public:
         return alias_code_region_end - alias_code_region_start;
     }
     size_t GetNormalMemorySize() {
-        std::lock_guard lk(page_table_lock);
+        KScopedLightLock lk(general_lock);
         return GetHeapSize() + mapped_physical_memory_size;
     }
     constexpr std::size_t GetAddressSpaceWidth() const {
@@ -247,7 +253,9 @@ public:
     constexpr bool IsInsideASLRRegion(VAddr address, std::size_t size) const {
         return !IsOutsideASLRRegion(address, size);
     }
-    constexpr PAddr GetPhysicalAddr(VAddr addr) {
+
+    PAddr GetPhysicalAddr(VAddr addr) {
+        ASSERT(IsLockedByCurrentThread());
         const auto backing_addr = page_table_impl.backing_addr[addr >> PageBits];
         ASSERT(backing_addr);
         return backing_addr + addr;
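
Note: KScopedLightLock comes from the newly included k_light_lock.h, which is not shown in this diff. It is assumed to be the usual RAII wrapper over KLightLock, roughly:

    class KScopedLightLock {
    public:
        explicit KScopedLightLock(KLightLock& lock) : m_lock{lock} {
            m_lock.Lock();   // Acquire on construction, like std::lock_guard
        }
        ~KScopedLightLock() {
            m_lock.Unlock(); // Release when the scope ends
        }

    private:
        KLightLock& m_lock;
    };

Unlike the std::recursive_mutex it replaces, such a lock is presumably not reentrant, which would explain why MapPages/UnmapPages now assert IsLockedByCurrentThread() instead of taking the lock a second time.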
diff --git a/src/core/hle/kernel/k_slab_heap.h b/src/core/hle/kernel/k_slab_heap.h
index 0ad74b0a0..05c0bec9c 100644
--- a/src/core/hle/kernel/k_slab_heap.h
+++ b/src/core/hle/kernel/k_slab_heap.h
@@ -7,6 +7,7 @@
 #include <atomic>
 
 #include "common/assert.h"
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 
 namespace Kernel {
@@ -15,13 +16,17 @@ class KernelCore;
 
 namespace impl {
 
-class KSlabHeapImpl final : NonCopyable {
+class KSlabHeapImpl final {
 public:
+    YUZU_NON_COPYABLE(KSlabHeapImpl);
+    YUZU_NON_MOVEABLE(KSlabHeapImpl);
+
     struct Node {
         Node* next{};
     };
 
     constexpr KSlabHeapImpl() = default;
+    constexpr ~KSlabHeapImpl() = default;
 
     void Initialize(std::size_t size) {
         ASSERT(head == nullptr);
@@ -64,9 +69,13 @@ private:
 
 } // namespace impl
 
-class KSlabHeapBase : NonCopyable {
+class KSlabHeapBase {
 public:
+    YUZU_NON_COPYABLE(KSlabHeapBase);
+    YUZU_NON_MOVEABLE(KSlabHeapBase);
+
     constexpr KSlabHeapBase() = default;
+    constexpr ~KSlabHeapBase() = default;
 
     constexpr bool Contains(uintptr_t addr) const {
         return start <= addr && addr < end;
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 0979fc421..329f4ba86 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -28,10 +28,10 @@ class Layer;
 
 /// Represents a single display type
 class Display {
+public:
     YUZU_NON_COPYABLE(Display);
     YUZU_NON_MOVEABLE(Display);
 
-public:
     /// Constructs a display with a given unique ID and name.
     ///
     /// @param id The unique ID for this display.
diff --git a/src/core/loader/loader.h b/src/core/loader/loader.h
index 7b1bac3f7..8b6b3b68f 100644
--- a/src/core/loader/loader.h
+++ b/src/core/loader/loader.h
@@ -11,6 +11,7 @@
 #include <utility>
 #include <vector>
 
+#include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "core/file_sys/control_metadata.h"
 #include "core/file_sys/vfs.h"
@@ -139,8 +140,11 @@ std::string GetResultStatusString(ResultStatus status);
 std::ostream& operator<<(std::ostream& os, ResultStatus status);
 
 /// Interface for loading an application
-class AppLoader : NonCopyable {
+class AppLoader {
 public:
+    YUZU_NON_COPYABLE(AppLoader);
+    YUZU_NON_MOVEABLE(AppLoader);
+
     struct LoadParameters {
         s32 main_thread_priority;
         u64 main_thread_stack_size;
diff --git a/src/input_common/drivers/udp_client.cpp b/src/input_common/drivers/udp_client.cpp
index d1cdb1ab2..333173e3d 100644
--- a/src/input_common/drivers/udp_client.cpp
+++ b/src/input_common/drivers/udp_client.cpp
@@ -271,7 +271,7 @@ void UDPClient::OnPadData(Response::PadData data, std::size_t client) {
             const auto touch_axis_y_id =
                 static_cast<int>(id == 0 ? PadAxes::Touch1Y : PadAxes::Touch2Y);
             const auto touch_button_id =
-                static_cast<int>(id == 0 ? PadButton::Touch1 : PadButton::touch2);
+                static_cast<int>(id == 0 ? PadButton::Touch1 : PadButton::Touch2);
 
             // TODO: Use custom calibration per device
             const Common::ParamPackage touch_param(Settings::values.touch_device.GetValue());
@@ -319,6 +319,9 @@ void UDPClient::OnPadData(Response::PadData data, std::size_t client) {
         SetButton(identifier, button, button_status);
     }
 
+    SetButton(identifier, static_cast<int>(PadButton::Home), data.home != 0);
+    SetButton(identifier, static_cast<int>(PadButton::TouchHardPress), data.touch_hard_press != 0);
+
     SetBattery(identifier, GetBatteryLevel(data.info.battery));
 }
 
@@ -393,7 +396,7 @@ std::vector<Common::ParamPackage> UDPClient::GetInputDevices() const {
 
 ButtonMapping UDPClient::GetButtonMappingForDevice(const Common::ParamPackage& params) {
     // This list excludes any button that can't be really mapped
-    static constexpr std::array<std::pair<Settings::NativeButton::Values, PadButton>, 18>
+    static constexpr std::array<std::pair<Settings::NativeButton::Values, PadButton>, 20>
         switch_to_dsu_button = {
             std::pair{Settings::NativeButton::A, PadButton::Circle},
             {Settings::NativeButton::B, PadButton::Cross},
@@ -413,6 +416,8 @@ ButtonMapping UDPClient::GetButtonMappingForDevice(const Common::ParamPackage& p
             {Settings::NativeButton::SR, PadButton::R2},
             {Settings::NativeButton::LStick, PadButton::L3},
             {Settings::NativeButton::RStick, PadButton::R3},
+            {Settings::NativeButton::Home, PadButton::Home},
+            {Settings::NativeButton::Screenshot, PadButton::TouchHardPress},
         };
     if (!params.Has("guid") || !params.Has("port") || !params.Has("pad")) {
         return {};
@@ -517,6 +522,12 @@ Common::Input::ButtonNames UDPClient::GetUIButtonName(const Common::ParamPackage
517 return Common::Input::ButtonNames::Share; 522 return Common::Input::ButtonNames::Share;
518 case PadButton::Options: 523 case PadButton::Options:
519 return Common::Input::ButtonNames::Options; 524 return Common::Input::ButtonNames::Options;
525 case PadButton::Home:
526 return Common::Input::ButtonNames::Home;
527 case PadButton::Touch1:
528 case PadButton::Touch2:
529 case PadButton::TouchHardPress:
530 return Common::Input::ButtonNames::Touch;
520 default: 531 default:
521 return Common::Input::ButtonNames::Undefined; 532 return Common::Input::ButtonNames::Undefined;
522 } 533 }
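
The pairing table above (switch_to_dsu_button) is a constexpr std::array of pairs whose element count, now 20, is spelled out by hand and must track the initializer list. A minimal sketch of the same pattern using C++20's std::to_array, which deduces the count so new rows such as Home cannot drift out of sync (enum values here are illustrative, not the emulator's):

    #include <array>
    #include <utility>

    enum class NativeButton { A, B, Home, Screenshot };
    enum class PadButton : unsigned {
        Circle = 0x20, // illustrative value
        Cross = 0x4000,
        Home = 0x40000,
        TouchHardPress = 0x80000,
    };

    // The array size is deduced from the initializer, so there is no "20" to maintain.
    static constexpr auto switch_to_dsu = std::to_array<std::pair<NativeButton, PadButton>>({
        {NativeButton::A, PadButton::Circle},
        {NativeButton::B, PadButton::Cross},
        {NativeButton::Home, PadButton::Home},
        {NativeButton::Screenshot, PadButton::TouchHardPress},
    });
    static_assert(switch_to_dsu.size() == 4);
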
diff --git a/src/input_common/drivers/udp_client.h b/src/input_common/drivers/udp_client.h
index 30d7c2682..e9c178139 100644
--- a/src/input_common/drivers/udp_client.h
+++ b/src/input_common/drivers/udp_client.h
@@ -84,7 +84,9 @@ private:
84 Cross = 0x4000, 84 Cross = 0x4000,
85 Square = 0x8000, 85 Square = 0x8000,
86 Touch1 = 0x10000, 86 Touch1 = 0x10000,
87 touch2 = 0x20000, 87 Touch2 = 0x20000,
88 Home = 0x40000,
89 TouchHardPress = 0x80000,
88 }; 90 };
89 91
90 enum class PadAxes : u8 { 92 enum class PadAxes : u8 {
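
Each PadButton occupies a distinct bit, and the two new enumerators extend the mask past the 16-bit DSU button field; the same values double as the button identifiers that OnPadData passes to SetButton for Home and TouchHardPress. Testing such flags, as a self-contained sketch:

    #include <cstdint>

    enum class PadButton : std::uint32_t {
        Touch1 = 0x10000,
        Touch2 = 0x20000,
        Home = 0x40000,           // new in this change
        TouchHardPress = 0x80000, // new in this change
    };

    constexpr bool IsPressed(std::uint32_t button_bits, PadButton button) {
        return (button_bits & static_cast<std::uint32_t>(button)) != 0;
    }
    static_assert(IsPressed(0x50000, PadButton::Home));    // 0x50000 == Touch1 | Home
    static_assert(!IsPressed(0x50000, PadButton::Touch2));
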
diff --git a/src/input_common/helpers/stick_from_buttons.cpp b/src/input_common/helpers/stick_from_buttons.cpp
index e23394f5f..31e6f62ab 100644
--- a/src/input_common/helpers/stick_from_buttons.cpp
+++ b/src/input_common/helpers/stick_from_buttons.cpp
@@ -167,12 +167,34 @@ public:
167 } 167 }
168 168
169 void UpdateModButtonStatus(const Common::Input::CallbackStatus& button_callback) { 169 void UpdateModButtonStatus(const Common::Input::CallbackStatus& button_callback) {
170 modifier_status = button_callback.button_status.value; 170 const auto& new_status = button_callback.button_status;
171 const bool new_button_value = new_status.inverted ? !new_status.value : new_status.value;
172 modifier_status.toggle = new_status.toggle;
173
174 // Update button status with current
175 if (!modifier_status.toggle) {
176 modifier_status.locked = false;
177 if (modifier_status.value != new_button_value) {
178 modifier_status.value = new_button_value;
179 }
180 } else {
181 // Toggle button and lock status
182 if (new_button_value && !modifier_status.locked) {
183 modifier_status.locked = true;
184 modifier_status.value = !modifier_status.value;
185 }
186
187 // Unlock button ready for next press
188 if (!new_button_value && modifier_status.locked) {
189 modifier_status.locked = false;
190 }
191 }
192
171 UpdateStatus(); 193 UpdateStatus();
172 } 194 }
173 195
174 void UpdateStatus() { 196 void UpdateStatus() {
175 const float coef = modifier_status ? modifier_scale : 1.0f; 197 const float coef = modifier_status.value ? modifier_scale : 1.0f;
176 198
177 bool r = right_status; 199 bool r = right_status;
178 bool l = left_status; 200 bool l = left_status;
@@ -266,7 +288,7 @@ public:
266 if (down_status) { 288 if (down_status) {
267 --y; 289 --y;
268 } 290 }
269 const float coef = modifier_status ? modifier_scale : 1.0f; 291 const float coef = modifier_status.value ? modifier_scale : 1.0f;
270 status.x.raw_value = static_cast<float>(x) * coef * (y == 0 ? 1.0f : SQRT_HALF); 292 status.x.raw_value = static_cast<float>(x) * coef * (y == 0 ? 1.0f : SQRT_HALF);
271 status.y.raw_value = static_cast<float>(y) * coef * (x == 0 ? 1.0f : SQRT_HALF); 293 status.y.raw_value = static_cast<float>(y) * coef * (x == 0 ? 1.0f : SQRT_HALF);
272 return status; 294 return status;
@@ -287,9 +309,9 @@ private:
287 bool down_status{}; 309 bool down_status{};
288 bool left_status{}; 310 bool left_status{};
289 bool right_status{}; 311 bool right_status{};
290 bool modifier_status{};
291 float last_x_axis_value{}; 312 float last_x_axis_value{};
292 float last_y_axis_value{}; 313 float last_y_axis_value{};
314 Common::Input::ButtonStatus modifier_status{};
293 const Common::Input::AnalogProperties properties{0.0f, 1.0f, 0.5f, 0.0f, false}; 315 const Common::Input::AnalogProperties properties{0.0f, 1.0f, 0.5f, 0.0f, false};
294 std::chrono::time_point<std::chrono::steady_clock> last_update; 316 std::chrono::time_point<std::chrono::steady_clock> last_update;
295}; 317};
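
The rewritten UpdateModButtonStatus implements a toggle-with-latch: in toggle mode the modifier flips once per physical press, and the locked flag suppresses further flips while the button stays held. The same logic as a standalone function (ButtonStatus below loosely mirrors Common::Input::ButtonStatus; a sketch, not the emulator's type):

    struct ButtonStatus {
        bool value{};
        bool inverted{};
        bool toggle{};
        bool locked{};
    };

    void ApplyButton(ButtonStatus& state, const ButtonStatus& incoming) {
        const bool pressed = incoming.inverted ? !incoming.value : incoming.value;
        state.toggle = incoming.toggle;
        if (!state.toggle) {
            state.locked = false;
            state.value = pressed;  // plain pass-through mode
            return;
        }
        if (pressed && !state.locked) {
            state.locked = true;    // latch: flip exactly once per press
            state.value = !state.value;
        }
        if (!pressed && state.locked) {
            state.locked = false;   // release re-arms the latch
        }
    }
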
diff --git a/src/input_common/helpers/touch_from_buttons.cpp b/src/input_common/helpers/touch_from_buttons.cpp
index ece1e3b32..f1b57d03a 100644
--- a/src/input_common/helpers/touch_from_buttons.cpp
+++ b/src/input_common/helpers/touch_from_buttons.cpp
@@ -4,7 +4,6 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include "common/settings.h" 6#include "common/settings.h"
7#include "core/frontend/framebuffer_layout.h"
8#include "input_common/helpers/touch_from_buttons.h" 7#include "input_common/helpers/touch_from_buttons.h"
9 8
10namespace InputCommon { 9namespace InputCommon {
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
index b48007856..5efbe4e6f 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
@@ -372,6 +372,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
372 ScalarU32 value); 372 ScalarU32 value);
373void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset, 373void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
374 Register value); 374 Register value);
375void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
376 Register value);
375void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 377void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
376 ScalarU32 offset, ScalarU32 value); 378 ScalarU32 offset, ScalarU32 value);
377void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 379void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -412,6 +414,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
412 ScalarU32 offset, Register value); 414 ScalarU32 offset, Register value);
413void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 415void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
414 ScalarU32 offset, Register value); 416 ScalarU32 offset, Register value);
417void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
418 ScalarU32 offset, Register value);
419void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
420 ScalarU32 offset, Register value);
421void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
422 ScalarU32 offset, Register value);
423void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
424 ScalarU32 offset, Register value);
425void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
426 ScalarU32 offset, Register value);
427void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
428 ScalarU32 offset, Register value);
429void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
430 ScalarU32 offset, Register value);
431void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
432 ScalarU32 offset, Register value);
433void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
434 ScalarU32 offset, Register value);
415void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 435void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
416 ScalarU32 offset, ScalarF32 value); 436 ScalarU32 offset, ScalarF32 value);
417void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 437void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -448,6 +468,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
448void EmitGlobalAtomicOr64(EmitContext& ctx); 468void EmitGlobalAtomicOr64(EmitContext& ctx);
449void EmitGlobalAtomicXor64(EmitContext& ctx); 469void EmitGlobalAtomicXor64(EmitContext& ctx);
450void EmitGlobalAtomicExchange64(EmitContext& ctx); 470void EmitGlobalAtomicExchange64(EmitContext& ctx);
471void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
472void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
473void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
474void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
475void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
476void EmitGlobalAtomicInc32x2(EmitContext& ctx);
477void EmitGlobalAtomicDec32x2(EmitContext& ctx);
478void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
479void EmitGlobalAtomicOr32x2(EmitContext& ctx);
480void EmitGlobalAtomicXor32x2(EmitContext& ctx);
481void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
451void EmitGlobalAtomicAddF32(EmitContext& ctx); 482void EmitGlobalAtomicAddF32(EmitContext& ctx);
452void EmitGlobalAtomicAddF16x2(EmitContext& ctx); 483void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
453void EmitGlobalAtomicAddF32x2(EmitContext& ctx); 484void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
index f135b67f5..f0fd94a28 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_memory.cpp
@@ -311,6 +311,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 poin
311 ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset); 311 ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset);
312} 312}
313 313
314void EmitSharedAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
315 [[maybe_unused]] IR::Inst& inst,
316 [[maybe_unused]] ScalarU32 pointer_offset,
317 [[maybe_unused]] Register value) {
318 throw NotImplementedException("GLASM instruction");
319}
320
314void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 321void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
315 ScalarU32 offset, ScalarU32 value) { 322 ScalarU32 offset, ScalarU32 value) {
316 Atom(ctx, inst, binding, offset, value, "ADD", "U32"); 323 Atom(ctx, inst, binding, offset, value, "ADD", "U32");
@@ -411,6 +418,62 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
411 Atom(ctx, inst, binding, offset, value, "EXCH", "U64"); 418 Atom(ctx, inst, binding, offset, value, "EXCH", "U64");
412} 419}
413 420
421void EmitStorageAtomicIAdd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
422 [[maybe_unused]] const IR::Value& binding,
423 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
424 throw NotImplementedException("GLASM instruction");
425}
426
427void EmitStorageAtomicSMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
428 [[maybe_unused]] const IR::Value& binding,
429 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
430 throw NotImplementedException("GLASM instruction");
431}
432
433void EmitStorageAtomicUMin32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
434 [[maybe_unused]] const IR::Value& binding,
435 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
436 throw NotImplementedException("GLASM instruction");
437}
438
439void EmitStorageAtomicSMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
440 [[maybe_unused]] const IR::Value& binding,
441 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
442 throw NotImplementedException("GLASM instruction");
443}
444
445void EmitStorageAtomicUMax32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
446 [[maybe_unused]] const IR::Value& binding,
447 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
448 throw NotImplementedException("GLASM instruction");
449}
450
451void EmitStorageAtomicAnd32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
452 [[maybe_unused]] const IR::Value& binding,
453 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
454 throw NotImplementedException("GLASM instruction");
455}
456
457void EmitStorageAtomicOr32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
458 [[maybe_unused]] const IR::Value& binding,
459 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
460 throw NotImplementedException("GLASM instruction");
461}
462
463void EmitStorageAtomicXor32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
464 [[maybe_unused]] const IR::Value& binding,
465 [[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
466 throw NotImplementedException("GLASM instruction");
467}
468
469void EmitStorageAtomicExchange32x2([[maybe_unused]] EmitContext& ctx,
470 [[maybe_unused]] IR::Inst& inst,
471 [[maybe_unused]] const IR::Value& binding,
472 [[maybe_unused]] ScalarU32 offset,
473 [[maybe_unused]] Register value) {
474 throw NotImplementedException("GLASM instruction");
475}
476
414void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 477void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
415 ScalarU32 offset, ScalarF32 value) { 478 ScalarU32 offset, ScalarF32 value) {
416 Atom(ctx, inst, binding, offset, value, "ADD", "F32"); 479 Atom(ctx, inst, binding, offset, value, "ADD", "F32");
@@ -537,6 +600,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
537 throw NotImplementedException("GLASM instruction"); 600 throw NotImplementedException("GLASM instruction");
538} 601}
539 602
603void EmitGlobalAtomicIAdd32x2(EmitContext&) {
604 throw NotImplementedException("GLASM instruction");
605}
606
607void EmitGlobalAtomicSMin32x2(EmitContext&) {
608 throw NotImplementedException("GLASM instruction");
609}
610
611void EmitGlobalAtomicUMin32x2(EmitContext&) {
612 throw NotImplementedException("GLASM instruction");
613}
614
615void EmitGlobalAtomicSMax32x2(EmitContext&) {
616 throw NotImplementedException("GLASM instruction");
617}
618
619void EmitGlobalAtomicUMax32x2(EmitContext&) {
620 throw NotImplementedException("GLASM instruction");
621}
622
623void EmitGlobalAtomicInc32x2(EmitContext&) {
624 throw NotImplementedException("GLASM instruction");
625}
626
627void EmitGlobalAtomicDec32x2(EmitContext&) {
628 throw NotImplementedException("GLASM instruction");
629}
630
631void EmitGlobalAtomicAnd32x2(EmitContext&) {
632 throw NotImplementedException("GLASM instruction");
633}
634
635void EmitGlobalAtomicOr32x2(EmitContext&) {
636 throw NotImplementedException("GLASM instruction");
637}
638
639void EmitGlobalAtomicXor32x2(EmitContext&) {
640 throw NotImplementedException("GLASM instruction");
641}
642
643void EmitGlobalAtomicExchange32x2(EmitContext&) {
644 throw NotImplementedException("GLASM instruction");
645}
646
540void EmitGlobalAtomicAddF32(EmitContext&) { 647void EmitGlobalAtomicAddF32(EmitContext&) {
541 throw NotImplementedException("GLASM instruction"); 648 throw NotImplementedException("GLASM instruction");
542} 649}
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
index dc377b053..a409a7ab3 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_atomic.cpp
@@ -105,6 +105,13 @@ void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_vi
105 pointer_offset, value, pointer_offset, value); 105 pointer_offset, value, pointer_offset, value);
106} 106}
107 107
108void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
109 std::string_view value) {
110 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
111 ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, pointer_offset, pointer_offset);
112 ctx.Add("smem[{}>>2]={}.x;smem[({}+4)>>2]={}.y;", pointer_offset, value, pointer_offset, value);
113}
114
108void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 115void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
109 const IR::Value& offset, std::string_view value) { 116 const IR::Value& offset, std::string_view value) {
110 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(), 117 ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
@@ -265,6 +272,97 @@ void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Val
265 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value); 272 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
266} 273}
267 274
275void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
276 const IR::Value& offset, std::string_view value) {
277 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
278 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
279 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
280 ctx.var_alloc.Consume(offset));
281 ctx.Add("{}_ssbo{}[{}>>2]+={}.x;{}_ssbo{}[({}>>2)+1]+={}.y;", ctx.stage_name, binding.U32(),
282 ctx.var_alloc.Consume(offset), value, ctx.stage_name, binding.U32(),
283 ctx.var_alloc.Consume(offset), value);
284}
285
286void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
287 const IR::Value& offset, std::string_view value) {
288 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
289 ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
290 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
291 ctx.var_alloc.Consume(offset));
292 ctx.Add("for(int "
293 "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
294 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
295 binding.U32(), ctx.var_alloc.Consume(offset), value);
296}
297
298void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
299 const IR::Value& offset, std::string_view value) {
300 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
301 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
302 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
303 ctx.var_alloc.Consume(offset));
304 ctx.Add("for(int i=0;i<2;++i){{ "
305 "{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],{}[i]);}}",
306 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
307 binding.U32(), ctx.var_alloc.Consume(offset), value);
308}
309
310void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
311 const IR::Value& offset, std::string_view value) {
312 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
313 ctx.AddU32x2("{}=ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
314 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
315 ctx.var_alloc.Consume(offset));
316 ctx.Add("for(int "
317 "i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),int({}[i])));}}",
318 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
319 binding.U32(), ctx.var_alloc.Consume(offset), value);
320}
321
322void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
323 const IR::Value& offset, std::string_view value) {
324 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
325 ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]);", inst, ctx.stage_name,
326 binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name, binding.U32(),
327 ctx.var_alloc.Consume(offset));
328 ctx.Add("for(int i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],{}[i]);}}",
329 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
330 binding.U32(), ctx.var_alloc.Consume(offset), value);
331}
332
333void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
334 const IR::Value& offset, std::string_view value) {
335 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
336 ctx.AddU32x2("{}=uvec2(atomicAnd({}_ssbo{}[{}>>2],{}.x),atomicAnd({}_ssbo{}[({}>>2)+1],{}.y));",
337 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
338 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
339}
340
341void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
342 const IR::Value& offset, std::string_view value) {
343 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
344 ctx.AddU32x2("{}=uvec2(atomicOr({}_ssbo{}[{}>>2],{}.x),atomicOr({}_ssbo{}[({}>>2)+1],{}.y));",
345 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
346 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
347}
348
349void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
350 const IR::Value& offset, std::string_view value) {
351 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
352 ctx.AddU32x2("{}=uvec2(atomicXor({}_ssbo{}[{}>>2],{}.x),atomicXor({}_ssbo{}[({}>>2)+1],{}.y));",
353 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
354 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
355}
356
357void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
358 const IR::Value& offset, std::string_view value) {
359 LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to 32x2");
360 ctx.AddU32x2("{}=uvec2(atomicExchange({}_ssbo{}[{}>>2],{}.x),atomicExchange({}_ssbo{}[({}>>2)+"
361 "1],{}.y));",
362 inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
363 ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
364}
365
268void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 366void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
269 const IR::Value& offset, std::string_view value) { 367 const IR::Value& offset, std::string_view value) {
270 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd"); 368 SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");
@@ -388,6 +486,50 @@ void EmitGlobalAtomicExchange64(EmitContext&) {
388 throw NotImplementedException("GLSL Instruction"); 486 throw NotImplementedException("GLSL Instruction");
389} 487}
390 488
 489void EmitGlobalAtomicIAdd32x2(EmitContext&) {
 490 throw NotImplementedException("GLSL Instruction");
 491}
 492
 493void EmitGlobalAtomicSMin32x2(EmitContext&) {
 494 throw NotImplementedException("GLSL Instruction");
 495}
 496
 497void EmitGlobalAtomicUMin32x2(EmitContext&) {
 498 throw NotImplementedException("GLSL Instruction");
 499}
 500
 501void EmitGlobalAtomicSMax32x2(EmitContext&) {
 502 throw NotImplementedException("GLSL Instruction");
 503}
 504
 505void EmitGlobalAtomicUMax32x2(EmitContext&) {
 506 throw NotImplementedException("GLSL Instruction");
 507}
 508
 509void EmitGlobalAtomicInc32x2(EmitContext&) {
 510 throw NotImplementedException("GLSL Instruction");
 511}
 512
 513void EmitGlobalAtomicDec32x2(EmitContext&) {
 514 throw NotImplementedException("GLSL Instruction");
 515}
 516
 517void EmitGlobalAtomicAnd32x2(EmitContext&) {
 518 throw NotImplementedException("GLSL Instruction");
 519}
 520
 521void EmitGlobalAtomicOr32x2(EmitContext&) {
 522 throw NotImplementedException("GLSL Instruction");
 523}
 524
 525void EmitGlobalAtomicXor32x2(EmitContext&) {
 526 throw NotImplementedException("GLSL Instruction");
 527}
 528
 529void EmitGlobalAtomicExchange32x2(EmitContext&) {
 530 throw NotImplementedException("GLSL Instruction");
 531}
 532
391void EmitGlobalAtomicAddF32(EmitContext&) { 533void EmitGlobalAtomicAddF32(EmitContext&) {
392 throw NotImplementedException("GLSL Instruction"); 534 throw NotImplementedException("GLSL Instruction");
393} 535}
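
GLSL has no 64-bit integer atomics, so the 32x2 storage paths above either pair two 32-bit atomics, one per half (And/Or/Xor/Exchange), or fall back to a plain read-modify-write (IAdd and the Min/Max family). Roughly what the IAdd fallback expands to for a compute-stage buffer at binding 0, with made-up names for the result and offset temporaries:

    // Illustrative expansion of EmitStorageAtomicIAdd32x2 only; not emitted verbatim.
    constexpr const char* kEmittedGlsl =
        "t0=uvec2(cs_ssbo0[off>>2],cs_ssbo0[(off>>2)+1]);\n" // non-atomic read of the old value
        "cs_ssbo0[off>>2]+=val.x;\n"                         // low 32 bits
        "cs_ssbo0[(off>>2)+1]+=val.y;\n";                    // high 32 bits
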
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
index 6cabbc717..704baddc9 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
@@ -442,6 +442,8 @@ void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_vi
442 std::string_view value); 442 std::string_view value);
443void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset, 443void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
444 std::string_view value); 444 std::string_view value);
445void EmitSharedAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
446 std::string_view value);
445void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 447void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
446 const IR::Value& offset, std::string_view value); 448 const IR::Value& offset, std::string_view value);
447void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 449void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -482,6 +484,24 @@ void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& b
482 const IR::Value& offset, std::string_view value); 484 const IR::Value& offset, std::string_view value);
483void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 485void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
484 const IR::Value& offset, std::string_view value); 486 const IR::Value& offset, std::string_view value);
487void EmitStorageAtomicIAdd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
488 const IR::Value& offset, std::string_view value);
489void EmitStorageAtomicSMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
490 const IR::Value& offset, std::string_view value);
491void EmitStorageAtomicUMin32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
492 const IR::Value& offset, std::string_view value);
493void EmitStorageAtomicSMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
494 const IR::Value& offset, std::string_view value);
495void EmitStorageAtomicUMax32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
496 const IR::Value& offset, std::string_view value);
497void EmitStorageAtomicAnd32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
498 const IR::Value& offset, std::string_view value);
499void EmitStorageAtomicOr32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
500 const IR::Value& offset, std::string_view value);
501void EmitStorageAtomicXor32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
502 const IR::Value& offset, std::string_view value);
503void EmitStorageAtomicExchange32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
504 const IR::Value& offset, std::string_view value);
485void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 505void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
486 const IR::Value& offset, std::string_view value); 506 const IR::Value& offset, std::string_view value);
487void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, 507void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
@@ -518,6 +538,17 @@ void EmitGlobalAtomicAnd64(EmitContext& ctx);
518void EmitGlobalAtomicOr64(EmitContext& ctx); 538void EmitGlobalAtomicOr64(EmitContext& ctx);
519void EmitGlobalAtomicXor64(EmitContext& ctx); 539void EmitGlobalAtomicXor64(EmitContext& ctx);
520void EmitGlobalAtomicExchange64(EmitContext& ctx); 540void EmitGlobalAtomicExchange64(EmitContext& ctx);
541void EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
542void EmitGlobalAtomicSMin32x2(EmitContext& ctx);
543void EmitGlobalAtomicUMin32x2(EmitContext& ctx);
544void EmitGlobalAtomicSMax32x2(EmitContext& ctx);
545void EmitGlobalAtomicUMax32x2(EmitContext& ctx);
546void EmitGlobalAtomicInc32x2(EmitContext& ctx);
547void EmitGlobalAtomicDec32x2(EmitContext& ctx);
548void EmitGlobalAtomicAnd32x2(EmitContext& ctx);
549void EmitGlobalAtomicOr32x2(EmitContext& ctx);
550void EmitGlobalAtomicXor32x2(EmitContext& ctx);
551void EmitGlobalAtomicExchange32x2(EmitContext& ctx);
521void EmitGlobalAtomicAddF32(EmitContext& ctx); 552void EmitGlobalAtomicAddF32(EmitContext& ctx);
522void EmitGlobalAtomicAddF16x2(EmitContext& ctx); 553void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
523void EmitGlobalAtomicAddF32x2(EmitContext& ctx); 554void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
index 46ba52a25..d3cbb14a9 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_atomic.cpp
@@ -82,6 +82,17 @@ Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value&
82 ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result)); 82 ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
83 return original_value; 83 return original_value;
84} 84}
85
86Id StorageAtomicU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
87 Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
88 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
89 const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
90 binding, offset, sizeof(u32[2]))};
91 const Id original_value{ctx.OpLoad(ctx.U32[2], pointer)};
92 const Id result{(ctx.*non_atomic_func)(ctx.U32[2], value, original_value)};
93 ctx.OpStore(pointer, result);
94 return original_value;
95}
85} // Anonymous namespace 96} // Anonymous namespace
86 97
87Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) { 98Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
@@ -141,7 +152,7 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
141 const auto [scope, semantics]{AtomicArgs(ctx)}; 152 const auto [scope, semantics]{AtomicArgs(ctx)};
142 return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value); 153 return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
143 } 154 }
144 LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic"); 155 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
145 const Id pointer_1{SharedPointer(ctx, offset, 0)}; 156 const Id pointer_1{SharedPointer(ctx, offset, 0)};
146 const Id pointer_2{SharedPointer(ctx, offset, 1)}; 157 const Id pointer_2{SharedPointer(ctx, offset, 1)};
147 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)}; 158 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
@@ -152,6 +163,18 @@ Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
152 return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2)); 163 return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
153} 164}
154 165
166Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id offset, Id value) {
167 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
168 const Id pointer_1{SharedPointer(ctx, offset, 0)};
169 const Id pointer_2{SharedPointer(ctx, offset, 1)};
170 const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
171 const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
172 const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
173 ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
174 ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
175 return ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2);
176}
177
155Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 178Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
156 Id value) { 179 Id value) {
157 return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd); 180 return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
@@ -275,6 +298,56 @@ Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const
275 return original; 298 return original;
276} 299}
277 300
301Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
302 Id value) {
303 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpIAdd);
304}
305
306Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
307 Id value) {
308 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMin);
309}
310
311Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
312 Id value) {
313 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMin);
314}
315
316Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
317 Id value) {
318 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpSMax);
319}
320
321Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
322 Id value) {
323 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpUMax);
324}
325
326Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
327 Id value) {
328 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseAnd);
329}
330
331Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
332 Id value) {
333 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseOr);
334}
335
336Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
337 Id value) {
338 return StorageAtomicU32x2(ctx, binding, offset, value, &Sirit::Module::OpBitwiseXor);
339}
340
341Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
342 const IR::Value& offset, Id value) {
343 LOG_WARNING(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
344 const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
345 binding, offset, sizeof(u32[2]))};
346 const Id original{ctx.OpLoad(ctx.U32[2], pointer)};
347 ctx.OpStore(pointer, value);
348 return original;
349}
350
278Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 351Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
279 Id value) { 352 Id value) {
280 const Id ssbo{ctx.ssbos[binding.U32()].U32}; 353 const Id ssbo{ctx.ssbos[binding.U32()].U32};
@@ -418,6 +491,50 @@ Id EmitGlobalAtomicExchange64(EmitContext&) {
418 throw NotImplementedException("SPIR-V Instruction"); 491 throw NotImplementedException("SPIR-V Instruction");
419} 492}
420 493
494Id EmitGlobalAtomicIAdd32x2(EmitContext&) {
495 throw NotImplementedException("SPIR-V Instruction");
496}
497
498Id EmitGlobalAtomicSMin32x2(EmitContext&) {
499 throw NotImplementedException("SPIR-V Instruction");
500}
501
502Id EmitGlobalAtomicUMin32x2(EmitContext&) {
503 throw NotImplementedException("SPIR-V Instruction");
504}
505
506Id EmitGlobalAtomicSMax32x2(EmitContext&) {
507 throw NotImplementedException("SPIR-V Instruction");
508}
509
510Id EmitGlobalAtomicUMax32x2(EmitContext&) {
511 throw NotImplementedException("SPIR-V Instruction");
512}
513
514Id EmitGlobalAtomicInc32x2(EmitContext&) {
515 throw NotImplementedException("SPIR-V Instruction");
516}
517
518Id EmitGlobalAtomicDec32x2(EmitContext&) {
519 throw NotImplementedException("SPIR-V Instruction");
520}
521
522Id EmitGlobalAtomicAnd32x2(EmitContext&) {
523 throw NotImplementedException("SPIR-V Instruction");
524}
525
526Id EmitGlobalAtomicOr32x2(EmitContext&) {
527 throw NotImplementedException("SPIR-V Instruction");
528}
529
530Id EmitGlobalAtomicXor32x2(EmitContext&) {
531 throw NotImplementedException("SPIR-V Instruction");
532}
533
534Id EmitGlobalAtomicExchange32x2(EmitContext&) {
535 throw NotImplementedException("SPIR-V Instruction");
536}
537
421Id EmitGlobalAtomicAddF32(EmitContext&) { 538Id EmitGlobalAtomicAddF32(EmitContext&) {
422 throw NotImplementedException("SPIR-V Instruction"); 539 throw NotImplementedException("SPIR-V Instruction");
423} 540}
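
The new StorageAtomicU32x2 helper deduplicates eight of the SPIR-V fallbacks by taking a pointer to whichever Sirit::Module member builds the non-atomic replacement op (OpIAdd, OpSMin, and so on), sharing a single load-apply-store body. The pointer-to-member dispatch pattern in isolation:

    #include <cstdio>

    struct Module {
        int OpIAdd(int a, int b) { return a + b; }
        int OpUMin(int a, int b) { return a < b ? a : b; }
    };

    int Apply(Module& m, int (Module::*op)(int, int), int a, int b) {
        return (m.*op)(a, b);  // invoke the selected builder on this module
    }

    int main() {
        Module m;
        std::printf("%d %d\n", Apply(m, &Module::OpIAdd, 2, 3),
                    Apply(m, &Module::OpUMin, 2, 3));  // prints "5 2"
    }
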
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 887112deb..f263b41b0 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -335,6 +335,7 @@ Id EmitSharedAtomicOr32(EmitContext& ctx, Id pointer_offset, Id value);
335Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value); 335Id EmitSharedAtomicXor32(EmitContext& ctx, Id pointer_offset, Id value);
336Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value); 336Id EmitSharedAtomicExchange32(EmitContext& ctx, Id pointer_offset, Id value);
337Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value); 337Id EmitSharedAtomicExchange64(EmitContext& ctx, Id pointer_offset, Id value);
338Id EmitSharedAtomicExchange32x2(EmitContext& ctx, Id pointer_offset, Id value);
338Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 339Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
339 Id value); 340 Id value);
340Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 341Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -375,6 +376,24 @@ Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::
375 Id value); 376 Id value);
376Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 377Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
377 Id value); 378 Id value);
379Id EmitStorageAtomicIAdd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
380 Id value);
381Id EmitStorageAtomicSMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
382 Id value);
383Id EmitStorageAtomicUMin32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
384 Id value);
385Id EmitStorageAtomicSMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
386 Id value);
387Id EmitStorageAtomicUMax32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
388 Id value);
389Id EmitStorageAtomicAnd32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
390 Id value);
391Id EmitStorageAtomicOr32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
392 Id value);
393Id EmitStorageAtomicXor32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
394 Id value);
395Id EmitStorageAtomicExchange32x2(EmitContext& ctx, const IR::Value& binding,
396 const IR::Value& offset, Id value);
378Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 397Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
379 Id value); 398 Id value);
380Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, 399Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
@@ -411,6 +430,17 @@ Id EmitGlobalAtomicAnd64(EmitContext& ctx);
411Id EmitGlobalAtomicOr64(EmitContext& ctx); 430Id EmitGlobalAtomicOr64(EmitContext& ctx);
412Id EmitGlobalAtomicXor64(EmitContext& ctx); 431Id EmitGlobalAtomicXor64(EmitContext& ctx);
413Id EmitGlobalAtomicExchange64(EmitContext& ctx); 432Id EmitGlobalAtomicExchange64(EmitContext& ctx);
433Id EmitGlobalAtomicIAdd32x2(EmitContext& ctx);
434Id EmitGlobalAtomicSMin32x2(EmitContext& ctx);
435Id EmitGlobalAtomicUMin32x2(EmitContext& ctx);
436Id EmitGlobalAtomicSMax32x2(EmitContext& ctx);
437Id EmitGlobalAtomicUMax32x2(EmitContext& ctx);
438Id EmitGlobalAtomicInc32x2(EmitContext& ctx);
439Id EmitGlobalAtomicDec32x2(EmitContext& ctx);
440Id EmitGlobalAtomicAnd32x2(EmitContext& ctx);
441Id EmitGlobalAtomicOr32x2(EmitContext& ctx);
442Id EmitGlobalAtomicXor32x2(EmitContext& ctx);
443Id EmitGlobalAtomicExchange32x2(EmitContext& ctx);
414Id EmitGlobalAtomicAddF32(EmitContext& ctx); 444Id EmitGlobalAtomicAddF32(EmitContext& ctx);
415Id EmitGlobalAtomicAddF16x2(EmitContext& ctx); 445Id EmitGlobalAtomicAddF16x2(EmitContext& ctx);
416Id EmitGlobalAtomicAddF32x2(EmitContext& ctx); 446Id EmitGlobalAtomicAddF32x2(EmitContext& ctx);
diff --git a/src/shader_recompiler/frontend/ir/microinstruction.cpp b/src/shader_recompiler/frontend/ir/microinstruction.cpp
index 97e2bf6af..631446cf7 100644
--- a/src/shader_recompiler/frontend/ir/microinstruction.cpp
+++ b/src/shader_recompiler/frontend/ir/microinstruction.cpp
@@ -118,6 +118,7 @@ bool Inst::MayHaveSideEffects() const noexcept {
118 case Opcode::SharedAtomicXor32: 118 case Opcode::SharedAtomicXor32:
119 case Opcode::SharedAtomicExchange32: 119 case Opcode::SharedAtomicExchange32:
120 case Opcode::SharedAtomicExchange64: 120 case Opcode::SharedAtomicExchange64:
121 case Opcode::SharedAtomicExchange32x2:
121 case Opcode::GlobalAtomicIAdd32: 122 case Opcode::GlobalAtomicIAdd32:
122 case Opcode::GlobalAtomicSMin32: 123 case Opcode::GlobalAtomicSMin32:
123 case Opcode::GlobalAtomicUMin32: 124 case Opcode::GlobalAtomicUMin32:
@@ -138,6 +139,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
138 case Opcode::GlobalAtomicOr64: 139 case Opcode::GlobalAtomicOr64:
139 case Opcode::GlobalAtomicXor64: 140 case Opcode::GlobalAtomicXor64:
140 case Opcode::GlobalAtomicExchange64: 141 case Opcode::GlobalAtomicExchange64:
142 case Opcode::GlobalAtomicIAdd32x2:
143 case Opcode::GlobalAtomicSMin32x2:
144 case Opcode::GlobalAtomicUMin32x2:
145 case Opcode::GlobalAtomicSMax32x2:
146 case Opcode::GlobalAtomicUMax32x2:
147 case Opcode::GlobalAtomicAnd32x2:
148 case Opcode::GlobalAtomicOr32x2:
149 case Opcode::GlobalAtomicXor32x2:
150 case Opcode::GlobalAtomicExchange32x2:
141 case Opcode::GlobalAtomicAddF32: 151 case Opcode::GlobalAtomicAddF32:
142 case Opcode::GlobalAtomicAddF16x2: 152 case Opcode::GlobalAtomicAddF16x2:
143 case Opcode::GlobalAtomicAddF32x2: 153 case Opcode::GlobalAtomicAddF32x2:
@@ -165,6 +175,15 @@ bool Inst::MayHaveSideEffects() const noexcept {
165 case Opcode::StorageAtomicOr64: 175 case Opcode::StorageAtomicOr64:
166 case Opcode::StorageAtomicXor64: 176 case Opcode::StorageAtomicXor64:
167 case Opcode::StorageAtomicExchange64: 177 case Opcode::StorageAtomicExchange64:
178 case Opcode::StorageAtomicIAdd32x2:
179 case Opcode::StorageAtomicSMin32x2:
180 case Opcode::StorageAtomicUMin32x2:
181 case Opcode::StorageAtomicSMax32x2:
182 case Opcode::StorageAtomicUMax32x2:
183 case Opcode::StorageAtomicAnd32x2:
184 case Opcode::StorageAtomicOr32x2:
185 case Opcode::StorageAtomicXor32x2:
186 case Opcode::StorageAtomicExchange32x2:
168 case Opcode::StorageAtomicAddF32: 187 case Opcode::StorageAtomicAddF32:
169 case Opcode::StorageAtomicAddF16x2: 188 case Opcode::StorageAtomicAddF16x2:
170 case Opcode::StorageAtomicAddF32x2: 189 case Opcode::StorageAtomicAddF32x2:
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index b94ce7406..efb6bfac3 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -341,6 +341,7 @@ OPCODE(SharedAtomicOr32, U32, U32,
341OPCODE(SharedAtomicXor32, U32, U32, U32, ) 341OPCODE(SharedAtomicXor32, U32, U32, U32, )
342OPCODE(SharedAtomicExchange32, U32, U32, U32, ) 342OPCODE(SharedAtomicExchange32, U32, U32, U32, )
343OPCODE(SharedAtomicExchange64, U64, U32, U64, ) 343OPCODE(SharedAtomicExchange64, U64, U32, U64, )
344OPCODE(SharedAtomicExchange32x2, U32x2, U32, U32x2, )
344 345
345OPCODE(GlobalAtomicIAdd32, U32, U64, U32, ) 346OPCODE(GlobalAtomicIAdd32, U32, U64, U32, )
346OPCODE(GlobalAtomicSMin32, U32, U64, U32, ) 347OPCODE(GlobalAtomicSMin32, U32, U64, U32, )
@@ -362,6 +363,15 @@ OPCODE(GlobalAtomicAnd64, U64, U64,
362OPCODE(GlobalAtomicOr64, U64, U64, U64, ) 363OPCODE(GlobalAtomicOr64, U64, U64, U64, )
363OPCODE(GlobalAtomicXor64, U64, U64, U64, ) 364OPCODE(GlobalAtomicXor64, U64, U64, U64, )
364OPCODE(GlobalAtomicExchange64, U64, U64, U64, ) 365OPCODE(GlobalAtomicExchange64, U64, U64, U64, )
366OPCODE(GlobalAtomicIAdd32x2, U32x2, U32x2, U32x2, )
367OPCODE(GlobalAtomicSMin32x2, U32x2, U32x2, U32x2, )
368OPCODE(GlobalAtomicUMin32x2, U32x2, U32x2, U32x2, )
369OPCODE(GlobalAtomicSMax32x2, U32x2, U32x2, U32x2, )
370OPCODE(GlobalAtomicUMax32x2, U32x2, U32x2, U32x2, )
371OPCODE(GlobalAtomicAnd32x2, U32x2, U32x2, U32x2, )
372OPCODE(GlobalAtomicOr32x2, U32x2, U32x2, U32x2, )
373OPCODE(GlobalAtomicXor32x2, U32x2, U32x2, U32x2, )
374OPCODE(GlobalAtomicExchange32x2, U32x2, U32x2, U32x2, )
365OPCODE(GlobalAtomicAddF32, F32, U64, F32, ) 375OPCODE(GlobalAtomicAddF32, F32, U64, F32, )
366OPCODE(GlobalAtomicAddF16x2, U32, U64, F16x2, ) 376OPCODE(GlobalAtomicAddF16x2, U32, U64, F16x2, )
367OPCODE(GlobalAtomicAddF32x2, U32, U64, F32x2, ) 377OPCODE(GlobalAtomicAddF32x2, U32, U64, F32x2, )
@@ -390,6 +400,15 @@ OPCODE(StorageAtomicAnd64, U64, U32,
390OPCODE(StorageAtomicOr64, U64, U32, U32, U64, ) 400OPCODE(StorageAtomicOr64, U64, U32, U32, U64, )
391OPCODE(StorageAtomicXor64, U64, U32, U32, U64, ) 401OPCODE(StorageAtomicXor64, U64, U32, U32, U64, )
392OPCODE(StorageAtomicExchange64, U64, U32, U32, U64, ) 402OPCODE(StorageAtomicExchange64, U64, U32, U32, U64, )
403OPCODE(StorageAtomicIAdd32x2, U32x2, U32, U32, U32x2, )
404OPCODE(StorageAtomicSMin32x2, U32x2, U32, U32, U32x2, )
405OPCODE(StorageAtomicUMin32x2, U32x2, U32, U32, U32x2, )
406OPCODE(StorageAtomicSMax32x2, U32x2, U32, U32, U32x2, )
407OPCODE(StorageAtomicUMax32x2, U32x2, U32, U32, U32x2, )
408OPCODE(StorageAtomicAnd32x2, U32x2, U32, U32, U32x2, )
409OPCODE(StorageAtomicOr32x2, U32x2, U32, U32, U32x2, )
410OPCODE(StorageAtomicXor32x2, U32x2, U32, U32, U32x2, )
411OPCODE(StorageAtomicExchange32x2, U32x2, U32, U32, U32x2, )
393OPCODE(StorageAtomicAddF32, F32, U32, U32, F32, ) 412OPCODE(StorageAtomicAddF32, F32, U32, U32, F32, )
394OPCODE(StorageAtomicAddF16x2, U32, U32, U32, F16x2, ) 413OPCODE(StorageAtomicAddF16x2, U32, U32, U32, F16x2, )
395OPCODE(StorageAtomicAddF32x2, U32, U32, U32, F32x2, ) 414OPCODE(StorageAtomicAddF32x2, U32, U32, U32, F32x2, )
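
opcodes.inc is an X-macro table: each OPCODE(name, result type, argument types...) row, including the nine new 32x2 entries, is stamped out by whatever macro the including file defines, so the opcode enum and its type and arity tables stay in sync automatically. A minimal consumer of that shape (presumably the real ones live in opcodes.h):

    // Each include site defines OPCODE to extract one column of the table.
    enum class Opcode {
    #define OPCODE(name, result_type, ...) name,
    #include "opcodes.inc"
    #undef OPCODE
    };
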
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index b6a20f904..bfd2ae650 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -360,6 +360,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
360 case IR::Opcode::GlobalAtomicOr64: 360 case IR::Opcode::GlobalAtomicOr64:
361 case IR::Opcode::GlobalAtomicXor64: 361 case IR::Opcode::GlobalAtomicXor64:
362 case IR::Opcode::GlobalAtomicExchange64: 362 case IR::Opcode::GlobalAtomicExchange64:
363 case IR::Opcode::GlobalAtomicIAdd32x2:
364 case IR::Opcode::GlobalAtomicSMin32x2:
365 case IR::Opcode::GlobalAtomicUMin32x2:
366 case IR::Opcode::GlobalAtomicSMax32x2:
367 case IR::Opcode::GlobalAtomicUMax32x2:
368 case IR::Opcode::GlobalAtomicAnd32x2:
369 case IR::Opcode::GlobalAtomicOr32x2:
370 case IR::Opcode::GlobalAtomicXor32x2:
371 case IR::Opcode::GlobalAtomicExchange32x2:
363 case IR::Opcode::GlobalAtomicAddF32: 372 case IR::Opcode::GlobalAtomicAddF32:
364 case IR::Opcode::GlobalAtomicAddF16x2: 373 case IR::Opcode::GlobalAtomicAddF16x2:
365 case IR::Opcode::GlobalAtomicAddF32x2: 374 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -597,6 +606,15 @@ void VisitUsages(Info& info, IR::Inst& inst) {
597 break; 606 break;
598 case IR::Opcode::LoadStorage64: 607 case IR::Opcode::LoadStorage64:
599 case IR::Opcode::WriteStorage64: 608 case IR::Opcode::WriteStorage64:
609 case IR::Opcode::StorageAtomicIAdd32x2:
610 case IR::Opcode::StorageAtomicSMin32x2:
611 case IR::Opcode::StorageAtomicUMin32x2:
612 case IR::Opcode::StorageAtomicSMax32x2:
613 case IR::Opcode::StorageAtomicUMax32x2:
614 case IR::Opcode::StorageAtomicAnd32x2:
615 case IR::Opcode::StorageAtomicOr32x2:
616 case IR::Opcode::StorageAtomicXor32x2:
617 case IR::Opcode::StorageAtomicExchange32x2:
600 info.used_storage_buffer_types |= IR::Type::U32x2; 618 info.used_storage_buffer_types |= IR::Type::U32x2;
601 break; 619 break;
602 case IR::Opcode::LoadStorage128: 620 case IR::Opcode::LoadStorage128:
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 4197b0095..38592afd0 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -92,6 +92,15 @@ bool IsGlobalMemory(const IR::Inst& inst) {
92 case IR::Opcode::GlobalAtomicOr64: 92 case IR::Opcode::GlobalAtomicOr64:
93 case IR::Opcode::GlobalAtomicXor64: 93 case IR::Opcode::GlobalAtomicXor64:
94 case IR::Opcode::GlobalAtomicExchange64: 94 case IR::Opcode::GlobalAtomicExchange64:
95 case IR::Opcode::GlobalAtomicIAdd32x2:
96 case IR::Opcode::GlobalAtomicSMin32x2:
97 case IR::Opcode::GlobalAtomicUMin32x2:
98 case IR::Opcode::GlobalAtomicSMax32x2:
99 case IR::Opcode::GlobalAtomicUMax32x2:
100 case IR::Opcode::GlobalAtomicAnd32x2:
101 case IR::Opcode::GlobalAtomicOr32x2:
102 case IR::Opcode::GlobalAtomicXor32x2:
103 case IR::Opcode::GlobalAtomicExchange32x2:
95 case IR::Opcode::GlobalAtomicAddF32: 104 case IR::Opcode::GlobalAtomicAddF32:
96 case IR::Opcode::GlobalAtomicAddF16x2: 105 case IR::Opcode::GlobalAtomicAddF16x2:
97 case IR::Opcode::GlobalAtomicAddF32x2: 106 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -135,6 +144,15 @@ bool IsGlobalMemoryWrite(const IR::Inst& inst) {
135 case IR::Opcode::GlobalAtomicOr64: 144 case IR::Opcode::GlobalAtomicOr64:
136 case IR::Opcode::GlobalAtomicXor64: 145 case IR::Opcode::GlobalAtomicXor64:
137 case IR::Opcode::GlobalAtomicExchange64: 146 case IR::Opcode::GlobalAtomicExchange64:
147 case IR::Opcode::GlobalAtomicIAdd32x2:
148 case IR::Opcode::GlobalAtomicSMin32x2:
149 case IR::Opcode::GlobalAtomicUMin32x2:
150 case IR::Opcode::GlobalAtomicSMax32x2:
151 case IR::Opcode::GlobalAtomicUMax32x2:
152 case IR::Opcode::GlobalAtomicAnd32x2:
153 case IR::Opcode::GlobalAtomicOr32x2:
154 case IR::Opcode::GlobalAtomicXor32x2:
155 case IR::Opcode::GlobalAtomicExchange32x2:
138 case IR::Opcode::GlobalAtomicAddF32: 156 case IR::Opcode::GlobalAtomicAddF32:
139 case IR::Opcode::GlobalAtomicAddF16x2: 157 case IR::Opcode::GlobalAtomicAddF16x2:
140 case IR::Opcode::GlobalAtomicAddF32x2: 158 case IR::Opcode::GlobalAtomicAddF32x2:
@@ -199,6 +217,8 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
199 return IR::Opcode::StorageAtomicOr32; 217 return IR::Opcode::StorageAtomicOr32;
200 case IR::Opcode::GlobalAtomicXor32: 218 case IR::Opcode::GlobalAtomicXor32:
201 return IR::Opcode::StorageAtomicXor32; 219 return IR::Opcode::StorageAtomicXor32;
220 case IR::Opcode::GlobalAtomicExchange32:
221 return IR::Opcode::StorageAtomicExchange32;
202 case IR::Opcode::GlobalAtomicIAdd64: 222 case IR::Opcode::GlobalAtomicIAdd64:
203 return IR::Opcode::StorageAtomicIAdd64; 223 return IR::Opcode::StorageAtomicIAdd64;
204 case IR::Opcode::GlobalAtomicSMin64: 224 case IR::Opcode::GlobalAtomicSMin64:
@@ -215,10 +235,26 @@ IR::Opcode GlobalToStorage(IR::Opcode opcode) {
215 return IR::Opcode::StorageAtomicOr64; 235 return IR::Opcode::StorageAtomicOr64;
216 case IR::Opcode::GlobalAtomicXor64: 236 case IR::Opcode::GlobalAtomicXor64:
217 return IR::Opcode::StorageAtomicXor64; 237 return IR::Opcode::StorageAtomicXor64;
218 case IR::Opcode::GlobalAtomicExchange32:
219 return IR::Opcode::StorageAtomicExchange32;
220 case IR::Opcode::GlobalAtomicExchange64: 238 case IR::Opcode::GlobalAtomicExchange64:
221 return IR::Opcode::StorageAtomicExchange64; 239 return IR::Opcode::StorageAtomicExchange64;
240 case IR::Opcode::GlobalAtomicIAdd32x2:
241 return IR::Opcode::StorageAtomicIAdd32x2;
242 case IR::Opcode::GlobalAtomicSMin32x2:
243 return IR::Opcode::StorageAtomicSMin32x2;
244 case IR::Opcode::GlobalAtomicUMin32x2:
245 return IR::Opcode::StorageAtomicUMin32x2;
246 case IR::Opcode::GlobalAtomicSMax32x2:
247 return IR::Opcode::StorageAtomicSMax32x2;
248 case IR::Opcode::GlobalAtomicUMax32x2:
249 return IR::Opcode::StorageAtomicUMax32x2;
250 case IR::Opcode::GlobalAtomicAnd32x2:
251 return IR::Opcode::StorageAtomicAnd32x2;
252 case IR::Opcode::GlobalAtomicOr32x2:
253 return IR::Opcode::StorageAtomicOr32x2;
254 case IR::Opcode::GlobalAtomicXor32x2:
255 return IR::Opcode::StorageAtomicXor32x2;
256 case IR::Opcode::GlobalAtomicExchange32x2:
257 return IR::Opcode::StorageAtomicExchange32x2;
222 case IR::Opcode::GlobalAtomicAddF32: 258 case IR::Opcode::GlobalAtomicAddF32:
223 return IR::Opcode::StorageAtomicAddF32; 259 return IR::Opcode::StorageAtomicAddF32;
224 case IR::Opcode::GlobalAtomicAddF16x2: 260 case IR::Opcode::GlobalAtomicAddF16x2:
@@ -454,6 +490,15 @@ void Replace(IR::Block& block, IR::Inst& inst, const IR::U32& storage_index,
454 case IR::Opcode::GlobalAtomicOr64: 490 case IR::Opcode::GlobalAtomicOr64:
455 case IR::Opcode::GlobalAtomicXor64: 491 case IR::Opcode::GlobalAtomicXor64:
456 case IR::Opcode::GlobalAtomicExchange64: 492 case IR::Opcode::GlobalAtomicExchange64:
493 case IR::Opcode::GlobalAtomicIAdd32x2:
494 case IR::Opcode::GlobalAtomicSMin32x2:
495 case IR::Opcode::GlobalAtomicUMin32x2:
496 case IR::Opcode::GlobalAtomicSMax32x2:
497 case IR::Opcode::GlobalAtomicUMax32x2:
498 case IR::Opcode::GlobalAtomicAnd32x2:
499 case IR::Opcode::GlobalAtomicOr32x2:
500 case IR::Opcode::GlobalAtomicXor32x2:
501 case IR::Opcode::GlobalAtomicExchange32x2:
457 case IR::Opcode::GlobalAtomicAddF32: 502 case IR::Opcode::GlobalAtomicAddF32:
458 case IR::Opcode::GlobalAtomicAddF16x2: 503 case IR::Opcode::GlobalAtomicAddF16x2:
459 case IR::Opcode::GlobalAtomicAddF32x2: 504 case IR::Opcode::GlobalAtomicAddF32x2:
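
Once this pass proves a global address is backed by a storage buffer, it retags each global atomic as its storage counterpart; GlobalToStorage is a pure opcode-to-opcode table, and the hunks above both add the nine 32x2 rows and move the GlobalAtomicExchange32 case up beside the other 32-bit entries. The shape of that mapping, reduced to two cases (a sketch; the real switch covers every global memory opcode):

    IR::Opcode GlobalToStorage(IR::Opcode opcode) {
        switch (opcode) {
        case IR::Opcode::GlobalAtomicIAdd32x2:
            return IR::Opcode::StorageAtomicIAdd32x2;
        case IR::Opcode::GlobalAtomicExchange32x2:
            return IR::Opcode::StorageAtomicExchange32x2;
        default:
            throw InvalidArgument("Invalid global memory opcode");
        }
    }
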
diff --git a/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp b/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
index e80d3d1d9..c2654cd9b 100644
--- a/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
+++ b/src/shader_recompiler/ir_opt/lower_int64_to_int32.cpp
@@ -199,6 +199,26 @@ void Lower(IR::Block& block, IR::Inst& inst) {
199 return ShiftRightLogical64To32(block, inst); 199 return ShiftRightLogical64To32(block, inst);
200 case IR::Opcode::ShiftRightArithmetic64: 200 case IR::Opcode::ShiftRightArithmetic64:
201 return ShiftRightArithmetic64To32(block, inst); 201 return ShiftRightArithmetic64To32(block, inst);
202 case IR::Opcode::SharedAtomicExchange64:
203 return inst.ReplaceOpcode(IR::Opcode::SharedAtomicExchange32x2);
204 case IR::Opcode::GlobalAtomicIAdd64:
205 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicIAdd32x2);
206 case IR::Opcode::GlobalAtomicSMin64:
207 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMin32x2);
208 case IR::Opcode::GlobalAtomicUMin64:
209 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMin32x2);
210 case IR::Opcode::GlobalAtomicSMax64:
211 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicSMax32x2);
212 case IR::Opcode::GlobalAtomicUMax64:
213 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicUMax32x2);
214 case IR::Opcode::GlobalAtomicAnd64:
215 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicAnd32x2);
216 case IR::Opcode::GlobalAtomicOr64:
217 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicOr32x2);
218 case IR::Opcode::GlobalAtomicXor64:
219 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicXor32x2);
220 case IR::Opcode::GlobalAtomicExchange64:
221 return inst.ReplaceOpcode(IR::Opcode::GlobalAtomicExchange32x2);
202 default: 222 default:
203 break; 223 break;
204 } 224 }
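
With these cases the Int64 lowering pass retags 64-bit atomics in place via ReplaceOpcode rather than rebuilding the instruction, after which backends only ever see the 32x2 forms; a 64-bit operand is then just two 32-bit words, matching the U32x2 rows added to opcodes.inc. The low/high split in isolation:

    #include <cstdint>
    #include <utility>

    using U32x2 = std::pair<std::uint32_t, std::uint32_t>;  // {low, high}

    constexpr U32x2 Split(std::uint64_t v) {
        return {static_cast<std::uint32_t>(v), static_cast<std::uint32_t>(v >> 32)};
    }
    constexpr std::uint64_t Join(U32x2 v) {
        return (static_cast<std::uint64_t>(v.second) << 32) | v.first;
    }
    static_assert(Join(Split(0x1122334455667788ULL)) == 0x1122334455667788ULL);
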
diff --git a/src/video_core/renderer_base.h b/src/video_core/renderer_base.h
index bb204454e..c5f974080 100644
--- a/src/video_core/renderer_base.h
+++ b/src/video_core/renderer_base.h
@@ -5,9 +5,10 @@
5#pragma once 5#pragma once
6 6
7#include <atomic> 7#include <atomic>
8#include <functional>
8#include <memory> 9#include <memory>
9#include <optional>
10 10
11#include "common/common_funcs.h"
11#include "common/common_types.h" 12#include "common/common_types.h"
12#include "core/frontend/emu_window.h" 13#include "core/frontend/emu_window.h"
13#include "video_core/gpu.h" 14#include "video_core/gpu.h"
@@ -28,8 +29,11 @@ struct RendererSettings {
28 Layout::FramebufferLayout screenshot_framebuffer_layout; 29 Layout::FramebufferLayout screenshot_framebuffer_layout;
29}; 30};
30 31
31class RendererBase : NonCopyable { 32class RendererBase {
32public: 33public:
34 YUZU_NON_COPYABLE(RendererBase);
35 YUZU_NON_MOVEABLE(RendererBase);
36
33 explicit RendererBase(Core::Frontend::EmuWindow& window, 37 explicit RendererBase(Core::Frontend::EmuWindow& window,
34 std::unique_ptr<Core::Frontend::GraphicsContext> context); 38 std::unique_ptr<Core::Frontend::GraphicsContext> context);
35 virtual ~RendererBase(); 39 virtual ~RendererBase();
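RendererBase stops inheriting the old NonCopyable helper (removed from common_types.h at the top of this diff) and instead deletes the special members in place via the YUZU_NON_COPYABLE/YUZU_NON_MOVEABLE macros pulled in from common/common_funcs.h. The macros presumably expand along these lines (an assumed sketch, not the verbatim definitions):

    // Assumed expansions, for illustration only; the real definitions live
    // in src/common/common_funcs.h.
    #define YUZU_NON_COPYABLE(cls)                                                                 \
        cls(const cls&) = delete;                                                                  \
        cls& operator=(const cls&) = delete
    #define YUZU_NON_MOVEABLE(cls)                                                                 \
        cls(cls&&) = delete;                                                                       \
        cls& operator=(cls&&) = delete

Deleting the members inside the class avoids carrying an extra base class and lets compiler diagnostics name RendererBase directly instead of NonCopyable.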
diff --git a/src/video_core/renderer_opengl/gl_resource_manager.h b/src/video_core/renderer_opengl/gl_resource_manager.h
index b2d5bfd3b..84e07f8bd 100644
--- a/src/video_core/renderer_opengl/gl_resource_manager.h
+++ b/src/video_core/renderer_opengl/gl_resource_manager.h
@@ -7,12 +7,14 @@
7#include <string_view> 7#include <string_view>
8#include <utility> 8#include <utility>
9#include <glad/glad.h> 9#include <glad/glad.h>
10#include "common/common_types.h" 10#include "common/common_funcs.h"
11 11
12namespace OpenGL { 12namespace OpenGL {
13 13
14class OGLRenderbuffer : private NonCopyable { 14class OGLRenderbuffer final {
15public: 15public:
16 YUZU_NON_COPYABLE(OGLRenderbuffer);
17
16 OGLRenderbuffer() = default; 18 OGLRenderbuffer() = default;
17 19
18 OGLRenderbuffer(OGLRenderbuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 20 OGLRenderbuffer(OGLRenderbuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -36,8 +38,10 @@ public:
36 GLuint handle = 0; 38 GLuint handle = 0;
37}; 39};
38 40
39class OGLTexture : private NonCopyable { 41class OGLTexture final {
40public: 42public:
43 YUZU_NON_COPYABLE(OGLTexture);
44
41 OGLTexture() = default; 45 OGLTexture() = default;
42 46
43 OGLTexture(OGLTexture&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 47 OGLTexture(OGLTexture&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -61,8 +65,10 @@ public:
61 GLuint handle = 0; 65 GLuint handle = 0;
62}; 66};
63 67
64class OGLTextureView : private NonCopyable { 68class OGLTextureView final {
65public: 69public:
70 YUZU_NON_COPYABLE(OGLTextureView);
71
66 OGLTextureView() = default; 72 OGLTextureView() = default;
67 73
68 OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 74 OGLTextureView(OGLTextureView&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -86,8 +92,10 @@ public:
86 GLuint handle = 0; 92 GLuint handle = 0;
87}; 93};
88 94
89class OGLSampler : private NonCopyable { 95class OGLSampler final {
90public: 96public:
97 YUZU_NON_COPYABLE(OGLSampler);
98
91 OGLSampler() = default; 99 OGLSampler() = default;
92 100
93 OGLSampler(OGLSampler&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 101 OGLSampler(OGLSampler&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -111,8 +119,10 @@ public:
111 GLuint handle = 0; 119 GLuint handle = 0;
112}; 120};
113 121
114class OGLShader : private NonCopyable { 122class OGLShader final {
115public: 123public:
124 YUZU_NON_COPYABLE(OGLShader);
125
116 OGLShader() = default; 126 OGLShader() = default;
117 127
118 OGLShader(OGLShader&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 128 OGLShader(OGLShader&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -132,8 +142,10 @@ public:
132 GLuint handle = 0; 142 GLuint handle = 0;
133}; 143};
134 144
135class OGLProgram : private NonCopyable { 145class OGLProgram final {
136public: 146public:
147 YUZU_NON_COPYABLE(OGLProgram);
148
137 OGLProgram() = default; 149 OGLProgram() = default;
138 150
139 OGLProgram(OGLProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 151 OGLProgram(OGLProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -154,8 +166,10 @@ public:
154 GLuint handle = 0; 166 GLuint handle = 0;
155}; 167};
156 168
157class OGLAssemblyProgram : private NonCopyable { 169class OGLAssemblyProgram final {
158public: 170public:
171 YUZU_NON_COPYABLE(OGLAssemblyProgram);
172
159 OGLAssemblyProgram() = default; 173 OGLAssemblyProgram() = default;
160 174
161 OGLAssemblyProgram(OGLAssemblyProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 175 OGLAssemblyProgram(OGLAssemblyProgram&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -176,8 +190,10 @@ public:
176 GLuint handle = 0; 190 GLuint handle = 0;
177}; 191};
178 192
179class OGLPipeline : private NonCopyable { 193class OGLPipeline final {
180public: 194public:
195 YUZU_NON_COPYABLE(OGLPipeline);
196
181 OGLPipeline() = default; 197 OGLPipeline() = default;
182 OGLPipeline(OGLPipeline&& o) noexcept : handle{std::exchange<GLuint>(o.handle, 0)} {} 198 OGLPipeline(OGLPipeline&& o) noexcept : handle{std::exchange<GLuint>(o.handle, 0)} {}
183 199
@@ -198,8 +214,10 @@ public:
198 GLuint handle = 0; 214 GLuint handle = 0;
199}; 215};
200 216
201class OGLBuffer : private NonCopyable { 217class OGLBuffer final {
202public: 218public:
219 YUZU_NON_COPYABLE(OGLBuffer);
220
203 OGLBuffer() = default; 221 OGLBuffer() = default;
204 222
205 OGLBuffer(OGLBuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 223 OGLBuffer(OGLBuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -223,8 +241,10 @@ public:
223 GLuint handle = 0; 241 GLuint handle = 0;
224}; 242};
225 243
226class OGLSync : private NonCopyable { 244class OGLSync final {
227public: 245public:
246 YUZU_NON_COPYABLE(OGLSync);
247
228 OGLSync() = default; 248 OGLSync() = default;
229 249
230 OGLSync(OGLSync&& o) noexcept : handle(std::exchange(o.handle, nullptr)) {} 250 OGLSync(OGLSync&& o) noexcept : handle(std::exchange(o.handle, nullptr)) {}
@@ -247,8 +267,10 @@ public:
247 GLsync handle = 0; 267 GLsync handle = 0;
248}; 268};
249 269
250class OGLFramebuffer : private NonCopyable { 270class OGLFramebuffer final {
251public: 271public:
272 YUZU_NON_COPYABLE(OGLFramebuffer);
273
252 OGLFramebuffer() = default; 274 OGLFramebuffer() = default;
253 275
254 OGLFramebuffer(OGLFramebuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 276 OGLFramebuffer(OGLFramebuffer&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
@@ -272,8 +294,10 @@ public:
272 GLuint handle = 0; 294 GLuint handle = 0;
273}; 295};
274 296
275class OGLQuery : private NonCopyable { 297class OGLQuery final {
276public: 298public:
299 YUZU_NON_COPYABLE(OGLQuery);
300
277 OGLQuery() = default; 301 OGLQuery() = default;
278 302
279 OGLQuery(OGLQuery&& o) noexcept : handle(std::exchange(o.handle, 0)) {} 303 OGLQuery(OGLQuery&& o) noexcept : handle(std::exchange(o.handle, 0)) {}
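Every OGL* wrapper above takes YUZU_NON_COPYABLE but deliberately not YUZU_NON_MOVEABLE: the user-defined move operations built on std::exchange are kept, so the GL handles stay move-only RAII objects exactly as before. A usage sketch (Create() is assumed to be the wrappers' handle-acquiring API):

    OGLBuffer a;
    a.Create();                  // acquire a GL name (assumed API)
    OGLBuffer b = std::move(a);  // fine: the move ctor zeroes a.handle
    // OGLBuffer c = b;          // ill-formed: copy ctor deleted by the macro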
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 3bfdf41ba..7d9d4f7ba 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -140,12 +140,12 @@ bool VKScheduler::UpdateRescaling(bool is_rescaling) {
140void VKScheduler::WorkerThread(std::stop_token stop_token) { 140void VKScheduler::WorkerThread(std::stop_token stop_token) {
141 Common::SetCurrentThreadName("yuzu:VulkanWorker"); 141 Common::SetCurrentThreadName("yuzu:VulkanWorker");
142 do { 142 do {
143 if (work_queue.empty()) {
144 wait_cv.notify_all();
145 }
146 std::unique_ptr<CommandChunk> work; 143 std::unique_ptr<CommandChunk> work;
147 { 144 {
148 std::unique_lock lock{work_mutex}; 145 std::unique_lock lock{work_mutex};
146 if (work_queue.empty()) {
147 wait_cv.notify_all();
148 }
149 work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); }); 149 work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); });
150 if (stop_token.stop_requested()) { 150 if (stop_token.stop_requested()) {
151 continue; 151 continue;
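This hunk closes a lost-wakeup window in the worker loop. Before, work_queue.empty() was read and wait_cv notified without holding work_mutex; besides the unsynchronized read of the queue, the notification could fire after a wait_cv waiter had checked its predicate under the lock but before it went to sleep, leaving it to sleep through a signal that never recurs. With the check-and-notify inside the critical section, that interleaving is impossible, since the waiter holds work_mutex for the whole check-then-sleep step:

    // The corrected pattern from the hunk: the emptiness test and the notify
    // run under the same mutex any wait_cv waiter holds, so the notification
    // cannot land between a waiter's predicate check and its sleep.
    std::unique_lock lock{work_mutex};
    if (work_queue.empty()) {
        wait_cv.notify_all();
    }
    work_cv.wait(lock, stop_token, [this] { return !work_queue.empty(); });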
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 1b06c9296..e69aa136b 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -146,6 +146,7 @@ private:
146 using FuncType = TypedCommand<T>; 146 using FuncType = TypedCommand<T>;
147 static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large"); 147 static_assert(sizeof(FuncType) < sizeof(data), "Lambda is too large");
148 148
149 recorded_counts++;
149 command_offset = Common::AlignUp(command_offset, alignof(FuncType)); 150 command_offset = Common::AlignUp(command_offset, alignof(FuncType));
150 if (command_offset > sizeof(data) - sizeof(FuncType)) { 151 if (command_offset > sizeof(data) - sizeof(FuncType)) {
151 return false; 152 return false;
@@ -167,7 +168,7 @@ private:
167 } 168 }
168 169
169 bool Empty() const { 170 bool Empty() const {
170 return command_offset == 0; 171 return recorded_counts == 0;
171 } 172 }
172 173
173 bool HasSubmit() const { 174 bool HasSubmit() const {
@@ -178,6 +179,7 @@ private:
178 Command* first = nullptr; 179 Command* first = nullptr;
179 Command* last = nullptr; 180 Command* last = nullptr;
180 181
182 size_t recorded_counts = 0;
181 size_t command_offset = 0; 183 size_t command_offset = 0;
182 bool submit = false; 184 bool submit = false;
183 alignas(std::max_align_t) std::array<u8, 0x8000> data{}; 185 alignas(std::max_align_t) std::array<u8, 0x8000> data{};
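CommandChunk::Empty() used to infer emptiness from command_offset == 0; it now checks an explicit per-Record() counter, decoupling the test from byte-offset bookkeeping (the offset also absorbs AlignUp padding, so it is a write cursor, not a command count). Side by side:

    // Before: emptiness inferred from the write cursor.
    bool Empty() const { return command_offset == 0; }
    // After: emptiness is an explicit count of Record() calls.
    bool Empty() const { return recorded_counts == 0; }

One asymmetry worth noting: recorded_counts is incremented before the capacity check, so a Record() that returns false for lack of space still bumps the counter of the full chunk; presumably benign, since that chunk is dispatched and the command retried on a fresh one, but the count is then calls attempted rather than commands stored.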
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index ba563b382..cc0534907 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -102,6 +102,10 @@ QString GetButtonName(Common::Input::ButtonNames button_name) {
102 return QObject::tr("Share"); 102 return QObject::tr("Share");
103 case Common::Input::ButtonNames::Options: 103 case Common::Input::ButtonNames::Options:
104 return QObject::tr("Options"); 104 return QObject::tr("Options");
105 case Common::Input::ButtonNames::Home:
106 return QObject::tr("Home");
107 case Common::Input::ButtonNames::Touch:
108 return QObject::tr("Touch");
105 case Common::Input::ButtonNames::ButtonMouseWheel: 109 case Common::Input::ButtonNames::ButtonMouseWheel:
106 return QObject::tr("Wheel", "Indicates the mouse wheel"); 110 return QObject::tr("Wheel", "Indicates the mouse wheel");
107 case Common::Input::ButtonNames::ButtonBackward: 111 case Common::Input::ButtonNames::ButtonBackward:
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index d9e689d14..556d2cdb3 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -965,6 +965,7 @@ void GMainWindow::LinkActionShortcut(QAction* action, const QString& action_name
965 static const QString main_window = QStringLiteral("Main Window"); 965 static const QString main_window = QStringLiteral("Main Window");
966 action->setShortcut(hotkey_registry.GetKeySequence(main_window, action_name)); 966 action->setShortcut(hotkey_registry.GetKeySequence(main_window, action_name));
967 action->setShortcutContext(hotkey_registry.GetShortcutContext(main_window, action_name)); 967 action->setShortcutContext(hotkey_registry.GetShortcutContext(main_window, action_name));
968 action->setAutoRepeat(false);
968 969
969 this->addAction(action); 970 this->addAction(action);
970 971
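setAutoRepeat(false) keeps a held hotkey from retriggering its action through keyboard auto-repeat; without it, toggle shortcuts such as fullscreen or pause fire repeatedly for as long as the key is down. A minimal standalone Qt sketch of the same setup (names and key chosen for illustration; 'window' is any QWidget):

    #include <QAction>
    #include <QKeySequence>

    auto* action = new QAction(QStringLiteral("Toggle Fullscreen"), window);
    action->setShortcut(QKeySequence(Qt::Key_F11));
    action->setShortcutContext(Qt::WindowShortcut);
    action->setAutoRepeat(false); // a held F11 fires the action only once
    window->addAction(action);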