Diffstat (limited to 'src')
-rw-r--r-- src/common/CMakeLists.txt | 4
-rw-r--r-- src/common/x64/xbyak_abi.h | 266
-rw-r--r-- src/common/x64/xbyak_util.h | 47
-rw-r--r-- src/core/hle/kernel/readable_event.cpp | 2
-rw-r--r-- src/core/hle/service/hid/hid.cpp | 13
-rw-r--r-- src/core/hle/service/hid/hid.h | 1
-rw-r--r-- src/core/settings.h | 1
-rw-r--r-- src/video_core/CMakeLists.txt | 10
-rw-r--r-- src/video_core/buffer_cache/buffer_cache.h | 40
-rw-r--r-- src/video_core/engines/maxwell_3d.cpp | 34
-rw-r--r-- src/video_core/engines/maxwell_3d.h | 19
-rw-r--r-- src/video_core/macro/macro.cpp | 45
-rw-r--r-- src/video_core/macro/macro.h | 128
-rw-r--r-- src/video_core/macro/macro_interpreter.cpp (renamed from src/video_core/macro_interpreter.cpp) | 198
-rw-r--r-- src/video_core/macro/macro_interpreter.h (renamed from src/video_core/macro_interpreter.h) | 51
-rw-r--r-- src/video_core/macro/macro_jit_x64.cpp | 640
-rw-r--r-- src/video_core/macro/macro_jit_x64.h | 100
-rw-r--r-- src/video_core/renderer_opengl/gl_device.cpp | 89
-rw-r--r-- src/video_core/renderer_opengl/gl_device.h | 12
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.cpp | 218
-rw-r--r-- src/video_core/renderer_opengl/gl_rasterizer.h | 8
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_cache.cpp | 12
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 123
-rw-r--r-- src/video_core/renderer_opengl/gl_shader_decompiler.h | 6
-rw-r--r-- src/video_core/renderer_opengl/gl_texture_cache.cpp | 123
-rw-r--r-- src/video_core/renderer_opengl/gl_texture_cache.h | 32
-rw-r--r-- src/video_core/renderer_opengl/renderer_opengl.cpp | 3
-rw-r--r-- src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 3
-rw-r--r-- src/video_core/renderer_vulkan/maxwell_to_vk.cpp | 2
-rw-r--r-- src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 3
-rw-r--r-- src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 1
-rw-r--r-- src/video_core/renderer_vulkan/vk_device.cpp | 143
-rw-r--r-- src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 20
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.cpp | 70
-rw-r--r-- src/video_core/renderer_vulkan/vk_rasterizer.h | 16
-rw-r--r-- src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 174
-rw-r--r-- src/video_core/renderer_vulkan/vk_shader_decompiler.h | 12
-rw-r--r-- src/video_core/renderer_vulkan/vk_texture_cache.cpp | 109
-rw-r--r-- src/video_core/renderer_vulkan/vk_texture_cache.h | 33
-rw-r--r-- src/video_core/shader/decode/other.cpp | 16
-rw-r--r-- src/video_core/shader/node.h | 5
-rw-r--r-- src/video_core/texture_cache/format_lookup_table.cpp | 3
-rw-r--r-- src/video_core/texture_cache/surface_base.cpp | 7
-rw-r--r-- src/video_core/texture_cache/surface_base.h | 13
-rw-r--r-- src/video_core/texture_cache/surface_params.cpp | 19
-rw-r--r-- src/video_core/texture_cache/texture_cache.h | 247
-rw-r--r-- src/yuzu/bootmanager.cpp | 3
-rw-r--r-- src/yuzu/configuration/config.cpp | 3
-rw-r--r-- src/yuzu/configuration/configure_debug.cpp | 3
-rw-r--r-- src/yuzu/configuration/configure_debug.ui | 13
-rw-r--r-- src/yuzu/configuration/configure_input_player.cpp | 3
-rw-r--r-- src/yuzu_cmd/config.cpp | 2
-rw-r--r-- src/yuzu_cmd/default_ini.h | 2
-rw-r--r-- src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp | 3
54 files changed, 2388 insertions, 765 deletions
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 264dff546..24b7a083c 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -171,10 +171,12 @@ if(ARCHITECTURE_x86_64)
171 PRIVATE 171 PRIVATE
172 x64/cpu_detect.cpp 172 x64/cpu_detect.cpp
173 x64/cpu_detect.h 173 x64/cpu_detect.h
174 x64/xbyak_abi.h
175 x64/xbyak_util.h
174 ) 176 )
175endif() 177endif()
176 178
177create_target_directory_groups(common) 179create_target_directory_groups(common)
178 180
179target_link_libraries(common PUBLIC Boost::boost fmt::fmt microprofile) 181target_link_libraries(common PUBLIC Boost::boost fmt::fmt microprofile)
180target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd) 182target_link_libraries(common PRIVATE lz4::lz4 zstd::zstd xbyak)
diff --git a/src/common/x64/xbyak_abi.h b/src/common/x64/xbyak_abi.h
new file mode 100644
index 000000000..794da8a52
--- /dev/null
+++ b/src/common/x64/xbyak_abi.h
@@ -0,0 +1,266 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <bitset>
8#include <initializer_list>
9#include <xbyak.h>
10#include "common/assert.h"
11
12namespace Common::X64 {
13
14inline int RegToIndex(const Xbyak::Reg& reg) {
15 using Kind = Xbyak::Reg::Kind;
16 ASSERT_MSG((reg.getKind() & (Kind::REG | Kind::XMM)) != 0,
17 "RegSet only support GPRs and XMM registers.");
18 ASSERT_MSG(reg.getIdx() < 16, "RegSet only supports XXM0-15.");
19 return reg.getIdx() + (reg.getKind() == Kind::REG ? 0 : 16);
20}
21
22inline Xbyak::Reg64 IndexToReg64(int reg_index) {
23 ASSERT(reg_index < 16);
24 return Xbyak::Reg64(reg_index);
25}
26
27inline Xbyak::Xmm IndexToXmm(int reg_index) {
28 ASSERT(reg_index >= 16 && reg_index < 32);
29 return Xbyak::Xmm(reg_index - 16);
30}
31
32inline Xbyak::Reg IndexToReg(int reg_index) {
33 if (reg_index < 16) {
34 return IndexToReg64(reg_index);
35 } else {
36 return IndexToXmm(reg_index);
37 }
38}
39
40inline std::bitset<32> BuildRegSet(std::initializer_list<Xbyak::Reg> regs) {
41 std::bitset<32> bits;
42 for (const Xbyak::Reg& reg : regs) {
43 bits[RegToIndex(reg)] = true;
44 }
45 return bits;
46}
47
48const std::bitset<32> ABI_ALL_GPRS(0x0000FFFF);
49const std::bitset<32> ABI_ALL_XMMS(0xFFFF0000);
50
51#ifdef _WIN32
52
53// Microsoft x64 ABI
54const Xbyak::Reg ABI_RETURN = Xbyak::util::rax;
55const Xbyak::Reg ABI_PARAM1 = Xbyak::util::rcx;
56const Xbyak::Reg ABI_PARAM2 = Xbyak::util::rdx;
57const Xbyak::Reg ABI_PARAM3 = Xbyak::util::r8;
58const Xbyak::Reg ABI_PARAM4 = Xbyak::util::r9;
59
60const std::bitset<32> ABI_ALL_CALLER_SAVED = BuildRegSet({
61 // GPRs
62 Xbyak::util::rcx,
63 Xbyak::util::rdx,
64 Xbyak::util::r8,
65 Xbyak::util::r9,
66 Xbyak::util::r10,
67 Xbyak::util::r11,
68 // XMMs
69 Xbyak::util::xmm0,
70 Xbyak::util::xmm1,
71 Xbyak::util::xmm2,
72 Xbyak::util::xmm3,
73 Xbyak::util::xmm4,
74 Xbyak::util::xmm5,
75});
76
77const std::bitset<32> ABI_ALL_CALLEE_SAVED = BuildRegSet({
78 // GPRs
79 Xbyak::util::rbx,
80 Xbyak::util::rsi,
81 Xbyak::util::rdi,
82 Xbyak::util::rbp,
83 Xbyak::util::r12,
84 Xbyak::util::r13,
85 Xbyak::util::r14,
86 Xbyak::util::r15,
87 // XMMs
88 Xbyak::util::xmm6,
89 Xbyak::util::xmm7,
90 Xbyak::util::xmm8,
91 Xbyak::util::xmm9,
92 Xbyak::util::xmm10,
93 Xbyak::util::xmm11,
94 Xbyak::util::xmm12,
95 Xbyak::util::xmm13,
96 Xbyak::util::xmm14,
97 Xbyak::util::xmm15,
98});
99
100constexpr size_t ABI_SHADOW_SPACE = 0x20;
101
102#else
103
104// System V x86-64 ABI
105const Xbyak::Reg ABI_RETURN = Xbyak::util::rax;
106const Xbyak::Reg ABI_PARAM1 = Xbyak::util::rdi;
107const Xbyak::Reg ABI_PARAM2 = Xbyak::util::rsi;
108const Xbyak::Reg ABI_PARAM3 = Xbyak::util::rdx;
109const Xbyak::Reg ABI_PARAM4 = Xbyak::util::rcx;
110
111const std::bitset<32> ABI_ALL_CALLER_SAVED = BuildRegSet({
112 // GPRs
113 Xbyak::util::rcx,
114 Xbyak::util::rdx,
115 Xbyak::util::rdi,
116 Xbyak::util::rsi,
117 Xbyak::util::r8,
118 Xbyak::util::r9,
119 Xbyak::util::r10,
120 Xbyak::util::r11,
121 // XMMs
122 Xbyak::util::xmm0,
123 Xbyak::util::xmm1,
124 Xbyak::util::xmm2,
125 Xbyak::util::xmm3,
126 Xbyak::util::xmm4,
127 Xbyak::util::xmm5,
128 Xbyak::util::xmm6,
129 Xbyak::util::xmm7,
130 Xbyak::util::xmm8,
131 Xbyak::util::xmm9,
132 Xbyak::util::xmm10,
133 Xbyak::util::xmm11,
134 Xbyak::util::xmm12,
135 Xbyak::util::xmm13,
136 Xbyak::util::xmm14,
137 Xbyak::util::xmm15,
138});
139
140const std::bitset<32> ABI_ALL_CALLEE_SAVED = BuildRegSet({
141 // GPRs
142 Xbyak::util::rbx,
143 Xbyak::util::rbp,
144 Xbyak::util::r12,
145 Xbyak::util::r13,
146 Xbyak::util::r14,
147 Xbyak::util::r15,
148});
149
150constexpr size_t ABI_SHADOW_SPACE = 0;
151
152#endif
153
154inline void ABI_CalculateFrameSize(std::bitset<32> regs, size_t rsp_alignment,
155 size_t needed_frame_size, s32* out_subtraction,
156 s32* out_xmm_offset) {
157 const auto count = (regs & ABI_ALL_GPRS).count();
158 rsp_alignment -= count * 8;
159 size_t subtraction = 0;
160 const auto xmm_count = (regs & ABI_ALL_XMMS).count();
161 if (xmm_count) {
162 // If we have any XMMs to save, we must align the stack here.
163 subtraction = rsp_alignment & 0xF;
164 }
165 subtraction += 0x10 * xmm_count;
166 size_t xmm_base_subtraction = subtraction;
167 subtraction += needed_frame_size;
168 subtraction += ABI_SHADOW_SPACE;
169 // Final alignment.
170 rsp_alignment -= subtraction;
171 subtraction += rsp_alignment & 0xF;
172
173 *out_subtraction = (s32)subtraction;
174 *out_xmm_offset = (s32)(subtraction - xmm_base_subtraction);
175}
176
177inline size_t ABI_PushRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs,
178 size_t rsp_alignment, size_t needed_frame_size = 0) {
179 s32 subtraction, xmm_offset;
180 ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
181 for (std::size_t i = 0; i < regs.size(); ++i) {
182 if (regs[i] && ABI_ALL_GPRS[i]) {
183 code.push(IndexToReg64(static_cast<int>(i)));
184 }
185 }
186 if (subtraction != 0) {
187 code.sub(code.rsp, subtraction);
188 }
189
 195
196 for (std::size_t i = 0; i < regs.size(); ++i) {
197 if (regs[i] && ABI_ALL_XMMS[i]) {
198 code.movaps(code.xword[code.rsp + xmm_offset], IndexToXmm(static_cast<int>(i)));
199 xmm_offset += 0x10;
200 }
201 }
202
203 return ABI_SHADOW_SPACE;
204}
205
206inline void ABI_PopRegistersAndAdjustStack(Xbyak::CodeGenerator& code, std::bitset<32> regs,
207 size_t rsp_alignment, size_t needed_frame_size = 0) {
208 s32 subtraction, xmm_offset;
209 ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
210
211 for (std::size_t i = 0; i < regs.size(); ++i) {
212 if (regs[i] && ABI_ALL_XMMS[i]) {
213 code.movaps(IndexToXmm(static_cast<int>(i)), code.xword[code.rsp + xmm_offset]);
214 xmm_offset += 0x10;
215 }
216 }
217
218 if (subtraction != 0) {
219 code.add(code.rsp, subtraction);
220 }
221
222 // GPRs need to be popped in reverse order
223 for (int i = 15; i >= 0; i--) {
224 if (regs[i]) {
225 code.pop(IndexToReg64(i));
226 }
227 }
228}
229
230inline size_t ABI_PushRegistersAndAdjustStackGPS(Xbyak::CodeGenerator& code, std::bitset<32> regs,
231 size_t rsp_alignment,
232 size_t needed_frame_size = 0) {
233 s32 subtraction, xmm_offset;
234 ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
235
236 for (std::size_t i = 0; i < regs.size(); ++i) {
237 if (regs[i] && ABI_ALL_GPRS[i]) {
238 code.push(IndexToReg64(static_cast<int>(i)));
239 }
240 }
241
242 if (subtraction != 0) {
243 code.sub(code.rsp, subtraction);
244 }
245
246 return ABI_SHADOW_SPACE;
247}
248
249inline void ABI_PopRegistersAndAdjustStackGPS(Xbyak::CodeGenerator& code, std::bitset<32> regs,
250 size_t rsp_alignment, size_t needed_frame_size = 0) {
251 s32 subtraction, xmm_offset;
252 ABI_CalculateFrameSize(regs, rsp_alignment, needed_frame_size, &subtraction, &xmm_offset);
253
254 if (subtraction != 0) {
255 code.add(code.rsp, subtraction);
256 }
257
258 // GPRs need to be popped in reverse order
259 for (int i = 15; i >= 0; i--) {
260 if (regs[i]) {
261 code.pop(IndexToReg64(i));
262 }
263 }
264}
265
266} // namespace Common::X64
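For reference, a minimal sketch of how a JIT backend might pair these helpers around its body. The names are illustrative and not part of this commit; the rsp_alignment of 8 reflects the return address already pushed at function entry:

    // Illustrative sketch, assuming only this header and xbyak.
    #include <xbyak.h>
    #include "common/x64/xbyak_abi.h"

    class ExampleGen : public Xbyak::CodeGenerator {
    public:
        ExampleGen() {
            namespace X64 = Common::X64;
            // Preserve the callee-saved set (rbx, rbp, r12-r15, plus
            // xmm6-xmm15 on Windows) before the body clobbers it.
            X64::ABI_PushRegistersAndAdjustStack(*this, X64::ABI_ALL_CALLEE_SAVED, 8);
            mov(rbx, 0xDEADBEEF); // body may now use callee-saved registers
            // The pop must mirror the push: same register set, same alignment.
            X64::ABI_PopRegistersAndAdjustStack(*this, X64::ABI_ALL_CALLEE_SAVED, 8);
            ret();
        }
    };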
diff --git a/src/common/x64/xbyak_util.h b/src/common/x64/xbyak_util.h
new file mode 100644
index 000000000..df17f8cbe
--- /dev/null
+++ b/src/common/x64/xbyak_util.h
@@ -0,0 +1,47 @@
1// Copyright 2016 Citra Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <type_traits>
8#include <xbyak.h>
9#include "common/x64/xbyak_abi.h"
10
11namespace Common::X64 {
12
13// Constants for use with cmpps/cmpss
14enum {
15 CMP_EQ = 0,
16 CMP_LT = 1,
17 CMP_LE = 2,
18 CMP_UNORD = 3,
19 CMP_NEQ = 4,
20 CMP_NLT = 5,
21 CMP_NLE = 6,
22 CMP_ORD = 7,
23};
24
25constexpr bool IsWithin2G(uintptr_t ref, uintptr_t target) {
26 const u64 distance = target - (ref + 5);
27 return !(distance >= 0x8000'0000ULL && distance <= ~0x8000'0000ULL);
28}
29
30inline bool IsWithin2G(const Xbyak::CodeGenerator& code, uintptr_t target) {
31 return IsWithin2G(reinterpret_cast<uintptr_t>(code.getCurr()), target);
32}
33
34template <typename T>
35inline void CallFarFunction(Xbyak::CodeGenerator& code, const T f) {
36 static_assert(std::is_pointer_v<T>, "Argument must be a (function) pointer.");
37 size_t addr = reinterpret_cast<size_t>(f);
38 if (IsWithin2G(code, addr)) {
39 code.call(f);
40 } else {
41 // ABI_RETURN is a safe temp register to use before a call
42 code.mov(ABI_RETURN, addr);
43 code.call(ABI_RETURN);
44 }
45}
46
47} // namespace Common::X64
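The ref + 5 in IsWithin2G accounts for the 5-byte rel32 call encoding, whose displacement is measured from the end of the instruction. A hypothetical trampoline built on the helper:

    #include <cstdio>
    #include <xbyak.h>
    #include "common/x64/xbyak_util.h"

    // Hypothetical host function for the generated code to call.
    static void HostTrap() {
        std::puts("reached host code");
    }

    class TrampolineGen : public Xbyak::CodeGenerator {
    public:
        TrampolineGen() {
            // Emits `call rel32` when HostTrap is reachable from here,
            // otherwise `mov rax, imm64; call rax` (rax is ABI_RETURN,
            // always safe to clobber right before a call).
            Common::X64::CallFarFunction(*this, &HostTrap);
            ret();
        }
    };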
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 00860fcbd..ef5e19e63 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -38,7 +38,7 @@ void ReadableEvent::Clear() {
38 38
39ResultCode ReadableEvent::Reset() { 39ResultCode ReadableEvent::Reset() {
40 if (!is_signaled) { 40 if (!is_signaled) {
41 LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}", 41 LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
42 GetObjectId(), GetTypeName(), GetName()); 42 GetObjectId(), GetTypeName(), GetName());
43 return ERR_INVALID_STATE; 43 return ERR_INVALID_STATE;
44 } 44 }
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index c84cb1483..72a050de2 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -161,7 +161,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
161 {40, nullptr, "AcquireXpadIdEventHandle"}, 161 {40, nullptr, "AcquireXpadIdEventHandle"},
162 {41, nullptr, "ReleaseXpadIdEventHandle"}, 162 {41, nullptr, "ReleaseXpadIdEventHandle"},
163 {51, &Hid::ActivateXpad, "ActivateXpad"}, 163 {51, &Hid::ActivateXpad, "ActivateXpad"},
164 {55, nullptr, "GetXpadIds"}, 164 {55, &Hid::GetXpadIDs, "GetXpadIds"},
165 {56, nullptr, "ActivateJoyXpad"}, 165 {56, nullptr, "ActivateJoyXpad"},
166 {58, nullptr, "GetJoyXpadLifoHandle"}, 166 {58, nullptr, "GetJoyXpadLifoHandle"},
167 {59, nullptr, "GetJoyXpadIds"}, 167 {59, nullptr, "GetJoyXpadIds"},
@@ -319,6 +319,17 @@ void Hid::ActivateXpad(Kernel::HLERequestContext& ctx) {
319 rb.Push(RESULT_SUCCESS); 319 rb.Push(RESULT_SUCCESS);
320} 320}
321 321
322void Hid::GetXpadIDs(Kernel::HLERequestContext& ctx) {
323 IPC::RequestParser rp{ctx};
324 const auto applet_resource_user_id{rp.Pop<u64>()};
325
326 LOG_DEBUG(Service_HID, "(STUBBED) called, applet_resource_user_id={}", applet_resource_user_id);
327
328 IPC::ResponseBuilder rb{ctx, 3};
329 rb.Push(RESULT_SUCCESS);
330 rb.Push(0);
331}
332
322void Hid::ActivateDebugPad(Kernel::HLERequestContext& ctx) { 333void Hid::ActivateDebugPad(Kernel::HLERequestContext& ctx) {
323 IPC::RequestParser rp{ctx}; 334 IPC::RequestParser rp{ctx};
324 const auto applet_resource_user_id{rp.Pop<u64>()}; 335 const auto applet_resource_user_id{rp.Pop<u64>()};
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index c8ed4ad8b..d481a75f8 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -86,6 +86,7 @@ public:
86private: 86private:
87 void CreateAppletResource(Kernel::HLERequestContext& ctx); 87 void CreateAppletResource(Kernel::HLERequestContext& ctx);
88 void ActivateXpad(Kernel::HLERequestContext& ctx); 88 void ActivateXpad(Kernel::HLERequestContext& ctx);
89 void GetXpadIDs(Kernel::HLERequestContext& ctx);
89 void ActivateDebugPad(Kernel::HLERequestContext& ctx); 90 void ActivateDebugPad(Kernel::HLERequestContext& ctx);
90 void ActivateTouchScreen(Kernel::HLERequestContext& ctx); 91 void ActivateTouchScreen(Kernel::HLERequestContext& ctx);
91 void ActivateMouse(Kernel::HLERequestContext& ctx); 92 void ActivateMouse(Kernel::HLERequestContext& ctx);
diff --git a/src/core/settings.h b/src/core/settings.h
index 78eb33737..36cd66fd4 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -474,6 +474,7 @@ struct Values {
474 bool reporting_services; 474 bool reporting_services;
475 bool quest_flag; 475 bool quest_flag;
476 bool disable_cpu_opt; 476 bool disable_cpu_opt;
477 bool disable_macro_jit;
477 478
478 // BCAT 479 // BCAT
479 std::string bcat_backend; 480 std::string bcat_backend;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index f00c71dae..2bf8d68ce 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -25,6 +25,12 @@ add_library(video_core STATIC
25 engines/shader_bytecode.h 25 engines/shader_bytecode.h
26 engines/shader_header.h 26 engines/shader_header.h
27 engines/shader_type.h 27 engines/shader_type.h
28 macro/macro.cpp
29 macro/macro.h
30 macro/macro_interpreter.cpp
31 macro/macro_interpreter.h
32 macro/macro_jit_x64.cpp
33 macro/macro_jit_x64.h
28 fence_manager.h 34 fence_manager.h
29 gpu.cpp 35 gpu.cpp
30 gpu.h 36 gpu.h
@@ -36,8 +42,6 @@ add_library(video_core STATIC
36 gpu_thread.h 42 gpu_thread.h
37 guest_driver.cpp 43 guest_driver.cpp
38 guest_driver.h 44 guest_driver.h
39 macro_interpreter.cpp
40 macro_interpreter.h
41 memory_manager.cpp 45 memory_manager.cpp
42 memory_manager.h 46 memory_manager.h
43 morton.cpp 47 morton.cpp
@@ -229,7 +233,7 @@ endif()
229create_target_directory_groups(video_core) 233create_target_directory_groups(video_core)
230 234
231target_link_libraries(video_core PUBLIC common core) 235target_link_libraries(video_core PUBLIC common core)
232target_link_libraries(video_core PRIVATE glad) 236target_link_libraries(video_core PRIVATE glad xbyak)
233 237
234if (ENABLE_VULKAN) 238if (ENABLE_VULKAN)
235 target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include) 239 target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index d9a4a1b4d..b88fce2cd 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -56,24 +56,28 @@ public:
56 if (use_fast_cbuf || size < max_stream_size) { 56 if (use_fast_cbuf || size < max_stream_size) {
57 if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) { 57 if (!is_written && !IsRegionWritten(cpu_addr, cpu_addr + size - 1)) {
58 auto& memory_manager = system.GPU().MemoryManager(); 58 auto& memory_manager = system.GPU().MemoryManager();
59 const bool is_granular = memory_manager.IsGranularRange(gpu_addr, size);
59 if (use_fast_cbuf) { 60 if (use_fast_cbuf) {
60 if (memory_manager.IsGranularRange(gpu_addr, size)) { 61 u8* dest;
61 const auto host_ptr = memory_manager.GetPointer(gpu_addr); 62 if (is_granular) {
62 return ConstBufferUpload(host_ptr, size); 63 dest = memory_manager.GetPointer(gpu_addr);
63 } else { 64 } else {
64 staging_buffer.resize(size); 65 staging_buffer.resize(size);
65 memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size); 66 dest = staging_buffer.data();
66 return ConstBufferUpload(staging_buffer.data(), size); 67 memory_manager.ReadBlockUnsafe(gpu_addr, dest, size);
67 } 68 }
69 return ConstBufferUpload(dest, size);
70 }
71 if (is_granular) {
72 u8* const host_ptr = memory_manager.GetPointer(gpu_addr);
73 return StreamBufferUpload(size, alignment, [host_ptr, size](u8* dest) {
74 std::memcpy(dest, host_ptr, size);
75 });
68 } else { 76 } else {
69 if (memory_manager.IsGranularRange(gpu_addr, size)) { 77 return StreamBufferUpload(
70 const auto host_ptr = memory_manager.GetPointer(gpu_addr); 78 size, alignment, [&memory_manager, gpu_addr, size](u8* dest) {
71 return StreamBufferUpload(host_ptr, size, alignment); 79 memory_manager.ReadBlockUnsafe(gpu_addr, dest, size);
72 } else { 80 });
73 staging_buffer.resize(size);
74 memory_manager.ReadBlockUnsafe(gpu_addr, staging_buffer.data(), size);
75 return StreamBufferUpload(staging_buffer.data(), size, alignment);
76 }
77 } 81 }
78 } 82 }
79 } 83 }
@@ -101,7 +105,9 @@ public:
101 BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size, 105 BufferInfo UploadHostMemory(const void* raw_pointer, std::size_t size,
102 std::size_t alignment = 4) { 106 std::size_t alignment = 4) {
103 std::lock_guard lock{mutex}; 107 std::lock_guard lock{mutex};
104 return StreamBufferUpload(raw_pointer, size, alignment); 108 return StreamBufferUpload(size, alignment, [raw_pointer, size](u8* dest) {
109 std::memcpy(dest, raw_pointer, size);
110 });
105 } 111 }
106 112
107 void Map(std::size_t max_size) { 113 void Map(std::size_t max_size) {
@@ -424,11 +430,11 @@ private:
424 map->MarkAsModified(false, 0); 430 map->MarkAsModified(false, 0);
425 } 431 }
426 432
427 BufferInfo StreamBufferUpload(const void* raw_pointer, std::size_t size, 433 template <typename Callable>
428 std::size_t alignment) { 434 BufferInfo StreamBufferUpload(std::size_t size, std::size_t alignment, Callable&& callable) {
429 AlignBuffer(alignment); 435 AlignBuffer(alignment);
430 const std::size_t uploaded_offset = buffer_offset; 436 const std::size_t uploaded_offset = buffer_offset;
431 std::memcpy(buffer_ptr, raw_pointer, size); 437 callable(buffer_ptr);
432 438
433 buffer_ptr += size; 439 buffer_ptr += size;
434 buffer_offset += size; 440 buffer_offset += size;
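The callback-based StreamBufferUpload lets granular copies (a plain std::memcpy from a host pointer) and scattered reads (ReadBlockUnsafe straight into the destination) share one upload path, with no staging copy for the granular case. A standalone sketch of the pattern using mock types, not yuzu's actual stream buffer:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    class MockStreamBuffer {
    public:
        // Reserves `size` bytes and lets the callable fill them in place.
        template <typename Callable>
        std::size_t Upload(std::size_t size, Callable&& write) {
            const std::size_t offset = storage.size();
            storage.resize(offset + size);
            write(storage.data() + offset);
            return offset;
        }

    private:
        std::vector<std::uint8_t> storage;
    };

    // A caller with contiguous data writes with memcpy; one with scattered
    // data would decode into `dest` directly inside its own lambda.
    std::size_t UploadContiguous(MockStreamBuffer& buf, const std::uint8_t* src,
                                 std::size_t size) {
        return buf.Upload(size, [src, size](std::uint8_t* dest) {
            std::memcpy(dest, src, size);
        });
    }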
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 024c9e43b..e46b153f9 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -25,9 +25,8 @@ constexpr u32 MacroRegistersStart = 0xE00;
25Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 25Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
26 MemoryManager& memory_manager) 26 MemoryManager& memory_manager)
27 : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager}, 27 : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager},
28 macro_interpreter{*this}, upload_state{memory_manager, regs.upload} { 28 macro_engine{GetMacroEngine(*this)}, upload_state{memory_manager, regs.upload} {
29 dirty.flags.flip(); 29 dirty.flags.flip();
30
31 InitializeRegisterDefaults(); 30 InitializeRegisterDefaults();
32} 31}
33 32
@@ -106,7 +105,11 @@ void Maxwell3D::InitializeRegisterDefaults() {
106 regs.rasterize_enable = 1; 105 regs.rasterize_enable = 1;
107 regs.rt_separate_frag_data = 1; 106 regs.rt_separate_frag_data = 1;
108 regs.framebuffer_srgb = 1; 107 regs.framebuffer_srgb = 1;
108 regs.line_width_aliased = 1.0f;
109 regs.line_width_smooth = 1.0f;
109 regs.front_face = Maxwell3D::Regs::FrontFace::ClockWise; 110 regs.front_face = Maxwell3D::Regs::FrontFace::ClockWise;
111 regs.polygon_mode_back = Maxwell3D::Regs::PolygonMode::Fill;
112 regs.polygon_mode_front = Maxwell3D::Regs::PolygonMode::Fill;
110 113
111 shadow_state = regs; 114 shadow_state = regs;
112 115
@@ -116,7 +119,7 @@ void Maxwell3D::InitializeRegisterDefaults() {
116 mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true; 119 mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true;
117} 120}
118 121
119void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters) { 122void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters) {
120 // Reset the current macro. 123 // Reset the current macro.
121 executing_macro = 0; 124 executing_macro = 0;
122 125
@@ -125,7 +128,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3
125 ((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size()); 128 ((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size());
126 129
127 // Execute the current macro. 130 // Execute the current macro.
128 macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters); 131 macro_engine->Execute(macro_positions[entry], parameters);
129 if (mme_draw.current_mode != MMEDrawMode::Undefined) { 132 if (mme_draw.current_mode != MMEDrawMode::Undefined) {
130 FlushMMEInlineDraw(); 133 FlushMMEInlineDraw();
131 } 134 }
@@ -161,7 +164,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
161 164
162 // Call the macro when there are no more parameters in the command buffer 165 // Call the macro when there are no more parameters in the command buffer
163 if (is_last_call) { 166 if (is_last_call) {
164 CallMacroMethod(executing_macro, macro_params.size(), macro_params.data()); 167 CallMacroMethod(executing_macro, macro_params);
165 macro_params.clear(); 168 macro_params.clear();
166 } 169 }
167 return; 170 return;
@@ -197,7 +200,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
197 break; 200 break;
198 } 201 }
199 case MAXWELL3D_REG_INDEX(macros.data): { 202 case MAXWELL3D_REG_INDEX(macros.data): {
200 ProcessMacroUpload(arg); 203 macro_engine->AddCode(regs.macros.upload_address, arg);
201 break; 204 break;
202 } 205 }
203 case MAXWELL3D_REG_INDEX(macros.bind): { 206 case MAXWELL3D_REG_INDEX(macros.bind): {
@@ -306,7 +309,7 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
306 309
307 // Call the macro when there are no more parameters in the command buffer 310 // Call the macro when there are no more parameters in the command buffer
308 if (amount == methods_pending) { 311 if (amount == methods_pending) {
309 CallMacroMethod(executing_macro, macro_params.size(), macro_params.data()); 312 CallMacroMethod(executing_macro, macro_params);
310 macro_params.clear(); 313 macro_params.clear();
311 } 314 }
312 return; 315 return;
@@ -420,9 +423,7 @@ void Maxwell3D::FlushMMEInlineDraw() {
420} 423}
421 424
422void Maxwell3D::ProcessMacroUpload(u32 data) { 425void Maxwell3D::ProcessMacroUpload(u32 data) {
423 ASSERT_MSG(regs.macros.upload_address < macro_memory.size(), 426 macro_engine->AddCode(regs.macros.upload_address++, data);
424 "upload_address exceeded macro_memory size!");
425 macro_memory[regs.macros.upload_address++] = data;
426} 427}
427 428
428void Maxwell3D::ProcessMacroBind(u32 data) { 429void Maxwell3D::ProcessMacroBind(u32 data) {
@@ -457,8 +458,9 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
457 458
458void Maxwell3D::ProcessQueryGet() { 459void Maxwell3D::ProcessQueryGet() {
459 // TODO(Subv): Support the other query units. 460 // TODO(Subv): Support the other query units.
460 ASSERT_MSG(regs.query.query_get.unit == Regs::QueryUnit::Crop, 461 if (regs.query.query_get.unit != Regs::QueryUnit::Crop) {
461 "Units other than CROP are unimplemented"); 462 LOG_DEBUG(HW_GPU, "Units other than CROP are unimplemented");
463 }
462 464
463 switch (regs.query.query_get.operation) { 465 switch (regs.query.query_get.operation) {
464 case Regs::QueryOperation::Release: 466 case Regs::QueryOperation::Release:
@@ -534,8 +536,8 @@ void Maxwell3D::ProcessCounterReset() {
534 rasterizer.ResetCounter(QueryType::SamplesPassed); 536 rasterizer.ResetCounter(QueryType::SamplesPassed);
535 break; 537 break;
536 default: 538 default:
537 LOG_WARNING(Render_OpenGL, "Unimplemented counter reset={}", 539 LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}",
538 static_cast<int>(regs.counter_reset)); 540 static_cast<int>(regs.counter_reset));
539 break; 541 break;
540 } 542 }
541} 543}
@@ -592,8 +594,8 @@ std::optional<u64> Maxwell3D::GetQueryResult() {
592 system.GPU().GetTicks()); 594 system.GPU().GetTicks());
593 return {}; 595 return {};
594 default: 596 default:
595 UNIMPLEMENTED_MSG("Unimplemented query select type {}", 597 LOG_DEBUG(HW_GPU, "Unimplemented query select type {}",
596 static_cast<u32>(regs.query.query_get.select.Value())); 598 static_cast<u32>(regs.query.query_get.select.Value()));
597 return 1; 599 return 1;
598 } 600 }
599} 601}
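Each macro occupies two consecutive methods in the 0xE00+ range, which is why CallMacroMethod halves the offset before wrapping it to a slot in macro_positions. The dispatch arithmetic in isolation (the 0x80 table size here is an assumption for illustration):

    #include <cstdint>

    constexpr std::uint32_t MacroRegistersStart = 0xE00;

    // Maps a macro method to its entry in the bound-macro table; each macro
    // spans two methods, hence the shift by one.
    constexpr std::uint32_t MacroEntry(std::uint32_t method, std::uint32_t num_positions) {
        return ((method - MacroRegistersStart) >> 1) % num_positions;
    }

    static_assert(MacroEntry(0xE00, 0x80) == 0);
    static_assert(MacroEntry(0xE02, 0x80) == 1);
    static_assert(MacroEntry(0xE04, 0x80) == 2);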
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 05dd6b39b..79fc9bbea 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -23,7 +23,7 @@
23#include "video_core/engines/engine_upload.h" 23#include "video_core/engines/engine_upload.h"
24#include "video_core/engines/shader_type.h" 24#include "video_core/engines/shader_type.h"
25#include "video_core/gpu.h" 25#include "video_core/gpu.h"
26#include "video_core/macro_interpreter.h" 26#include "video_core/macro/macro.h"
27#include "video_core/textures/texture.h" 27#include "video_core/textures/texture.h"
28 28
29namespace Core { 29namespace Core {
@@ -598,6 +598,7 @@ public:
598 BitField<4, 3, u32> block_height; 598 BitField<4, 3, u32> block_height;
599 BitField<8, 3, u32> block_depth; 599 BitField<8, 3, u32> block_depth;
600 BitField<12, 1, InvMemoryLayout> type; 600 BitField<12, 1, InvMemoryLayout> type;
601 BitField<16, 1, u32> is_3d;
601 } memory_layout; 602 } memory_layout;
602 union { 603 union {
603 BitField<0, 16, u32> layers; 604 BitField<0, 16, u32> layers;
@@ -1411,15 +1412,6 @@ public:
1411 1412
1412 const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override; 1413 const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override;
1413 1414
1414 /// Memory for macro code - it's undetermined how big this is, however 1MB is much larger than
1415 /// we've seen used.
1416 using MacroMemory = std::array<u32, 0x40000>;
1417
1418 /// Gets a reference to macro memory.
1419 const MacroMemory& GetMacroMemory() const {
1420 return macro_memory;
1421 }
1422
1423 bool ShouldExecute() const { 1415 bool ShouldExecute() const {
1424 return execute_on; 1416 return execute_on;
1425 } 1417 }
@@ -1468,16 +1460,13 @@ private:
1468 1460
1469 std::array<bool, Regs::NUM_REGS> mme_inline{}; 1461 std::array<bool, Regs::NUM_REGS> mme_inline{};
1470 1462
1471 /// Memory for macro code
1472 MacroMemory macro_memory;
1473
1474 /// Macro method that is currently being executed / being fed parameters. 1463 /// Macro method that is currently being executed / being fed parameters.
1475 u32 executing_macro = 0; 1464 u32 executing_macro = 0;
1476 /// Parameters that have been submitted to the macro call so far. 1465 /// Parameters that have been submitted to the macro call so far.
1477 std::vector<u32> macro_params; 1466 std::vector<u32> macro_params;
1478 1467
1479 /// Interpreter for the macro codes uploaded to the GPU. 1468 /// Interpreter for the macro codes uploaded to the GPU.
1480 MacroInterpreter macro_interpreter; 1469 std::unique_ptr<MacroEngine> macro_engine;
1481 1470
1482 static constexpr u32 null_cb_data = 0xFFFFFFFF; 1471 static constexpr u32 null_cb_data = 0xFFFFFFFF;
1483 struct { 1472 struct {
@@ -1506,7 +1495,7 @@ private:
1506 * @param num_parameters Number of arguments 1495 * @param num_parameters Number of arguments
1507 * @param parameters Arguments to the method call 1496 * @param parameters Arguments to the method call
1508 */ 1497 */
1509 void CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters); 1498 void CallMacroMethod(u32 method, const std::vector<u32>& parameters);
1510 1499
1511 /// Handles writes to the macro uploading register. 1500 /// Handles writes to the macro uploading register.
1512 void ProcessMacroUpload(u32 data); 1501 void ProcessMacroUpload(u32 data);
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
new file mode 100644
index 000000000..89077a2d8
--- /dev/null
+++ b/src/video_core/macro/macro.cpp
@@ -0,0 +1,45 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "core/settings.h"
8#include "video_core/macro/macro.h"
9#include "video_core/macro/macro_interpreter.h"
10#include "video_core/macro/macro_jit_x64.h"
11
12namespace Tegra {
13
14void MacroEngine::AddCode(u32 method, u32 data) {
15 uploaded_macro_code[method].push_back(data);
16}
17
18void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
19 auto compiled_macro = macro_cache.find(method);
20 if (compiled_macro != macro_cache.end()) {
21 compiled_macro->second->Execute(parameters, method);
22 } else {
23 // Macro not compiled, check if it's uploaded and if so, compile it
24 auto macro_code = uploaded_macro_code.find(method);
25 if (macro_code == uploaded_macro_code.end()) {
26 UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
27 return;
28 }
29 macro_cache[method] = Compile(macro_code->second);
30 macro_cache[method]->Execute(parameters, method);
31 }
32}
33
34std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
35 if (Settings::values.disable_macro_jit) {
36 return std::make_unique<MacroInterpreter>(maxwell3d);
37 }
38#ifdef ARCHITECTURE_x86_64
39 return std::make_unique<MacroJITx64>(maxwell3d);
40#else
41 return std::make_unique<MacroInterpreter>(maxwell3d);
42#endif
43}
44
45} // namespace Tegra
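MacroEngine::Execute compiles a macro the first time it is called and reuses the cached CachedMacro afterwards. A hypothetical caller-side sketch; the function and variable names are illustrative only:

    #include <cstdint>
    #include <vector>
    #include "video_core/macro/macro.h"

    void UploadAndRun(Tegra::MacroEngine& engine, std::uint32_t method,
                      const std::vector<std::uint32_t>& words,
                      const std::vector<std::uint32_t>& params) {
        for (const std::uint32_t word : words) {
            engine.AddCode(method, word); // appended to uploaded_macro_code[method]
        }
        engine.Execute(method, params); // compiled on the first call, cached after
    }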
diff --git a/src/video_core/macro/macro.h b/src/video_core/macro/macro.h
new file mode 100644
index 000000000..b76ed891f
--- /dev/null
+++ b/src/video_core/macro/macro.h
@@ -0,0 +1,128 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9#include <vector>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12
13namespace Tegra {
14namespace Engines {
15class Maxwell3D;
16}
17namespace Macro {
18constexpr std::size_t NUM_MACRO_REGISTERS = 8;
19enum class Operation : u32 {
20 ALU = 0,
21 AddImmediate = 1,
22 ExtractInsert = 2,
23 ExtractShiftLeftImmediate = 3,
24 ExtractShiftLeftRegister = 4,
25 Read = 5,
26 Unused = 6, // This operation doesn't seem to be a valid encoding.
27 Branch = 7,
28};
29
30enum class ALUOperation : u32 {
31 Add = 0,
32 AddWithCarry = 1,
33 Subtract = 2,
34 SubtractWithBorrow = 3,
35 // Operations 4-7 don't seem to be valid encodings.
36 Xor = 8,
37 Or = 9,
38 And = 10,
39 AndNot = 11,
40 Nand = 12
41};
42
43enum class ResultOperation : u32 {
44 IgnoreAndFetch = 0,
45 Move = 1,
46 MoveAndSetMethod = 2,
47 FetchAndSend = 3,
48 MoveAndSend = 4,
49 FetchAndSetMethod = 5,
50 MoveAndSetMethodFetchAndSend = 6,
51 MoveAndSetMethodSend = 7
52};
53
54enum class BranchCondition : u32 {
55 Zero = 0,
56 NotZero = 1,
57};
58
59union Opcode {
60 u32 raw;
61 BitField<0, 3, Operation> operation;
62 BitField<4, 3, ResultOperation> result_operation;
63 BitField<4, 1, BranchCondition> branch_condition;
64 // If set on a branch, then the branch doesn't have a delay slot.
65 BitField<5, 1, u32> branch_annul;
66 BitField<7, 1, u32> is_exit;
67 BitField<8, 3, u32> dst;
68 BitField<11, 3, u32> src_a;
69 BitField<14, 3, u32> src_b;
70 // The signed immediate overlaps the second source operand and the alu operation.
71 BitField<14, 18, s32> immediate;
72
73 BitField<17, 5, ALUOperation> alu_operation;
74
75 // Bitfield instructions data
76 BitField<17, 5, u32> bf_src_bit;
77 BitField<22, 5, u32> bf_size;
78 BitField<27, 5, u32> bf_dst_bit;
79
80 u32 GetBitfieldMask() const {
81 return (1 << bf_size) - 1;
82 }
83
84 s32 GetBranchTarget() const {
85 return static_cast<s32>(immediate * sizeof(u32));
86 }
87};
88
89union MethodAddress {
90 u32 raw;
91 BitField<0, 12, u32> address;
92 BitField<12, 6, u32> increment;
93};
94
95} // namespace Macro
96
97class CachedMacro {
98public:
99 virtual ~CachedMacro() = default;
100 /**
101 * Executes the macro code with the specified input parameters.
102 * @param code The macro byte code to execute
103 * @param parameters The parameters of the macro
104 */
105 virtual void Execute(const std::vector<u32>& parameters, u32 method) = 0;
106};
107
108class MacroEngine {
109public:
110 virtual ~MacroEngine() = default;
111
 112 // Stores the uploaded macro code so it can be compiled when it is called.
113 void AddCode(u32 method, u32 data);
114
 115 // Compiles the macro if it's not in the cache, and executes the compiled macro
116 void Execute(u32 method, const std::vector<u32>& parameters);
117
118protected:
119 virtual std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) = 0;
120
121private:
122 std::unordered_map<u32, std::unique_ptr<CachedMacro>> macro_cache;
123 std::unordered_map<u32, std::vector<u32>> uploaded_macro_code;
124};
125
126std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
127
128} // namespace Tegra
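To make the Opcode layout concrete, here is a hand-assembled word for r3 = r1 + r2 with ResultOperation::Move, decoded through the union; the word itself is illustrative:

    #include <cassert>
    #include <cstdint>
    #include "video_core/macro/macro.h"

    void DecodeExample() {
        using namespace Tegra::Macro;
        constexpr std::uint32_t word = (0u << 0)    // Operation::ALU
                                     | (1u << 4)    // ResultOperation::Move
                                     | (3u << 8)    // dst   = r3
                                     | (1u << 11)   // src_a = r1
                                     | (2u << 14)   // src_b = r2
                                     | (0u << 17);  // ALUOperation::Add
        Opcode op{};
        op.raw = word;
        assert(op.operation == Operation::ALU);
        assert(op.result_operation == ResultOperation::Move);
        assert(op.dst == 3 && op.src_a == 1 && op.src_b == 2);
    }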
diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
index 947364928..5edff27aa 100644
--- a/src/video_core/macro_interpreter.cpp
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -1,4 +1,4 @@
1// Copyright 2018 yuzu Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
@@ -6,109 +6,46 @@
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "common/microprofile.h" 7#include "common/microprofile.h"
8#include "video_core/engines/maxwell_3d.h" 8#include "video_core/engines/maxwell_3d.h"
9#include "video_core/macro_interpreter.h" 9#include "video_core/macro/macro_interpreter.h"
10 10
11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192)); 11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
12 12
13namespace Tegra { 13namespace Tegra {
14namespace { 14MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
15enum class Operation : u32 {
16 ALU = 0,
17 AddImmediate = 1,
18 ExtractInsert = 2,
19 ExtractShiftLeftImmediate = 3,
20 ExtractShiftLeftRegister = 4,
21 Read = 5,
22 Unused = 6, // This operation doesn't seem to be a valid encoding.
23 Branch = 7,
24};
25} // Anonymous namespace
26
27enum class MacroInterpreter::ALUOperation : u32 {
28 Add = 0,
29 AddWithCarry = 1,
30 Subtract = 2,
31 SubtractWithBorrow = 3,
32 // Operations 4-7 don't seem to be valid encodings.
33 Xor = 8,
34 Or = 9,
35 And = 10,
36 AndNot = 11,
37 Nand = 12
38};
39
40enum class MacroInterpreter::ResultOperation : u32 {
41 IgnoreAndFetch = 0,
42 Move = 1,
43 MoveAndSetMethod = 2,
44 FetchAndSend = 3,
45 MoveAndSend = 4,
46 FetchAndSetMethod = 5,
47 MoveAndSetMethodFetchAndSend = 6,
48 MoveAndSetMethodSend = 7
49};
50
51enum class MacroInterpreter::BranchCondition : u32 {
52 Zero = 0,
53 NotZero = 1,
54};
55
56union MacroInterpreter::Opcode {
57 u32 raw;
58 BitField<0, 3, Operation> operation;
59 BitField<4, 3, ResultOperation> result_operation;
60 BitField<4, 1, BranchCondition> branch_condition;
61 // If set on a branch, then the branch doesn't have a delay slot.
62 BitField<5, 1, u32> branch_annul;
63 BitField<7, 1, u32> is_exit;
64 BitField<8, 3, u32> dst;
65 BitField<11, 3, u32> src_a;
66 BitField<14, 3, u32> src_b;
67 // The signed immediate overlaps the second source operand and the alu operation.
68 BitField<14, 18, s32> immediate;
69
70 BitField<17, 5, ALUOperation> alu_operation;
71
72 // Bitfield instructions data
73 BitField<17, 5, u32> bf_src_bit;
74 BitField<22, 5, u32> bf_size;
75 BitField<27, 5, u32> bf_dst_bit;
76
77 u32 GetBitfieldMask() const {
78 return (1 << bf_size) - 1;
79 }
80 15
81 s32 GetBranchTarget() const { 16std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
82 return static_cast<s32>(immediate * sizeof(u32)); 17 return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
83 } 18}
84};
85 19
86MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {} 20MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
21 const std::vector<u32>& code)
22 : maxwell3d(maxwell3d), code(code) {}
87 23
88void MacroInterpreter::Execute(u32 offset, std::size_t num_parameters, const u32* parameters) { 24void MacroInterpreterImpl::Execute(const std::vector<u32>& parameters, u32 method) {
89 MICROPROFILE_SCOPE(MacroInterp); 25 MICROPROFILE_SCOPE(MacroInterp);
90 Reset(); 26 Reset();
91 27
92 registers[1] = parameters[0]; 28 registers[1] = parameters[0];
29 num_parameters = parameters.size();
93 30
94 if (num_parameters > parameters_capacity) { 31 if (num_parameters > parameters_capacity) {
95 parameters_capacity = num_parameters; 32 parameters_capacity = num_parameters;
96 this->parameters = std::make_unique<u32[]>(num_parameters); 33 this->parameters = std::make_unique<u32[]>(num_parameters);
97 } 34 }
98 std::memcpy(this->parameters.get(), parameters, num_parameters * sizeof(u32)); 35 std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
99 this->num_parameters = num_parameters; 36 this->num_parameters = num_parameters;
100 37
101 // Execute the code until we hit an exit condition. 38 // Execute the code until we hit an exit condition.
102 bool keep_executing = true; 39 bool keep_executing = true;
103 while (keep_executing) { 40 while (keep_executing) {
104 keep_executing = Step(offset, false); 41 keep_executing = Step(false);
105 } 42 }
106 43
 107 // Assert that the macro used all the input parameters 44 // Assert that the macro used all the input parameters
108 ASSERT(next_parameter_index == num_parameters); 45 ASSERT(next_parameter_index == num_parameters);
109} 46}
110 47
111void MacroInterpreter::Reset() { 48void MacroInterpreterImpl::Reset() {
112 registers = {}; 49 registers = {};
113 pc = 0; 50 pc = 0;
114 delayed_pc = {}; 51 delayed_pc = {};
@@ -120,10 +57,10 @@ void MacroInterpreter::Reset() {
120 carry_flag = false; 57 carry_flag = false;
121} 58}
122 59
123bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) { 60bool MacroInterpreterImpl::Step(bool is_delay_slot) {
124 u32 base_address = pc; 61 u32 base_address = pc;
125 62
126 Opcode opcode = GetOpcode(offset); 63 Macro::Opcode opcode = GetOpcode();
127 pc += 4; 64 pc += 4;
128 65
129 // Update the program counter if we were delayed 66 // Update the program counter if we were delayed
@@ -134,18 +71,18 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
134 } 71 }
135 72
136 switch (opcode.operation) { 73 switch (opcode.operation) {
137 case Operation::ALU: { 74 case Macro::Operation::ALU: {
138 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a), 75 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
139 GetRegister(opcode.src_b)); 76 GetRegister(opcode.src_b));
140 ProcessResult(opcode.result_operation, opcode.dst, result); 77 ProcessResult(opcode.result_operation, opcode.dst, result);
141 break; 78 break;
142 } 79 }
143 case Operation::AddImmediate: { 80 case Macro::Operation::AddImmediate: {
144 ProcessResult(opcode.result_operation, opcode.dst, 81 ProcessResult(opcode.result_operation, opcode.dst,
145 GetRegister(opcode.src_a) + opcode.immediate); 82 GetRegister(opcode.src_a) + opcode.immediate);
146 break; 83 break;
147 } 84 }
148 case Operation::ExtractInsert: { 85 case Macro::Operation::ExtractInsert: {
149 u32 dst = GetRegister(opcode.src_a); 86 u32 dst = GetRegister(opcode.src_a);
150 u32 src = GetRegister(opcode.src_b); 87 u32 src = GetRegister(opcode.src_b);
151 88
@@ -155,7 +92,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
155 ProcessResult(opcode.result_operation, opcode.dst, dst); 92 ProcessResult(opcode.result_operation, opcode.dst, dst);
156 break; 93 break;
157 } 94 }
158 case Operation::ExtractShiftLeftImmediate: { 95 case Macro::Operation::ExtractShiftLeftImmediate: {
159 u32 dst = GetRegister(opcode.src_a); 96 u32 dst = GetRegister(opcode.src_a);
160 u32 src = GetRegister(opcode.src_b); 97 u32 src = GetRegister(opcode.src_b);
161 98
@@ -164,7 +101,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
164 ProcessResult(opcode.result_operation, opcode.dst, result); 101 ProcessResult(opcode.result_operation, opcode.dst, result);
165 break; 102 break;
166 } 103 }
167 case Operation::ExtractShiftLeftRegister: { 104 case Macro::Operation::ExtractShiftLeftRegister: {
168 u32 dst = GetRegister(opcode.src_a); 105 u32 dst = GetRegister(opcode.src_a);
169 u32 src = GetRegister(opcode.src_b); 106 u32 src = GetRegister(opcode.src_b);
170 107
@@ -173,12 +110,12 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
173 ProcessResult(opcode.result_operation, opcode.dst, result); 110 ProcessResult(opcode.result_operation, opcode.dst, result);
174 break; 111 break;
175 } 112 }
176 case Operation::Read: { 113 case Macro::Operation::Read: {
177 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate); 114 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
178 ProcessResult(opcode.result_operation, opcode.dst, result); 115 ProcessResult(opcode.result_operation, opcode.dst, result);
179 break; 116 break;
180 } 117 }
181 case Operation::Branch: { 118 case Macro::Operation::Branch: {
182 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid"); 119 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
183 u32 value = GetRegister(opcode.src_a); 120 u32 value = GetRegister(opcode.src_a);
184 bool taken = EvaluateBranchCondition(opcode.branch_condition, value); 121 bool taken = EvaluateBranchCondition(opcode.branch_condition, value);
@@ -191,7 +128,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
191 128
192 delayed_pc = base_address + opcode.GetBranchTarget(); 129 delayed_pc = base_address + opcode.GetBranchTarget();
193 // Execute one more instruction due to the delay slot. 130 // Execute one more instruction due to the delay slot.
194 return Step(offset, true); 131 return Step(true);
195 } 132 }
196 break; 133 break;
197 } 134 }
@@ -204,51 +141,44 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
204 // cause an exit if it's executed inside a delay slot. 141 // cause an exit if it's executed inside a delay slot.
205 if (opcode.is_exit && !is_delay_slot) { 142 if (opcode.is_exit && !is_delay_slot) {
206 // Exit has a delay slot, execute the next instruction 143 // Exit has a delay slot, execute the next instruction
207 Step(offset, true); 144 Step(true);
208 return false; 145 return false;
209 } 146 }
210 147
211 return true; 148 return true;
212} 149}
213 150
214MacroInterpreter::Opcode MacroInterpreter::GetOpcode(u32 offset) const { 151u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
215 const auto& macro_memory{maxwell3d.GetMacroMemory()};
216 ASSERT((pc % sizeof(u32)) == 0);
217 ASSERT((pc + offset) < macro_memory.size() * sizeof(u32));
218 return {macro_memory[offset + pc / sizeof(u32)]};
219}
220
221u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) {
222 switch (operation) { 152 switch (operation) {
223 case ALUOperation::Add: { 153 case Macro::ALUOperation::Add: {
224 const u64 result{static_cast<u64>(src_a) + src_b}; 154 const u64 result{static_cast<u64>(src_a) + src_b};
225 carry_flag = result > 0xffffffff; 155 carry_flag = result > 0xffffffff;
226 return static_cast<u32>(result); 156 return static_cast<u32>(result);
227 } 157 }
228 case ALUOperation::AddWithCarry: { 158 case Macro::ALUOperation::AddWithCarry: {
229 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)}; 159 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)};
230 carry_flag = result > 0xffffffff; 160 carry_flag = result > 0xffffffff;
231 return static_cast<u32>(result); 161 return static_cast<u32>(result);
232 } 162 }
233 case ALUOperation::Subtract: { 163 case Macro::ALUOperation::Subtract: {
234 const u64 result{static_cast<u64>(src_a) - src_b}; 164 const u64 result{static_cast<u64>(src_a) - src_b};
235 carry_flag = result < 0x100000000; 165 carry_flag = result < 0x100000000;
236 return static_cast<u32>(result); 166 return static_cast<u32>(result);
237 } 167 }
238 case ALUOperation::SubtractWithBorrow: { 168 case Macro::ALUOperation::SubtractWithBorrow: {
239 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)}; 169 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)};
240 carry_flag = result < 0x100000000; 170 carry_flag = result < 0x100000000;
241 return static_cast<u32>(result); 171 return static_cast<u32>(result);
242 } 172 }
243 case ALUOperation::Xor: 173 case Macro::ALUOperation::Xor:
244 return src_a ^ src_b; 174 return src_a ^ src_b;
245 case ALUOperation::Or: 175 case Macro::ALUOperation::Or:
246 return src_a | src_b; 176 return src_a | src_b;
247 case ALUOperation::And: 177 case Macro::ALUOperation::And:
248 return src_a & src_b; 178 return src_a & src_b;
249 case ALUOperation::AndNot: 179 case Macro::ALUOperation::AndNot:
250 return src_a & ~src_b; 180 return src_a & ~src_b;
251 case ALUOperation::Nand: 181 case Macro::ALUOperation::Nand:
252 return ~(src_a & src_b); 182 return ~(src_a & src_b);
253 183
254 default: 184 default:
@@ -257,43 +187,43 @@ u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b)
257 } 187 }
258} 188}
259 189
260void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 result) { 190void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
261 switch (operation) { 191 switch (operation) {
262 case ResultOperation::IgnoreAndFetch: 192 case Macro::ResultOperation::IgnoreAndFetch:
263 // Fetch parameter and ignore result. 193 // Fetch parameter and ignore result.
264 SetRegister(reg, FetchParameter()); 194 SetRegister(reg, FetchParameter());
265 break; 195 break;
266 case ResultOperation::Move: 196 case Macro::ResultOperation::Move:
267 // Move result. 197 // Move result.
268 SetRegister(reg, result); 198 SetRegister(reg, result);
269 break; 199 break;
270 case ResultOperation::MoveAndSetMethod: 200 case Macro::ResultOperation::MoveAndSetMethod:
271 // Move result and use as Method Address. 201 // Move result and use as Method Address.
272 SetRegister(reg, result); 202 SetRegister(reg, result);
273 SetMethodAddress(result); 203 SetMethodAddress(result);
274 break; 204 break;
275 case ResultOperation::FetchAndSend: 205 case Macro::ResultOperation::FetchAndSend:
276 // Fetch parameter and send result. 206 // Fetch parameter and send result.
277 SetRegister(reg, FetchParameter()); 207 SetRegister(reg, FetchParameter());
278 Send(result); 208 Send(result);
279 break; 209 break;
280 case ResultOperation::MoveAndSend: 210 case Macro::ResultOperation::MoveAndSend:
281 // Move and send result. 211 // Move and send result.
282 SetRegister(reg, result); 212 SetRegister(reg, result);
283 Send(result); 213 Send(result);
284 break; 214 break;
285 case ResultOperation::FetchAndSetMethod: 215 case Macro::ResultOperation::FetchAndSetMethod:
286 // Fetch parameter and use result as Method Address. 216 // Fetch parameter and use result as Method Address.
287 SetRegister(reg, FetchParameter()); 217 SetRegister(reg, FetchParameter());
288 SetMethodAddress(result); 218 SetMethodAddress(result);
289 break; 219 break;
290 case ResultOperation::MoveAndSetMethodFetchAndSend: 220 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
291 // Move result and use as Method Address, then fetch and send parameter. 221 // Move result and use as Method Address, then fetch and send parameter.
292 SetRegister(reg, result); 222 SetRegister(reg, result);
293 SetMethodAddress(result); 223 SetMethodAddress(result);
294 Send(FetchParameter()); 224 Send(FetchParameter());
295 break; 225 break;
296 case ResultOperation::MoveAndSetMethodSend: 226 case Macro::ResultOperation::MoveAndSetMethodSend:
297 // Move result and use as Method Address, then send bits 12:17 of result. 227 // Move result and use as Method Address, then send bits 12:17 of result.
298 SetRegister(reg, result); 228 SetRegister(reg, result);
299 SetMethodAddress(result); 229 SetMethodAddress(result);
@@ -304,16 +234,28 @@ void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 res
304 } 234 }
305} 235}
306 236
307u32 MacroInterpreter::FetchParameter() { 237bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
308 ASSERT(next_parameter_index < num_parameters); 238 switch (cond) {
309 return parameters[next_parameter_index++]; 239 case Macro::BranchCondition::Zero:
240 return value == 0;
241 case Macro::BranchCondition::NotZero:
242 return value != 0;
243 }
244 UNREACHABLE();
245 return true;
310} 246}
311 247
312u32 MacroInterpreter::GetRegister(u32 register_id) const { 248Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
249 ASSERT((pc % sizeof(u32)) == 0);
250 ASSERT(pc < code.size() * sizeof(u32));
251 return {code[pc / sizeof(u32)]};
252}
253
254u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
313 return registers.at(register_id); 255 return registers.at(register_id);
314} 256}
315 257
316void MacroInterpreter::SetRegister(u32 register_id, u32 value) { 258void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
317 // Register 0 is hardwired as the zero register. 259 // Register 0 is hardwired as the zero register.
318 // Ensure no writes to it actually occur. 260 // Ensure no writes to it actually occur.
319 if (register_id == 0) { 261 if (register_id == 0) {
@@ -323,30 +265,24 @@ void MacroInterpreter::SetRegister(u32 register_id, u32 value) {
323 registers.at(register_id) = value; 265 registers.at(register_id) = value;
324} 266}
325 267
326void MacroInterpreter::SetMethodAddress(u32 address) { 268void MacroInterpreterImpl::SetMethodAddress(u32 address) {
327 method_address.raw = address; 269 method_address.raw = address;
328} 270}
329 271
330void MacroInterpreter::Send(u32 value) { 272void MacroInterpreterImpl::Send(u32 value) {
331 maxwell3d.CallMethodFromMME(method_address.address, value); 273 maxwell3d.CallMethodFromMME(method_address.address, value);
332 // Increment the method address by the method increment. 274 // Increment the method address by the method increment.
333 method_address.address.Assign(method_address.address.Value() + 275 method_address.address.Assign(method_address.address.Value() +
334 method_address.increment.Value()); 276 method_address.increment.Value());
335} 277}
336 278
337u32 MacroInterpreter::Read(u32 method) const { 279u32 MacroInterpreterImpl::Read(u32 method) const {
338 return maxwell3d.GetRegisterValue(method); 280 return maxwell3d.GetRegisterValue(method);
339} 281}
340 282
341bool MacroInterpreter::EvaluateBranchCondition(BranchCondition cond, u32 value) const { 283u32 MacroInterpreterImpl::FetchParameter() {
342 switch (cond) { 284 ASSERT(next_parameter_index < num_parameters);
343 case BranchCondition::Zero: 285 return parameters[next_parameter_index++];
344 return value == 0;
345 case BranchCondition::NotZero:
346 return value != 0;
347 }
348 UNREACHABLE();
349 return true;
350} 286}
351 287
352} // namespace Tegra 288} // namespace Tegra
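Branches and exits both execute one trailing instruction, the delay slot, before they take effect, which is why Step recurses with is_delay_slot = true. An abridged, illustrative distillation of that control flow, not the actual class:

    #include <cstdint>
    #include <optional>

    std::uint32_t pc = 0;
    std::optional<std::uint32_t> delayed_pc;

    // One instruction dispatch, heavily abridged.
    bool StepOnce(bool is_delay_slot) {
        (void)is_delay_slot; // only used for assertions in the real interpreter
        pc += 4;             // fetch at the old pc, then advance
        // A previously taken branch stored delayed_pc; the jump lands only
        // after this (delay-slot) instruction has executed.
        if (delayed_pc) {
            pc = *delayed_pc;
            delayed_pc = {};
        }
        // A taken branch sets delayed_pc and returns StepOnce(true); an
        // is_exit instruction outside a delay slot runs StepOnce(true) for
        // its own delay slot, then returns false to stop the caller's loop.
        return false;
    }

    void RunMacro() {
        bool keep_executing = true;
        while (keep_executing) {
            keep_executing = StepOnce(false);
        }
    }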
diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro/macro_interpreter.h
index 631146d89..90217fc89 100644
--- a/src/video_core/macro_interpreter.h
+++ b/src/video_core/macro/macro_interpreter.h
@@ -1,44 +1,37 @@
1// Copyright 2018 yuzu Emulator Project 1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#pragma once 5#pragma once
6
7#include <array> 6#include <array>
8#include <optional> 7#include <optional>
9 8#include <vector>
10#include "common/bit_field.h" 9#include "common/bit_field.h"
11#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/macro/macro.h"
12 12
13namespace Tegra { 13namespace Tegra {
14namespace Engines { 14namespace Engines {
15class Maxwell3D; 15class Maxwell3D;
16} 16}
17 17
18class MacroInterpreter final { 18class MacroInterpreter final : public MacroEngine {
19public: 19public:
20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d); 20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
21 21
22 /** 22protected:
23 * Executes the macro code with the specified input parameters. 23 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
24 * @param offset Offset to start execution at.
25 * @param parameters The parameters of the macro.
26 */
27 void Execute(u32 offset, std::size_t num_parameters, const u32* parameters);
28 24
29private: 25private:
30 enum class ALUOperation : u32; 26 Engines::Maxwell3D& maxwell3d;
31 enum class BranchCondition : u32; 27};
32 enum class ResultOperation : u32;
33
34 union Opcode;
35 28
36 union MethodAddress { 29class MacroInterpreterImpl : public CachedMacro {
37 u32 raw; 30public:
38 BitField<0, 12, u32> address; 31 MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
39 BitField<12, 6, u32> increment; 32 void Execute(const std::vector<u32>& parameters, u32 method) override;
40 };
41 33
34private:
42 /// Resets the execution engine state, zeroing registers, etc. 35 /// Resets the execution engine state, zeroing registers, etc.
43 void Reset(); 36 void Reset();
44 37
@@ -49,20 +42,20 @@ private:
49 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a 42 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
50 * previous instruction. 43 * previous instruction.
51 */ 44 */
52 bool Step(u32 offset, bool is_delay_slot); 45 bool Step(bool is_delay_slot);
53 46
54 /// Calculates the result of an ALU operation. src_a OP src_b; 47 /// Calculates the result of an ALU operation. src_a OP src_b;
55 u32 GetALUResult(ALUOperation operation, u32 src_a, u32 src_b); 48 u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
56 49
57 /// Performs the result operation on the input result and stores it in the specified register 50 /// Performs the result operation on the input result and stores it in the specified register
58 /// (if necessary). 51 /// (if necessary).
59 void ProcessResult(ResultOperation operation, u32 reg, u32 result); 52 void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
60 53
61 /// Evaluates the branch condition and returns whether the branch should be taken or not. 54 /// Evaluates the branch condition and returns whether the branch should be taken or not.
62 bool EvaluateBranchCondition(BranchCondition cond, u32 value) const; 55 bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
63 56
64 /// Reads an opcode at the current program counter location. 57 /// Reads an opcode at the current program counter location.
65 Opcode GetOpcode(u32 offset) const; 58 Macro::Opcode GetOpcode() const;
66 59
67 /// Returns the specified register's value. Register 0 is hardcoded to always return 0. 60 /// Returns the specified register's value. Register 0 is hardcoded to always return 0.
68 u32 GetRegister(u32 register_id) const; 61 u32 GetRegister(u32 register_id) const;
@@ -89,13 +82,11 @@ private:
89 /// Program counter to execute at after the delay slot is executed. 82 /// Program counter to execute at after the delay slot is executed.
90 std::optional<u32> delayed_pc; 83 std::optional<u32> delayed_pc;
91 84
92 static constexpr std::size_t NumMacroRegisters = 8;
93
94 /// General purpose macro registers. 85 /// General purpose macro registers.
95 std::array<u32, NumMacroRegisters> registers = {}; 86 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
96 87
97 /// Method address to use for the next Send instruction. 88 /// Method address to use for the next Send instruction.
98 MethodAddress method_address = {}; 89 Macro::MethodAddress method_address = {};
99 90
100 /// Input parameters of the current macro. 91 /// Input parameters of the current macro.
101 std::unique_ptr<u32[]> parameters; 92 std::unique_ptr<u32[]> parameters;
@@ -105,5 +96,7 @@ private:
105 u32 next_parameter_index = 0; 96 u32 next_parameter_index = 0;
106 97
107 bool carry_flag = false; 98 bool carry_flag = false;
99 const std::vector<u32>& code;
108}; 100};
101
109} // namespace Tegra 102} // namespace Tegra
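For reference, the contract the new interface imposes: MacroEngine::Execute is expected to look up the compiled macro (compiling it on first use via Compile) and invoke its CachedMacro::Execute with the parameter list. A minimal conforming sketch, assuming only the declarations in video_core/macro/macro.h; the class name here is hypothetical:

    #include <vector>
    #include "video_core/macro/macro.h"

    namespace Tegra {
    class NoopMacro final : public CachedMacro {
    public:
        // A real implementation (MacroInterpreterImpl above, or the x64 JIT
        // below) runs the compiled program here; parameters[0] seeds macro
        // register 1 and later entries are consumed by parameter fetches.
        void Execute(const std::vector<u32>& parameters, u32 method) override {}
    };
    } // namespace Tegra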
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
new file mode 100644
index 000000000..11c1cc3be
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -0,0 +1,640 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "common/x64/xbyak_util.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/macro/macro_interpreter.h"
11#include "video_core/macro/macro_jit_x64.h"
12
13MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16namespace Tegra {
17static const Xbyak::Reg64 PARAMETERS = Xbyak::util::r9;
18static const Xbyak::Reg64 REGISTERS = Xbyak::util::r10;
19static const Xbyak::Reg64 STATE = Xbyak::util::r11;
20static const Xbyak::Reg64 NEXT_PARAMETER = Xbyak::util::r12;
21static const Xbyak::Reg32 RESULT = Xbyak::util::r13d;
22static const Xbyak::Reg64 RESULT_64 = Xbyak::util::r13;
23static const Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d;
24static const Xbyak::Reg64 METHOD_ADDRESS_64 = Xbyak::util::r14;
25static const Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;
26
27static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
28 PARAMETERS,
29 REGISTERS,
30 STATE,
31 NEXT_PARAMETER,
32 RESULT,
33 METHOD_ADDRESS,
34 BRANCH_HOLDER,
35});
36
37MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
38
39std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
40 return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
41}
42
43MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
44 : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
45 Compile();
46}
47
48MacroJITx64Impl::~MacroJITx64Impl() = default;
49
50void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
51 MICROPROFILE_SCOPE(MacroJitExecute);
52 ASSERT_OR_EXECUTE(program != nullptr, { return; });
53 JITState state{};
54 state.maxwell3d = &maxwell3d;
55 state.registers = {};
56 state.parameters = parameters.data();
57 program(&state);
58}
59
60void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
61 const bool is_a_zero = opcode.src_a == 0;
62 const bool is_b_zero = opcode.src_b == 0;
63 const bool valid_operation = !is_a_zero && !is_b_zero;
64 const bool is_move_operation = !is_a_zero && is_b_zero;
65 const bool has_zero_register = is_a_zero || is_b_zero;
66
67 Xbyak::Reg64 src_a;
68 Xbyak::Reg32 src_b;
69
70 if (!optimizer.zero_reg_skip) {
71 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
72 src_b = Compile_GetRegister(opcode.src_b, ebx);
73 } else {
74 if (!is_a_zero) {
75 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
76 }
77 if (!is_b_zero) {
78 src_b = Compile_GetRegister(opcode.src_b, ebx);
79 }
80 }
81 Xbyak::Label skip_carry{};
82
83 bool has_emitted = false;
84
85 switch (opcode.alu_operation) {
86 case Macro::ALUOperation::Add:
87 if (optimizer.zero_reg_skip) {
88 if (valid_operation) {
89 add(src_a, src_b);
90 }
91 } else {
92 add(src_a, src_b);
93 }
94
95 if (!optimizer.can_skip_carry) {
96 setc(byte[STATE + offsetof(JITState, carry_flag)]);
97 }
98 break;
99 case Macro::ALUOperation::AddWithCarry:
100 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
101 adc(src_a, src_b);
102 setc(byte[STATE + offsetof(JITState, carry_flag)]);
103 break;
104 case Macro::ALUOperation::Subtract:
105 if (optimizer.zero_reg_skip) {
106 if (valid_operation) {
107 sub(src_a, src_b);
108 has_emitted = true;
109 }
110 } else {
111 sub(src_a, src_b);
112 has_emitted = true;
113 }
114 if (!optimizer.can_skip_carry && has_emitted) {
115 setc(byte[STATE + offsetof(JITState, carry_flag)]);
116 }
117 break;
118 case Macro::ALUOperation::SubtractWithBorrow:
119 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
120 sbb(src_a, src_b);
121 setc(byte[STATE + offsetof(JITState, carry_flag)]);
122 break;
123 case Macro::ALUOperation::Xor:
124 if (optimizer.zero_reg_skip) {
125 if (valid_operation) {
126 xor_(src_a, src_b);
127 }
128 } else {
129 xor_(src_a, src_b);
130 }
131 break;
132 case Macro::ALUOperation::Or:
133 if (optimizer.zero_reg_skip) {
134 if (valid_operation) {
135 or_(src_a, src_b);
136 }
137 } else {
138 or_(src_a, src_b);
139 }
140 break;
141 case Macro::ALUOperation::And:
142 if (optimizer.zero_reg_skip) {
143 if (!has_zero_register) {
144 and_(src_a, src_b);
145 }
146 } else {
147 and_(src_a, src_b);
148 }
149 break;
150 case Macro::ALUOperation::AndNot:
151 if (optimizer.zero_reg_skip) {
152 if (!is_a_zero) {
153 not_(src_b);
154 and_(src_a, src_b);
155 }
156 } else {
157 not_(src_b);
158 and_(src_a, src_b);
159 }
160 break;
161 case Macro::ALUOperation::Nand:
162 if (optimizer.zero_reg_skip) {
163 if (!is_a_zero) {
164 and_(src_a, src_b);
165 not_(src_a);
166 }
167 } else {
168 and_(src_a, src_b);
169 not_(src_a);
170 }
171 break;
172 default:
173 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}",
174 static_cast<std::size_t>(opcode.alu_operation.Value()));
175 break;
176 }
177 Compile_ProcessResult(opcode.result_operation, opcode.dst);
178}
179
180void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
181 if (optimizer.skip_dummy_addimmediate) {
182        // Games tend to use this as an exit instruction placeholder: it encodes an instruction
183        // that does nothing. In that case we can simply not emit anything.
184 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
185 return;
186 }
187 }
188 // Check for redundant moves
189 if (optimizer.optimize_for_method_move &&
190 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
191 if (next_opcode.has_value()) {
192 const auto next = *next_opcode;
193 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
194 return;
195 }
196 }
197 }
198 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
199 if (opcode.immediate == 0) {
200 xor_(RESULT, RESULT);
201 } else {
202 mov(RESULT, opcode.immediate);
203 }
204 } else {
205 auto result = Compile_GetRegister(opcode.src_a, RESULT);
206        if (opcode.immediate > 1) {
207 add(result, opcode.immediate);
208 } else if (opcode.immediate == 1) {
209 inc(result);
210 } else if (opcode.immediate < 0) {
211 sub(result, opcode.immediate * -1);
212 }
213 }
214 Compile_ProcessResult(opcode.result_operation, opcode.dst);
215}
216
217void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
218 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
219 auto src = Compile_GetRegister(opcode.src_b, eax);
220
221 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
222 shr(src, opcode.bf_src_bit);
223 } else if (opcode.bf_src_bit == 31) {
224 xor_(src, src);
225 }
226 // Don't bother masking the whole register since we're using a 32 bit register
227 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
228 and_(src, opcode.GetBitfieldMask());
229 } else if (opcode.bf_size == 0) {
230 xor_(src, src);
231 }
232 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
233 shl(src, opcode.bf_dst_bit);
234 } else if (opcode.bf_dst_bit == 31) {
235 xor_(src, src);
236 }
237
238 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
239 if (mask != 0xffffffff) {
240 and_(dst, mask);
241 }
242 or_(dst, src);
243 Compile_ProcessResult(opcode.result_operation, opcode.dst);
244}
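Special cases for shift counts of 0 and 31 aside, the sequence emitted above computes the following scalar operation (a sketch; GetBitfieldMask() is assumed to expand to the usual ((1 << bf_size) - 1) mask):

    // Extract a bf_size-wide field from src at bf_src_bit and insert it into
    // dst at bf_dst_bit, mirroring the shr/and/shl/and/or emitted above.
    u32 ExtractInsert(u32 dst, u32 src, u32 bf_src_bit, u32 bf_size, u32 bf_dst_bit) {
        const u32 mask = (1u << bf_size) - 1u;
        const u32 field = (src >> bf_src_bit) & mask;
        return (dst & ~(mask << bf_dst_bit)) | (field << bf_dst_bit);
    }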
245
246void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
247    auto dst = Compile_GetRegister(opcode.src_a, ecx);
248    auto src = Compile_GetRegister(opcode.src_b, RESULT);
249
250    shr(src, dst.cvt8()); // variable shift counts must live in cl
251 if (opcode.bf_size != 0 && opcode.bf_size != 31) {
252 and_(src, opcode.GetBitfieldMask());
253 } else if (opcode.bf_size == 0) {
254 xor_(src, src);
255 }
256
257 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
258 shl(src, opcode.bf_dst_bit);
259 } else if (opcode.bf_dst_bit == 31) {
260 xor_(src, src);
261 }
262 Compile_ProcessResult(opcode.result_operation, opcode.dst);
263}
264
265void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
266    auto dst = Compile_GetRegister(opcode.src_a, ecx);
267 auto src = Compile_GetRegister(opcode.src_b, RESULT);
268
269 if (opcode.bf_src_bit != 0) {
270 shr(src, opcode.bf_src_bit);
271 }
272
273 if (opcode.bf_size != 31) {
274 and_(src, opcode.GetBitfieldMask());
275 }
276    shl(src, dst.cvt8()); // variable shift counts must live in cl
277 Compile_ProcessResult(opcode.result_operation, opcode.dst);
278}
279
280static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) {
281 return maxwell3d->GetRegisterValue(method);
282}
283
284static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
285 maxwell3d->CallMethodFromMME(method_address.address, value);
286}
287
288void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
289 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
290 if (opcode.immediate == 0) {
291 xor_(RESULT, RESULT);
292 } else {
293 mov(RESULT, opcode.immediate);
294 }
295 } else {
296 auto result = Compile_GetRegister(opcode.src_a, RESULT);
297        if (opcode.immediate > 1) {
298 add(result, opcode.immediate);
299 } else if (opcode.immediate == 1) {
300 inc(result);
301 } else if (opcode.immediate < 0) {
302 sub(result, opcode.immediate * -1);
303 }
304 }
305 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
306 mov(Common::X64::ABI_PARAM1, qword[STATE]);
307 mov(Common::X64::ABI_PARAM2, RESULT);
308 Common::X64::CallFarFunction(*this, &Read);
309 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
310 mov(RESULT, Common::X64::ABI_RETURN.cvt32());
311 Compile_ProcessResult(opcode.result_operation, opcode.dst);
312}
313
314void MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
315 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
316 mov(Common::X64::ABI_PARAM1, qword[STATE]);
317 mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
318 mov(Common::X64::ABI_PARAM3, value);
319 Common::X64::CallFarFunction(*this, &Send);
320 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
321
322 Xbyak::Label dont_process{};
323 // Get increment
324 test(METHOD_ADDRESS, 0x3f000);
325 // If zero, method address doesn't update
326 je(dont_process);
327
328 mov(ecx, METHOD_ADDRESS);
329 and_(METHOD_ADDRESS, 0xfff);
330 shr(ecx, 12);
331 and_(ecx, 0x3f);
332 lea(eax, ptr[rcx + METHOD_ADDRESS_64]);
333 sal(ecx, 12);
334 or_(eax, ecx);
335
336 mov(METHOD_ADDRESS, eax);
337
338 L(dont_process);
339}
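In plain C++, the increment handling above amounts to the following update of the packed method address; like the emitted lea/sal/or sequence, this sketch deliberately does not mask a carry out of bit 11:

    // Bits 0..11 hold the method address, bits 12..17 the auto-increment.
    u32 AdvanceMethodAddress(u32 packed) {
        const u32 increment = (packed >> 12) & 0x3f;
        return ((packed & 0xfff) + increment) | (increment << 12);
    }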
340
341void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
342 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
343 const s32 jump_address =
344 static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));
345
346 Xbyak::Label end;
347 auto value = Compile_GetRegister(opcode.src_a, eax);
348 test(value, value);
349 if (optimizer.has_delayed_pc) {
350 switch (opcode.branch_condition) {
351 case Macro::BranchCondition::Zero:
352 jne(end, T_NEAR);
353 break;
354 case Macro::BranchCondition::NotZero:
355 je(end, T_NEAR);
356 break;
357 }
358
359 if (opcode.branch_annul) {
360 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
361 jmp(labels[jump_address], T_NEAR);
362 } else {
363 Xbyak::Label handle_post_exit{};
364 Xbyak::Label skip{};
365 jmp(skip, T_NEAR);
366 if (opcode.is_exit) {
367 L(handle_post_exit);
368 // Execute 1 instruction
369 mov(BRANCH_HOLDER, end_of_code);
370 // Jump to next instruction to skip delay slot check
371 jmp(labels[jump_address], T_NEAR);
372 } else {
373 L(handle_post_exit);
374 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
375 jmp(labels[jump_address], T_NEAR);
376 }
377 L(skip);
378 mov(BRANCH_HOLDER, handle_post_exit);
379 jmp(delay_skip[pc], T_NEAR);
380 }
381 } else {
382 switch (opcode.branch_condition) {
383 case Macro::BranchCondition::Zero:
384 je(labels[jump_address], T_NEAR);
385 break;
386 case Macro::BranchCondition::NotZero:
387 jne(labels[jump_address], T_NEAR);
388 break;
389 }
390 }
391
392 L(end);
393}
394
395void MacroJITx64Impl::Optimizer_ScanFlags() {
396 optimizer.can_skip_carry = true;
397 optimizer.has_delayed_pc = false;
398 for (auto raw_op : code) {
399 Macro::Opcode op{};
400 op.raw = raw_op;
401
402 if (op.operation == Macro::Operation::ALU) {
403            // Scan for any ALU operations which actually use the carry flag; if none exist in
404            // the current code, we can skip emitting the carry-flag handling operations
405 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
406 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
407 optimizer.can_skip_carry = false;
408 }
409 }
410
411 if (op.operation == Macro::Operation::Branch) {
412 if (!op.branch_annul) {
413 optimizer.has_delayed_pc = true;
414 }
415 }
416 }
417}
418
419void MacroJITx64Impl::Compile() {
420 MICROPROFILE_SCOPE(MacroJitCompile);
421 bool keep_executing = true;
422 labels.fill(Xbyak::Label());
423
424 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
425 // JIT state
426 mov(STATE, Common::X64::ABI_PARAM1);
427 mov(PARAMETERS, qword[Common::X64::ABI_PARAM1 +
428 static_cast<Xbyak::uint32>(offsetof(JITState, parameters))]);
429 mov(REGISTERS, Common::X64::ABI_PARAM1);
430 add(REGISTERS, static_cast<Xbyak::uint32>(offsetof(JITState, registers)));
431 xor_(RESULT, RESULT);
432 xor_(METHOD_ADDRESS, METHOD_ADDRESS);
433 xor_(NEXT_PARAMETER, NEXT_PARAMETER);
434 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
435
436 mov(dword[REGISTERS + 4], Compile_FetchParameter());
437
438    // Treat reads of the zero register as no-ops instead of emitting register loads
439 optimizer.zero_reg_skip = true;
440
441    // AddImmediate tends to be used as a NOP instruction; if we detect this we can
442    // skip the entire code path and not emit anything
443 optimizer.skip_dummy_addimmediate = true;
444
445    // SMO tends to emit a lot of unnecessary method moves; we can mitigate this by only
446    // emitting one when our register isn't "dirty"
447 optimizer.optimize_for_method_move = true;
448
449 // Check to see if we can skip emitting certain instructions
450 Optimizer_ScanFlags();
451
452 const u32 op_count = static_cast<u32>(code.size());
453 for (u32 i = 0; i < op_count; i++) {
454 if (i < op_count - 1) {
455 pc = i + 1;
456 next_opcode = GetOpCode();
457 } else {
458 next_opcode = {};
459 }
460 pc = i;
461 Compile_NextInstruction();
462 }
463
464 L(end_of_code);
465
466 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
467 ret();
468 ready();
469 program = getCode<ProgramType>();
470}
471
472bool MacroJITx64Impl::Compile_NextInstruction() {
473 const auto opcode = GetOpCode();
474 if (labels[pc].getAddress()) {
475 return false;
476 }
477
478 L(labels[pc]);
479
480 switch (opcode.operation) {
481 case Macro::Operation::ALU:
482 Compile_ALU(opcode);
483 break;
484 case Macro::Operation::AddImmediate:
485 Compile_AddImmediate(opcode);
486 break;
487 case Macro::Operation::ExtractInsert:
488 Compile_ExtractInsert(opcode);
489 break;
490 case Macro::Operation::ExtractShiftLeftImmediate:
491 Compile_ExtractShiftLeftImmediate(opcode);
492 break;
493 case Macro::Operation::ExtractShiftLeftRegister:
494 Compile_ExtractShiftLeftRegister(opcode);
495 break;
496 case Macro::Operation::Read:
497 Compile_Read(opcode);
498 break;
499 case Macro::Operation::Branch:
500 Compile_Branch(opcode);
501 break;
502 default:
503 UNIMPLEMENTED_MSG("Unimplemented opcode {}", opcode.operation.Value());
504 break;
505 }
506
507 if (optimizer.has_delayed_pc) {
508 if (opcode.is_exit) {
509 mov(rax, end_of_code);
510 test(BRANCH_HOLDER, BRANCH_HOLDER);
511 cmove(BRANCH_HOLDER, rax);
512 // Jump to next instruction to skip delay slot check
513 je(labels[pc + 1], T_NEAR);
514 } else {
515 // TODO(ogniK): Optimize delay slot branching
516 Xbyak::Label no_delay_slot{};
517 test(BRANCH_HOLDER, BRANCH_HOLDER);
518 je(no_delay_slot, T_NEAR);
519 mov(rax, BRANCH_HOLDER);
520 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
521 jmp(rax);
522 L(no_delay_slot);
523 }
524 L(delay_skip[pc]);
525 if (opcode.is_exit) {
526 return false;
527 }
528 } else {
529 test(BRANCH_HOLDER, BRANCH_HOLDER);
530 jne(end_of_code, T_NEAR);
531 if (opcode.is_exit) {
532 inc(BRANCH_HOLDER);
533 return false;
534 }
535 }
536 return true;
537}
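Throughout this epilogue BRANCH_HOLDER does double duty: when nonzero it holds the host address to jump to once the delay-slot instruction has executed, and end_of_code serves as the sentinel target that makes an exit fall through to the function epilogue.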
538
539Xbyak::Reg32 MacroJITx64Impl::Compile_FetchParameter() {
540 mov(eax, dword[PARAMETERS + NEXT_PARAMETER * sizeof(u32)]);
541 inc(NEXT_PARAMETER);
542 return eax;
543}
544
545Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
546 if (index == 0) {
547 // Register 0 is always zero
548 xor_(dst, dst);
549 } else {
550 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
551 }
552
553 return dst;
554}
555
556Xbyak::Reg64 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg64 dst) {
557 if (index == 0) {
558 // Register 0 is always zero
559 xor_(dst, dst);
560 } else {
561 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
562 }
563
564 return dst;
565}
566
567void MacroJITx64Impl::Compile_WriteCarry(Xbyak::Reg64 dst) {
569 xor_(ecx, ecx);
570 shr(dst, 32);
571 setne(cl);
572 mov(dword[STATE + offsetof(JITState, carry_flag)], ecx);
573}
574
575void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
576 auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
577 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
578 // register.
579 if (reg == 0) {
580 return;
581 }
582 mov(dword[REGISTERS + reg * sizeof(u32)], result);
583 };
584 auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
585
586 switch (operation) {
587 case Macro::ResultOperation::IgnoreAndFetch:
588 SetRegister(reg, Compile_FetchParameter());
589 break;
590 case Macro::ResultOperation::Move:
591 SetRegister(reg, RESULT);
592 break;
593 case Macro::ResultOperation::MoveAndSetMethod:
594 SetRegister(reg, RESULT);
595 SetMethodAddress(RESULT);
596 break;
597 case Macro::ResultOperation::FetchAndSend:
598 // Fetch parameter and send result.
599 SetRegister(reg, Compile_FetchParameter());
600 Compile_Send(RESULT);
601 break;
602 case Macro::ResultOperation::MoveAndSend:
603 // Move and send result.
604 SetRegister(reg, RESULT);
605 Compile_Send(RESULT);
606 break;
607 case Macro::ResultOperation::FetchAndSetMethod:
608 // Fetch parameter and use result as Method Address.
609 SetRegister(reg, Compile_FetchParameter());
610 SetMethodAddress(RESULT);
611 break;
612 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
613 // Move result and use as Method Address, then fetch and send parameter.
614 SetRegister(reg, RESULT);
615 SetMethodAddress(RESULT);
616 Compile_Send(Compile_FetchParameter());
617 break;
618 case Macro::ResultOperation::MoveAndSetMethodSend:
619 // Move result and use as Method Address, then send bits 12:17 of result.
620 SetRegister(reg, RESULT);
621 SetMethodAddress(RESULT);
622 shr(RESULT, 12);
623 and_(RESULT, 0b111111);
624 Compile_Send(RESULT);
625 break;
626 default:
627 UNIMPLEMENTED_MSG("Unimplemented macro operation {}", static_cast<std::size_t>(operation));
628 }
629}
630
631Macro::Opcode MacroJITx64Impl::GetOpCode() const {
632 ASSERT(pc < code.size());
633 return {code[pc]};
634}
635
636std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
637 return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
638}
639
640} // namespace Tegra
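Note that PersistentCallerSavedRegs() is what keeps the Read/Send helper calls cheap: of the pinned registers, only r9, r10 and r11 are caller-saved on both Windows x64 and System V, so only those get spilled around a call while the r12-r15 pins survive it untouched.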
diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h
new file mode 100644
index 000000000..21ee157cf
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.h
@@ -0,0 +1,100 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <bitset>
9#include <xbyak.h>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/x64/xbyak_abi.h"
13#include "video_core/macro/macro.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19}
20
21/// MAX_CODE_SIZE is an arbitrary limit, chosen to fit the macros seen in currently booting games
22constexpr size_t MAX_CODE_SIZE = 0x10000;
23
24class MacroJITx64 final : public MacroEngine {
25public:
26 explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
27
28protected:
29 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
30
31private:
32 Engines::Maxwell3D& maxwell3d;
33};
34
35class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
36public:
37 MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
38 ~MacroJITx64Impl();
39
40 void Execute(const std::vector<u32>& parameters, u32 method) override;
41
42 void Compile_ALU(Macro::Opcode opcode);
43 void Compile_AddImmediate(Macro::Opcode opcode);
44 void Compile_ExtractInsert(Macro::Opcode opcode);
45 void Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode);
46 void Compile_ExtractShiftLeftRegister(Macro::Opcode opcode);
47 void Compile_Read(Macro::Opcode opcode);
48 void Compile_Branch(Macro::Opcode opcode);
49
50private:
51 void Optimizer_ScanFlags();
52
53 void Compile();
54 bool Compile_NextInstruction();
55
56 Xbyak::Reg32 Compile_FetchParameter();
57 Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
58 Xbyak::Reg64 Compile_GetRegister(u32 index, Xbyak::Reg64 dst);
59 void Compile_WriteCarry(Xbyak::Reg64 dst);
60
61 void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
62 void Compile_Send(Xbyak::Reg32 value);
63
64 Macro::Opcode GetOpCode() const;
65 std::bitset<32> PersistentCallerSavedRegs() const;
66
67 struct JITState {
68 Engines::Maxwell3D* maxwell3d{};
69 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers{};
70 const u32* parameters{};
71 u32 carry_flag{};
72 };
73 static_assert(offsetof(JITState, maxwell3d) == 0, "Maxwell3D is not at 0x0");
74 using ProgramType = void (*)(JITState*);
75
76 struct OptimizerState {
77 bool can_skip_carry{};
78 bool has_delayed_pc{};
79 bool zero_reg_skip{};
80 bool skip_dummy_addimmediate{};
81 bool optimize_for_method_move{};
82 };
83 OptimizerState optimizer{};
84
85 std::optional<Macro::Opcode> next_opcode{};
86 ProgramType program{nullptr};
87
88 std::array<Xbyak::Label, MAX_CODE_SIZE> labels{};
89 std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip{};
90 Xbyak::Label end_of_code{};
91
92 bool is_delay_slot{};
93 u32 pc{};
94 std::optional<u32> delayed_pc;
95
96 const std::vector<u32>& code;
97 Engines::Maxwell3D& maxwell3d;
98};
99
100} // namespace Tegra
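The static_assert on JITState above is load-bearing: the generated code fetches the Maxwell3D pointer with a zero-displacement load (mov ABI_PARAM1, qword[STATE] in Compile_Read and Compile_Send), which stays correct only while maxwell3d remains the first member of the struct.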
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 466a911db..890fc6c63 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -6,6 +6,7 @@
6#include <array> 6#include <array>
7#include <cstddef> 7#include <cstddef>
8#include <cstring> 8#include <cstring>
9#include <limits>
9#include <optional> 10#include <optional>
10#include <vector> 11#include <vector>
11 12
@@ -26,24 +27,27 @@ constexpr u32 ReservedUniformBlocks = 1;
26 27
27constexpr u32 NumStages = 5; 28constexpr u32 NumStages = 5;
28 29
29constexpr std::array LimitUBOs = {GL_MAX_VERTEX_UNIFORM_BLOCKS, GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS, 30constexpr std::array LimitUBOs = {
30 GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS, 31 GL_MAX_VERTEX_UNIFORM_BLOCKS, GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS,
31 GL_MAX_GEOMETRY_UNIFORM_BLOCKS, GL_MAX_FRAGMENT_UNIFORM_BLOCKS}; 32 GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS, GL_MAX_GEOMETRY_UNIFORM_BLOCKS,
33 GL_MAX_FRAGMENT_UNIFORM_BLOCKS, GL_MAX_COMPUTE_UNIFORM_BLOCKS};
32 34
33constexpr std::array LimitSSBOs = { 35constexpr std::array LimitSSBOs = {
34 GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS, 36 GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
35 GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS, 37 GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS, GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS,
36 GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS}; 38 GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS};
37 39
38constexpr std::array LimitSamplers = { 40constexpr std::array LimitSamplers = {GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS,
39 GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS, GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS, 41 GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS,
40 GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS, GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS, 42 GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS,
41 GL_MAX_TEXTURE_IMAGE_UNITS}; 43 GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS,
44 GL_MAX_TEXTURE_IMAGE_UNITS,
45 GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS};
42 46
43constexpr std::array LimitImages = {GL_MAX_VERTEX_IMAGE_UNIFORMS, 47constexpr std::array LimitImages = {
44 GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS, 48 GL_MAX_VERTEX_IMAGE_UNIFORMS, GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS,
45 GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS, 49 GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS, GL_MAX_GEOMETRY_IMAGE_UNIFORMS,
46 GL_MAX_GEOMETRY_IMAGE_UNIFORMS, GL_MAX_FRAGMENT_IMAGE_UNIFORMS}; 50 GL_MAX_FRAGMENT_IMAGE_UNIFORMS, GL_MAX_COMPUTE_IMAGE_UNIFORMS};
47 51
48template <typename T> 52template <typename T>
49T GetInteger(GLenum pname) { 53T GetInteger(GLenum pname) {
@@ -85,6 +89,13 @@ u32 Extract(u32& base, u32& num, u32 amount, std::optional<GLenum> limit = {}) {
85 return std::exchange(base, base + amount); 89 return std::exchange(base, base + amount);
86} 90}
87 91
92std::array<u32, Tegra::Engines::MaxShaderTypes> BuildMaxUniformBuffers() noexcept {
93 std::array<u32, Tegra::Engines::MaxShaderTypes> max;
94 std::transform(LimitUBOs.begin(), LimitUBOs.end(), max.begin(),
95 [](GLenum pname) { return GetInteger<u32>(pname); });
96 return max;
97}
98
88std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept { 99std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindings() noexcept {
89 std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings; 100 std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> bindings;
90 101
@@ -133,6 +144,7 @@ std::array<Device::BaseBindings, Tegra::Engines::MaxShaderTypes> BuildBaseBindin
133} 144}
134 145
135bool IsASTCSupported() { 146bool IsASTCSupported() {
147 static constexpr std::array targets = {GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY};
136 static constexpr std::array formats = { 148 static constexpr std::array formats = {
137 GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_COMPRESSED_RGBA_ASTC_5x4_KHR, 149 GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_COMPRESSED_RGBA_ASTC_5x4_KHR,
138 GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_COMPRESSED_RGBA_ASTC_6x5_KHR, 150 GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_COMPRESSED_RGBA_ASTC_6x5_KHR,
@@ -149,25 +161,43 @@ bool IsASTCSupported() {
149 GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR, 161 GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR,
150 GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR, 162 GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR,
151 }; 163 };
152 return std::find_if_not(formats.begin(), formats.end(), [](GLenum format) { 164 static constexpr std::array required_support = {
153 GLint supported; 165 GL_VERTEX_TEXTURE, GL_TESS_CONTROL_TEXTURE, GL_TESS_EVALUATION_TEXTURE,
154 glGetInternalformativ(GL_TEXTURE_2D, format, GL_INTERNALFORMAT_SUPPORTED, 1, 166 GL_GEOMETRY_TEXTURE, GL_FRAGMENT_TEXTURE, GL_COMPUTE_TEXTURE,
155 &supported); 167 };
156 return supported == GL_TRUE; 168
157 }) == formats.end(); 169 for (const GLenum target : targets) {
170 for (const GLenum format : formats) {
171 for (const GLenum support : required_support) {
172 GLint value;
173                glGetInternalformativ(target, format, support, 1, &value);
174 if (value != GL_FULL_SUPPORT) {
175 return false;
176 }
177 }
178 }
179 }
180 return true;
158} 181}
159 182
160} // Anonymous namespace 183} // Anonymous namespace
161 184
162Device::Device() : base_bindings{BuildBaseBindings()} { 185Device::Device()
186 : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} {
163 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); 187 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
164 const auto renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER)); 188 const std::string_view version = reinterpret_cast<const char*>(glGetString(GL_VERSION));
165 const std::vector extensions = GetExtensions(); 189 const std::vector extensions = GetExtensions();
166 190
167 const bool is_nvidia = vendor == "NVIDIA Corporation"; 191 const bool is_nvidia = vendor == "NVIDIA Corporation";
168 const bool is_amd = vendor == "ATI Technologies Inc."; 192 const bool is_amd = vendor == "ATI Technologies Inc.";
169 const bool is_intel = vendor == "Intel"; 193
170 const bool is_intel_proprietary = is_intel && std::strstr(renderer, "Mesa") == nullptr; 194 bool disable_fast_buffer_sub_data = false;
195 if (is_nvidia && version == "4.6.0 NVIDIA 443.24") {
196 LOG_WARNING(
197 Render_OpenGL,
198 "Beta driver 443.24 is known to have issues. There might be performance issues.");
199 disable_fast_buffer_sub_data = true;
200 }
171 201
172 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); 202 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
173 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); 203 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
@@ -182,10 +212,10 @@ Device::Device() : base_bindings{BuildBaseBindings()} {
182 has_variable_aoffi = TestVariableAoffi(); 212 has_variable_aoffi = TestVariableAoffi();
183 has_component_indexing_bug = is_amd; 213 has_component_indexing_bug = is_amd;
184 has_precise_bug = TestPreciseBug(); 214 has_precise_bug = TestPreciseBug();
185 has_broken_compute = is_intel_proprietary; 215 has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
186 has_fast_buffer_sub_data = is_nvidia;
187 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 && 216 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 &&
188 GLAD_GL_NV_compute_program5; 217 GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback &&
218 GLAD_GL_NV_transform_feedback2;
189 219
190 LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi); 220 LOG_INFO(Render_OpenGL, "Renderer_VariableAOFFI: {}", has_variable_aoffi);
191 LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug); 221 LOG_INFO(Render_OpenGL, "Renderer_ComponentIndexingBug: {}", has_component_indexing_bug);
@@ -197,7 +227,9 @@ Device::Device() : base_bindings{BuildBaseBindings()} {
197} 227}
198 228
199Device::Device(std::nullptr_t) { 229Device::Device(std::nullptr_t) {
200 uniform_buffer_alignment = 0; 230 max_uniform_buffers.fill(std::numeric_limits<u32>::max());
231 uniform_buffer_alignment = 4;
232 shader_storage_alignment = 4;
201 max_vertex_attributes = 16; 233 max_vertex_attributes = 16;
202 max_varyings = 15; 234 max_varyings = 15;
203 has_warp_intrinsics = true; 235 has_warp_intrinsics = true;
@@ -205,9 +237,6 @@ Device::Device(std::nullptr_t) {
205 has_vertex_viewport_layer = true; 237 has_vertex_viewport_layer = true;
206 has_image_load_formatted = true; 238 has_image_load_formatted = true;
207 has_variable_aoffi = true; 239 has_variable_aoffi = true;
208 has_component_indexing_bug = false;
209 has_broken_compute = false;
210 has_precise_bug = false;
211} 240}
212 241
213bool Device::TestVariableAoffi() { 242bool Device::TestVariableAoffi() {
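BuildMaxUniformBuffers and the other limit queries funnel through the GetInteger helper, whose body lies outside these hunks; it presumably amounts to a thin glGetIntegerv wrapper along these lines (a sketch, not the file's verbatim contents):

    template <typename T>
    T GetInteger(GLenum pname) {
        GLint temporary;
        glGetIntegerv(pname, &temporary);
        return static_cast<T>(temporary);
    }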
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index e915dbd86..98cca0254 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -24,6 +24,10 @@ public:
24 explicit Device(); 24 explicit Device();
25 explicit Device(std::nullptr_t); 25 explicit Device(std::nullptr_t);
26 26
27 u32 GetMaxUniformBuffers(Tegra::Engines::ShaderType shader_type) const noexcept {
28 return max_uniform_buffers[static_cast<std::size_t>(shader_type)];
29 }
30
27 const BaseBindings& GetBaseBindings(std::size_t stage_index) const noexcept { 31 const BaseBindings& GetBaseBindings(std::size_t stage_index) const noexcept {
28 return base_bindings[stage_index]; 32 return base_bindings[stage_index];
29 } 33 }
@@ -80,10 +84,6 @@ public:
80 return has_precise_bug; 84 return has_precise_bug;
81 } 85 }
82 86
83 bool HasBrokenCompute() const {
84 return has_broken_compute;
85 }
86
87 bool HasFastBufferSubData() const { 87 bool HasFastBufferSubData() const {
88 return has_fast_buffer_sub_data; 88 return has_fast_buffer_sub_data;
89 } 89 }
@@ -96,7 +96,8 @@ private:
96 static bool TestVariableAoffi(); 96 static bool TestVariableAoffi();
97 static bool TestPreciseBug(); 97 static bool TestPreciseBug();
98 98
99 std::array<BaseBindings, Tegra::Engines::MaxShaderTypes> base_bindings; 99 std::array<u32, Tegra::Engines::MaxShaderTypes> max_uniform_buffers{};
100 std::array<BaseBindings, Tegra::Engines::MaxShaderTypes> base_bindings{};
100 std::size_t uniform_buffer_alignment{}; 101 std::size_t uniform_buffer_alignment{};
101 std::size_t shader_storage_alignment{}; 102 std::size_t shader_storage_alignment{};
102 u32 max_vertex_attributes{}; 103 u32 max_vertex_attributes{};
@@ -109,7 +110,6 @@ private:
109 bool has_variable_aoffi{}; 110 bool has_variable_aoffi{};
110 bool has_component_indexing_bug{}; 111 bool has_component_indexing_bug{};
111 bool has_precise_bug{}; 112 bool has_precise_bug{};
112 bool has_broken_compute{};
113 bool has_fast_buffer_sub_data{}; 113 bool has_fast_buffer_sub_data{};
114 bool use_assembly_shaders{}; 114 bool use_assembly_shaders{};
115}; 115};
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 8116a5daa..f802fd384 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -54,6 +54,12 @@ MICROPROFILE_DEFINE(OpenGL_PrimitiveAssembly, "OpenGL", "Prim Asmbl", MP_RGB(255
54 54
55namespace { 55namespace {
56 56
57constexpr std::size_t NUM_CONST_BUFFERS_PER_STAGE = 18;
58constexpr std::size_t NUM_CONST_BUFFERS_BYTES_PER_STAGE =
59 NUM_CONST_BUFFERS_PER_STAGE * Maxwell::MaxConstBufferSize;
60constexpr std::size_t TOTAL_CONST_BUFFER_BYTES =
61 NUM_CONST_BUFFERS_BYTES_PER_STAGE * Maxwell::MaxShaderStage;
62
57constexpr std::size_t NumSupportedVertexAttributes = 16; 63constexpr std::size_t NumSupportedVertexAttributes = 16;
58 64
59template <typename Engine, typename Entry> 65template <typename Engine, typename Entry>
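For scale: assuming Maxwell::MaxConstBufferSize is still the 64 KiB defined in maxwell_3d.h and Maxwell::MaxShaderStage is 5, the unified uniform buffer allocated below comes to 18 x 64 KiB x 5 = 5.625 MiB of device memory.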
@@ -87,6 +93,34 @@ std::size_t GetConstBufferSize(const Tegra::Engines::ConstBufferInfo& buffer,
87 return buffer.size; 93 return buffer.size;
88} 94}
89 95
96/// Translates hardware transform feedback indices
97/// @param location Hardware location
98/// @return Pair of ARB_transform_feedback3 token stream first and third arguments
99/// @note Read https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_transform_feedback3.txt
100std::pair<GLint, GLint> TransformFeedbackEnum(u8 location) {
101 const u8 index = location / 4;
102 if (index >= 8 && index <= 39) {
103 return {GL_GENERIC_ATTRIB_NV, index - 8};
104 }
105 if (index >= 48 && index <= 55) {
106 return {GL_TEXTURE_COORD_NV, index - 48};
107 }
108 switch (index) {
109 case 7:
110 return {GL_POSITION, 0};
111 case 40:
112 return {GL_PRIMARY_COLOR_NV, 0};
113 case 41:
114 return {GL_SECONDARY_COLOR_NV, 0};
115 case 42:
116 return {GL_BACK_PRIMARY_COLOR_NV, 0};
117 case 43:
118 return {GL_BACK_SECONDARY_COLOR_NV, 0};
119 }
120 UNIMPLEMENTED_MSG("index={}", static_cast<int>(index));
121 return {GL_POSITION, 0};
122}
123
90void oglEnable(GLenum cap, bool state) { 124void oglEnable(GLenum cap, bool state) {
91 (state ? glEnable : glDisable)(cap); 125 (state ? glEnable : glDisable)(cap);
92} 126}
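Worked examples of the TransformFeedbackEnum mapping above: location 28 has index 7 and yields {GL_POSITION, 0}; location 32 has index 8 and yields {GL_GENERIC_ATTRIB_NV, 0}; location 192 has index 48 and yields {GL_TEXTURE_COORD_NV, 0}.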
@@ -104,6 +138,9 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
104 screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} { 138 screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} {
105 CheckExtensions(); 139 CheckExtensions();
106 140
141 unified_uniform_buffer.Create();
142 glNamedBufferStorage(unified_uniform_buffer.handle, TOTAL_CONST_BUFFER_BYTES, nullptr, 0);
143
107 if (device.UseAssemblyShaders()) { 144 if (device.UseAssemblyShaders()) {
108 glCreateBuffers(static_cast<GLsizei>(staging_cbufs.size()), staging_cbufs.data()); 145 glCreateBuffers(static_cast<GLsizei>(staging_cbufs.size()), staging_cbufs.data());
109 for (const GLuint cbuf : staging_cbufs) { 146 for (const GLuint cbuf : staging_cbufs) {
@@ -655,10 +692,6 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
655} 692}
656 693
657void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { 694void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
658 if (device.HasBrokenCompute()) {
659 return;
660 }
661
662 buffer_cache.Acquire(); 695 buffer_cache.Acquire();
663 current_cbuf = 0; 696 current_cbuf = 0;
664 697
@@ -846,34 +879,56 @@ void RasterizerOpenGL::SetupDrawConstBuffers(std::size_t stage_index, const Shad
846 MICROPROFILE_SCOPE(OpenGL_UBO); 879 MICROPROFILE_SCOPE(OpenGL_UBO);
847 const auto& stages = system.GPU().Maxwell3D().state.shader_stages; 880 const auto& stages = system.GPU().Maxwell3D().state.shader_stages;
848 const auto& shader_stage = stages[stage_index]; 881 const auto& shader_stage = stages[stage_index];
882 const auto& entries = shader->GetEntries();
883 const bool use_unified = entries.use_unified_uniforms;
884 const std::size_t base_unified_offset = stage_index * NUM_CONST_BUFFERS_BYTES_PER_STAGE;
849 885
850 u32 binding = 886 const auto base_bindings = device.GetBaseBindings(stage_index);
851 device.UseAssemblyShaders() ? 0 : device.GetBaseBindings(stage_index).uniform_buffer; 887 u32 binding = device.UseAssemblyShaders() ? 0 : base_bindings.uniform_buffer;
852 for (const auto& entry : shader->GetEntries().const_buffers) { 888 for (const auto& entry : entries.const_buffers) {
853 const auto& buffer = shader_stage.const_buffers[entry.GetIndex()]; 889 const u32 index = entry.GetIndex();
854 SetupConstBuffer(PARAMETER_LUT[stage_index], binding++, buffer, entry); 890 const auto& buffer = shader_stage.const_buffers[index];
891 SetupConstBuffer(PARAMETER_LUT[stage_index], binding, buffer, entry, use_unified,
892 base_unified_offset + index * Maxwell::MaxConstBufferSize);
893 ++binding;
894 }
895 if (use_unified) {
896 const u32 index = static_cast<u32>(base_bindings.shader_storage_buffer +
897 entries.global_memory_entries.size());
898 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, index, unified_uniform_buffer.handle,
899 base_unified_offset, NUM_CONST_BUFFERS_BYTES_PER_STAGE);
855 } 900 }
856} 901}
857 902
858void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) { 903void RasterizerOpenGL::SetupComputeConstBuffers(const Shader& kernel) {
859 MICROPROFILE_SCOPE(OpenGL_UBO); 904 MICROPROFILE_SCOPE(OpenGL_UBO);
860 const auto& launch_desc = system.GPU().KeplerCompute().launch_description; 905 const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
906 const auto& entries = kernel->GetEntries();
907 const bool use_unified = entries.use_unified_uniforms;
861 908
862 u32 binding = 0; 909 u32 binding = 0;
863 for (const auto& entry : kernel->GetEntries().const_buffers) { 910 for (const auto& entry : entries.const_buffers) {
864 const auto& config = launch_desc.const_buffer_config[entry.GetIndex()]; 911 const auto& config = launch_desc.const_buffer_config[entry.GetIndex()];
865 const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value(); 912 const std::bitset<8> mask = launch_desc.const_buffer_enable_mask.Value();
866 Tegra::Engines::ConstBufferInfo buffer; 913 Tegra::Engines::ConstBufferInfo buffer;
867 buffer.address = config.Address(); 914 buffer.address = config.Address();
868 buffer.size = config.size; 915 buffer.size = config.size;
869 buffer.enabled = mask[entry.GetIndex()]; 916 buffer.enabled = mask[entry.GetIndex()];
870 SetupConstBuffer(GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV, binding++, buffer, entry); 917 SetupConstBuffer(GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV, binding, buffer, entry,
918 use_unified, entry.GetIndex() * Maxwell::MaxConstBufferSize);
919 ++binding;
920 }
921 if (use_unified) {
922 const GLuint index = static_cast<GLuint>(entries.global_memory_entries.size());
923 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, index, unified_uniform_buffer.handle, 0,
924 NUM_CONST_BUFFERS_BYTES_PER_STAGE);
871 } 925 }
872} 926}
873 927
874void RasterizerOpenGL::SetupConstBuffer(GLenum stage, u32 binding, 928void RasterizerOpenGL::SetupConstBuffer(GLenum stage, u32 binding,
875 const Tegra::Engines::ConstBufferInfo& buffer, 929 const Tegra::Engines::ConstBufferInfo& buffer,
876 const ConstBufferEntry& entry) { 930 const ConstBufferEntry& entry, bool use_unified,
931 std::size_t unified_offset) {
877 if (!buffer.enabled) { 932 if (!buffer.enabled) {
878 // Set values to zero to unbind buffers 933 // Set values to zero to unbind buffers
879 if (device.UseAssemblyShaders()) { 934 if (device.UseAssemblyShaders()) {
@@ -889,20 +944,29 @@ void RasterizerOpenGL::SetupConstBuffer(GLenum stage, u32 binding,
889 // UBO alignment requirements. 944 // UBO alignment requirements.
890 const std::size_t size = Common::AlignUp(GetConstBufferSize(buffer, entry), sizeof(GLvec4)); 945 const std::size_t size = Common::AlignUp(GetConstBufferSize(buffer, entry), sizeof(GLvec4));
891 946
892 const auto alignment = device.GetUniformBufferAlignment(); 947 const bool fast_upload = !use_unified && device.HasFastBufferSubData();
893 auto [cbuf, offset] = buffer_cache.UploadMemory(buffer.address, size, alignment, false, 948
894 device.HasFastBufferSubData()); 949 const std::size_t alignment = use_unified ? 4 : device.GetUniformBufferAlignment();
895 if (!device.UseAssemblyShaders()) { 950 const GPUVAddr gpu_addr = buffer.address;
896 glBindBufferRange(GL_UNIFORM_BUFFER, binding, cbuf, offset, size); 951 auto [cbuf, offset] = buffer_cache.UploadMemory(gpu_addr, size, alignment, false, fast_upload);
952
953 if (device.UseAssemblyShaders()) {
954 UNIMPLEMENTED_IF(use_unified);
955 if (offset != 0) {
956 const GLuint staging_cbuf = staging_cbufs[current_cbuf++];
957 glCopyNamedBufferSubData(cbuf, staging_cbuf, offset, 0, size);
958 cbuf = staging_cbuf;
959 offset = 0;
960 }
961 glBindBufferRangeNV(stage, binding, cbuf, offset, size);
897 return; 962 return;
898 } 963 }
899 if (offset != 0) { 964
900 const GLuint staging_cbuf = staging_cbufs[current_cbuf++]; 965 if (use_unified) {
901 glCopyNamedBufferSubData(cbuf, staging_cbuf, offset, 0, size); 966 glCopyNamedBufferSubData(cbuf, unified_uniform_buffer.handle, offset, unified_offset, size);
902 cbuf = staging_cbuf; 967 } else {
903 offset = 0; 968 glBindBufferRange(GL_UNIFORM_BUFFER, binding, cbuf, offset, size);
904 } 969 }
905 glBindBufferRangeNV(stage, binding, cbuf, offset, size);
906} 970}
907 971
908void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) { 972void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader) {
@@ -977,16 +1041,12 @@ void RasterizerOpenGL::SetupTexture(u32 binding, const Tegra::Texture::FullTextu
977 glBindTextureUnit(binding, 0); 1041 glBindTextureUnit(binding, 0);
978 return; 1042 return;
979 } 1043 }
980 glBindTextureUnit(binding, view->GetTexture()); 1044 const GLuint handle = view->GetTexture(texture.tic.x_source, texture.tic.y_source,
981 1045 texture.tic.z_source, texture.tic.w_source);
982 if (view->GetSurfaceParams().IsBuffer()) { 1046 glBindTextureUnit(binding, handle);
983 return; 1047 if (!view->GetSurfaceParams().IsBuffer()) {
1048 glBindSampler(binding, sampler_cache.GetSampler(texture.tsc));
984 } 1049 }
985 // Apply swizzle to textures that are not buffers.
986 view->ApplySwizzle(texture.tic.x_source, texture.tic.y_source, texture.tic.z_source,
987 texture.tic.w_source);
988
989 glBindSampler(binding, sampler_cache.GetSampler(texture.tsc));
990} 1050}
991 1051
992void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& shader) { 1052void RasterizerOpenGL::SetupDrawImages(std::size_t stage_index, const Shader& shader) {
@@ -1015,14 +1075,11 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
1015 glBindImageTexture(binding, 0, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R8); 1075 glBindImageTexture(binding, 0, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R8);
1016 return; 1076 return;
1017 } 1077 }
1018 if (!tic.IsBuffer()) {
1019 view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
1020 }
1021 if (entry.is_written) { 1078 if (entry.is_written) {
1022 view->MarkAsModified(texture_cache.Tick()); 1079 view->MarkAsModified(texture_cache.Tick());
1023 } 1080 }
1024 glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE, 1081 const GLuint handle = view->GetTexture(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
1025 view->GetFormat()); 1082 glBindImageTexture(binding, handle, 0, GL_TRUE, 0, GL_READ_WRITE, view->GetFormat());
1026} 1083}
1027 1084
1028void RasterizerOpenGL::SyncViewport() { 1085void RasterizerOpenGL::SyncViewport() {
@@ -1031,6 +1088,26 @@ void RasterizerOpenGL::SyncViewport() {
1031 const auto& regs = gpu.regs; 1088 const auto& regs = gpu.regs;
1032 1089
1033 const bool dirty_viewport = flags[Dirty::Viewports]; 1090 const bool dirty_viewport = flags[Dirty::Viewports];
1091 const bool dirty_clip_control = flags[Dirty::ClipControl];
1092
1093 if (dirty_clip_control || flags[Dirty::FrontFace]) {
1094 flags[Dirty::FrontFace] = false;
1095
1096 GLenum mode = MaxwellToGL::FrontFace(regs.front_face);
1097 if (regs.screen_y_control.triangle_rast_flip != 0 &&
1098 regs.viewport_transform[0].scale_y < 0.0f) {
1099 switch (mode) {
1100 case GL_CW:
1101 mode = GL_CCW;
1102 break;
1103 case GL_CCW:
1104 mode = GL_CW;
1105 break;
1106 }
1107 }
1108 glFrontFace(mode);
1109 }
1110
1034 if (dirty_viewport || flags[Dirty::ClipControl]) { 1111 if (dirty_viewport || flags[Dirty::ClipControl]) {
1035 flags[Dirty::ClipControl] = false; 1112 flags[Dirty::ClipControl] = false;
1036 1113
@@ -1128,11 +1205,6 @@ void RasterizerOpenGL::SyncCullMode() {
1128 glDisable(GL_CULL_FACE); 1205 glDisable(GL_CULL_FACE);
1129 } 1206 }
1130 } 1207 }
1131
1132 if (flags[Dirty::FrontFace]) {
1133 flags[Dirty::FrontFace] = false;
1134 glFrontFace(MaxwellToGL::FrontFace(regs.front_face));
1135 }
1136} 1208}
1137 1209
1138void RasterizerOpenGL::SyncPrimitiveRestart() { 1210void RasterizerOpenGL::SyncPrimitiveRestart() {
@@ -1503,12 +1575,70 @@ void RasterizerOpenGL::SyncFramebufferSRGB() {
1503 oglEnable(GL_FRAMEBUFFER_SRGB, gpu.regs.framebuffer_srgb); 1575 oglEnable(GL_FRAMEBUFFER_SRGB, gpu.regs.framebuffer_srgb);
1504} 1576}
1505 1577
1578void RasterizerOpenGL::SyncTransformFeedback() {
1579 // TODO(Rodrigo): Inject SKIP_COMPONENTS*_NV when required. An unimplemented message will signal
1580 // when this is required.
1581 const auto& regs = system.GPU().Maxwell3D().regs;
1582
1583 static constexpr std::size_t STRIDE = 3;
1584 std::array<GLint, 128 * STRIDE * Maxwell::NumTransformFeedbackBuffers> attribs;
1585 std::array<GLint, Maxwell::NumTransformFeedbackBuffers> streams;
1586
1587 GLint* cursor = attribs.data();
1588 GLint* current_stream = streams.data();
1589
1590 for (std::size_t feedback = 0; feedback < Maxwell::NumTransformFeedbackBuffers; ++feedback) {
1591 const auto& layout = regs.tfb_layouts[feedback];
1592 UNIMPLEMENTED_IF_MSG(layout.stride != layout.varying_count * 4, "Stride padding");
1593 if (layout.varying_count == 0) {
1594 continue;
1595 }
1596
1597 *current_stream = static_cast<GLint>(feedback);
1598 if (current_stream != streams.data()) {
1599            // When moving to a new stream, push the expected NEXT_BUFFER token first
1600 cursor[0] = GL_NEXT_BUFFER_NV;
1601 cursor[1] = 0;
1602 cursor[2] = 0;
1603 cursor += STRIDE;
1604 }
1605 ++current_stream;
1606
1607 const auto& locations = regs.tfb_varying_locs[feedback];
1608 std::optional<u8> current_index;
1609 for (u32 offset = 0; offset < layout.varying_count; ++offset) {
1610 const u8 location = locations[offset];
1611 const u8 index = location / 4;
1612
1613 if (current_index == index) {
1614 // Increase number of components of the previous attachment
1615 ++cursor[-2];
1616 continue;
1617 }
1618 current_index = index;
1619
1620 std::tie(cursor[0], cursor[2]) = TransformFeedbackEnum(location);
1621 cursor[1] = 1;
1622 cursor += STRIDE;
1623 }
1624 }
1625
1626 const GLsizei num_attribs = static_cast<GLsizei>((cursor - attribs.data()) / STRIDE);
1627 const GLsizei num_strides = static_cast<GLsizei>(current_stream - streams.data());
1628 glTransformFeedbackStreamAttribsNV(num_attribs, attribs.data(), num_strides, streams.data(),
1629 GL_INTERLEAVED_ATTRIBS);
1630}
1631
1506void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) { 1632void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
1507 const auto& regs = system.GPU().Maxwell3D().regs; 1633 const auto& regs = system.GPU().Maxwell3D().regs;
1508 if (regs.tfb_enabled == 0) { 1634 if (regs.tfb_enabled == 0) {
1509 return; 1635 return;
1510 } 1636 }
1511 1637
1638 if (device.UseAssemblyShaders()) {
1639 SyncTransformFeedback();
1640 }
1641
1512 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || 1642 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
1513 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || 1643 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
1514 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry)); 1644 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::Geometry));
@@ -1535,6 +1665,10 @@ void RasterizerOpenGL::BeginTransformFeedback(GLenum primitive_mode) {
1535 static_cast<GLsizeiptr>(size)); 1665 static_cast<GLsizeiptr>(size));
1536 } 1666 }
1537 1667
1668    // We may have to call BeginTransformFeedbackNV here, since the NV and plain entry points
1669    // seem to dispatch to different implementations on Nvidia's driver (the function pointers
1670    // differ), but we are using ARB_transform_feedback3 features with NV_transform_feedback
1671    // interactions, and the ARB extension doesn't define the plain interaction. It just works.
1538 glBeginTransformFeedback(GL_POINTS); 1672 glBeginTransformFeedback(GL_POINTS);
1539} 1673}
1540 1674
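To make SyncTransformFeedback's token stream concrete: one enabled buffer whose varying locations are 28, 29, 30 and 31 (all index 7) collapses into the single triplet {GL_POSITION, 4, 0}, and a second enabled buffer first pushes {GL_NEXT_BUFFER_NV, 0, 0} before contributing its own triplets.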
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 87f7fe159..7abc8fdbd 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -107,7 +107,8 @@ private:
107 107
108 /// Configures a constant buffer. 108 /// Configures a constant buffer.
109 void SetupConstBuffer(GLenum stage, u32 binding, const Tegra::Engines::ConstBufferInfo& buffer, 109 void SetupConstBuffer(GLenum stage, u32 binding, const Tegra::Engines::ConstBufferInfo& buffer,
110 const ConstBufferEntry& entry); 110 const ConstBufferEntry& entry, bool use_unified,
111 std::size_t unified_offset);
111 112
112 /// Configures the current global memory entries to use for the draw command. 113 /// Configures the current global memory entries to use for the draw command.
113 void SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader); 114 void SetupDrawGlobalMemory(std::size_t stage_index, const Shader& shader);
@@ -201,6 +202,10 @@ private:
201 /// Syncs the framebuffer sRGB state to match the guest state 202 /// Syncs the framebuffer sRGB state to match the guest state
202 void SyncFramebufferSRGB(); 203 void SyncFramebufferSRGB();
203 204
205 /// Syncs transform feedback state to match guest state
206 /// @note Only valid on assembly shaders
207 void SyncTransformFeedback();
208
204 /// Begin a transform feedback 209 /// Begin a transform feedback
205 void BeginTransformFeedback(GLenum primitive_mode); 210 void BeginTransformFeedback(GLenum primitive_mode);
206 211
@@ -253,6 +258,7 @@ private:
253 Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; 258 Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram;
254 std::array<GLuint, NUM_CONSTANT_BUFFERS> staging_cbufs{}; 259 std::array<GLuint, NUM_CONSTANT_BUFFERS> staging_cbufs{};
255 std::size_t current_cbuf = 0; 260 std::size_t current_cbuf = 0;
261 OGLBuffer unified_uniform_buffer;
256 262
257    /// Number of commands queued to the OpenGL driver. Reset on flush. 263    /// Number of commands queued to the OpenGL driver. Reset on flush.
258 std::size_t num_queued_commands = 0; 264 std::size_t num_queued_commands = 0;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 4cd0f36cf..a991ca64a 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -241,8 +241,9 @@ Shader CachedShader::CreateStageFromMemory(const ShaderParameters& params,
241 entry.bindless_samplers = registry->GetBindlessSamplers(); 241 entry.bindless_samplers = registry->GetBindlessSamplers();
242 params.disk_cache.SaveEntry(std::move(entry)); 242 params.disk_cache.SaveEntry(std::move(entry));
243 243
244 return std::shared_ptr<CachedShader>(new CachedShader( 244 return std::shared_ptr<CachedShader>(
245 params.cpu_addr, size_in_bytes, std::move(registry), MakeEntries(ir), std::move(program))); 245 new CachedShader(params.cpu_addr, size_in_bytes, std::move(registry),
246 MakeEntries(params.device, ir, shader_type), std::move(program)));
246} 247}
247 248
248Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) { 249Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, ProgramCode code) {
@@ -265,8 +266,9 @@ Shader CachedShader::CreateKernelFromMemory(const ShaderParameters& params, Prog
265 entry.bindless_samplers = registry->GetBindlessSamplers(); 266 entry.bindless_samplers = registry->GetBindlessSamplers();
266 params.disk_cache.SaveEntry(std::move(entry)); 267 params.disk_cache.SaveEntry(std::move(entry));
267 268
268 return std::shared_ptr<CachedShader>(new CachedShader( 269 return std::shared_ptr<CachedShader>(
269 params.cpu_addr, size_in_bytes, std::move(registry), MakeEntries(ir), std::move(program))); 270 new CachedShader(params.cpu_addr, size_in_bytes, std::move(registry),
271 MakeEntries(params.device, ir, ShaderType::Compute), std::move(program)));
270} 272}
271 273
272Shader CachedShader::CreateFromCache(const ShaderParameters& params, 274Shader CachedShader::CreateFromCache(const ShaderParameters& params,
@@ -348,7 +350,7 @@ void ShaderCacheOpenGL::LoadDiskCache(const std::atomic_bool& stop_loading,
348 PrecompiledShader shader; 350 PrecompiledShader shader;
349 shader.program = std::move(program); 351 shader.program = std::move(program);
350 shader.registry = std::move(registry); 352 shader.registry = std::move(registry);
351 shader.entries = MakeEntries(ir); 353 shader.entries = MakeEntries(device, ir, entry.type);
352 354
353 std::scoped_lock lock{mutex}; 355 std::scoped_lock lock{mutex};
354 if (callback) { 356 if (callback) {
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 253484968..d6e30b321 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -61,8 +61,8 @@ struct TextureDerivates {};
61using TextureArgument = std::pair<Type, Node>; 61using TextureArgument = std::pair<Type, Node>;
62using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>; 62using TextureIR = std::variant<TextureOffset, TextureDerivates, TextureArgument>;
63 63
64constexpr u32 MAX_CONSTBUFFER_ELEMENTS = 64constexpr u32 MAX_CONSTBUFFER_SCALARS = static_cast<u32>(Maxwell::MaxConstBufferSize) / sizeof(u32);
65 static_cast<u32>(Maxwell::MaxConstBufferSize) / (4 * sizeof(float)); 65constexpr u32 MAX_CONSTBUFFER_ELEMENTS = MAX_CONSTBUFFER_SCALARS / sizeof(u32);
66 66
67constexpr std::string_view CommonDeclarations = R"(#define ftoi floatBitsToInt 67constexpr std::string_view CommonDeclarations = R"(#define ftoi floatBitsToInt
68#define ftou floatBitsToUint 68#define ftou floatBitsToUint
@@ -402,6 +402,13 @@ std::string FlowStackTopName(MetaStackClass stack) {
402 return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack)); 402 return fmt::format("{}_flow_stack_top", GetFlowStackPrefix(stack));
403} 403}
404 404
405bool UseUnifiedUniforms(const Device& device, const ShaderIR& ir, ShaderType stage) {
406 const u32 num_ubos = static_cast<u32>(ir.GetConstantBuffers().size());
407 // We waste one UBO for emulation
408 const u32 num_available_ubos = device.GetMaxUniformBuffers(stage) - 1;
409 return num_ubos > num_available_ubos;
410}
411
405struct GenericVaryingDescription { 412struct GenericVaryingDescription {
406 std::string name; 413 std::string name;
407 u8 first_element = 0; 414 u8 first_element = 0;
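The UseUnifiedUniforms helper in the hunk above is a plain capacity check; here is a self-contained illustration, where the limit values are hypothetical and GetMaxUniformBuffers is reduced to a plain integer parameter:

#include <cstdint>
#include <iostream>

// Fall back to one big SSBO when the shader declares more constant buffers
// than the driver exposes uniform-buffer bindings for, keeping one binding
// reserved for emulation.
bool UseUnifiedUniformsSketch(std::uint32_t num_ubos, std::uint32_t max_uniform_buffers) {
    const std::uint32_t num_available_ubos = max_uniform_buffers - 1; // one wasted for emulation
    return num_ubos > num_available_ubos;
}

int main() {
    std::cout << UseUnifiedUniformsSketch(18, 18) << '\n'; // 1: 18 > 17, unified path
    std::cout << UseUnifiedUniformsSketch(14, 18) << '\n'; // 0: regular UBOs fit
}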
@@ -412,8 +419,9 @@ class GLSLDecompiler final {
412public: 419public:
413 explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, const Registry& registry, 420 explicit GLSLDecompiler(const Device& device, const ShaderIR& ir, const Registry& registry,
414 ShaderType stage, std::string_view identifier, std::string_view suffix) 421 ShaderType stage, std::string_view identifier, std::string_view suffix)
415 : device{device}, ir{ir}, registry{registry}, stage{stage}, 422 : device{device}, ir{ir}, registry{registry}, stage{stage}, identifier{identifier},
416 identifier{identifier}, suffix{suffix}, header{ir.GetHeader()} { 423 suffix{suffix}, header{ir.GetHeader()}, use_unified_uniforms{
424 UseUnifiedUniforms(device, ir, stage)} {
417 if (stage != ShaderType::Compute) { 425 if (stage != ShaderType::Compute) {
418 transform_feedback = BuildTransformFeedback(registry.GetGraphicsInfo()); 426 transform_feedback = BuildTransformFeedback(registry.GetGraphicsInfo());
419 } 427 }
@@ -618,7 +626,9 @@ private:
618 break; 626 break;
619 } 627 }
620 } 628 }
621 if (stage != ShaderType::Vertex || device.HasVertexViewportLayer()) { 629
630 if (stage != ShaderType::Geometry &&
631 (stage != ShaderType::Vertex || device.HasVertexViewportLayer())) {
622 if (ir.UsesLayer()) { 632 if (ir.UsesLayer()) {
623 code.AddLine("int gl_Layer;"); 633 code.AddLine("int gl_Layer;");
624 } 634 }
@@ -647,6 +657,16 @@ private:
647 --code.scope; 657 --code.scope;
648 code.AddLine("}};"); 658 code.AddLine("}};");
649 code.AddNewLine(); 659 code.AddNewLine();
660
661 if (stage == ShaderType::Geometry) {
662 if (ir.UsesLayer()) {
663 code.AddLine("out int gl_Layer;");
664 }
665 if (ir.UsesViewportIndex()) {
666 code.AddLine("out int gl_ViewportIndex;");
667 }
668 }
669 code.AddNewLine();
650 } 670 }
651 671
652 void DeclareRegisters() { 672 void DeclareRegisters() {
@@ -834,12 +854,24 @@ private:
834 } 854 }
835 855
836 void DeclareConstantBuffers() { 856 void DeclareConstantBuffers() {
857 if (use_unified_uniforms) {
858 const u32 binding = device.GetBaseBindings(stage).shader_storage_buffer +
859 static_cast<u32>(ir.GetGlobalMemory().size());
860 code.AddLine("layout (std430, binding = {}) readonly buffer UnifiedUniforms {{",
861 binding);
862 code.AddLine(" uint cbufs[];");
863 code.AddLine("}};");
864 code.AddNewLine();
865 return;
866 }
867
837 u32 binding = device.GetBaseBindings(stage).uniform_buffer; 868 u32 binding = device.GetBaseBindings(stage).uniform_buffer;
838 for (const auto& buffers : ir.GetConstantBuffers()) { 869 for (const auto [index, info] : ir.GetConstantBuffers()) {
839 const auto index = buffers.first; 870 const u32 num_elements = Common::AlignUp(info.GetSize(), 4) / 4;
871 const u32 size = info.IsIndirect() ? MAX_CONSTBUFFER_ELEMENTS : num_elements;
840 code.AddLine("layout (std140, binding = {}) uniform {} {{", binding++, 872 code.AddLine("layout (std140, binding = {}) uniform {} {{", binding++,
841 GetConstBufferBlock(index)); 873 GetConstBufferBlock(index));
842 code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), MAX_CONSTBUFFER_ELEMENTS); 874 code.AddLine(" uvec4 {}[{}];", GetConstBuffer(index), size);
843 code.AddLine("}};"); 875 code.AddLine("}};");
844 code.AddNewLine(); 876 code.AddNewLine();
845 } 877 }
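Reconstructed from the AddLine calls above, these are the two shapes of GLSL the function emits; block names and binding numbers are illustrative, and 4096 assumes the 64 KiB Maxwell const buffer limit (65536 / sizeof(u32) / 4):

#include <string_view>

// Unified path: every const buffer is read out of one readonly SSBO of scalars.
constexpr std::string_view kUnifiedDecl = R"(layout (std430, binding = 6) readonly buffer UnifiedUniforms {
    uint cbufs[];
};)";

// Regular path: one std140 UBO per const buffer; indirectly addressed buffers
// are declared with the full MAX_CONSTBUFFER_ELEMENTS worth of uvec4 elements.
constexpr std::string_view kUboDecl = R"(layout (std140, binding = 0) uniform cbuf_block_0 {
    uvec4 cbuf0[4096];
};)";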
@@ -1038,42 +1070,51 @@ private:
1038 1070
1039 if (const auto cbuf = std::get_if<CbufNode>(&*node)) { 1071 if (const auto cbuf = std::get_if<CbufNode>(&*node)) {
1040 const Node offset = cbuf->GetOffset(); 1072 const Node offset = cbuf->GetOffset();
1073 const u32 base_unified_offset = cbuf->GetIndex() * MAX_CONSTBUFFER_SCALARS;
1074
1041 if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { 1075 if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) {
1042 // Direct access 1076 // Direct access
1043 const u32 offset_imm = immediate->GetValue(); 1077 const u32 offset_imm = immediate->GetValue();
1044 ASSERT_MSG(offset_imm % 4 == 0, "Unaligned cbuf direct access"); 1078 ASSERT_MSG(offset_imm % 4 == 0, "Unaligned cbuf direct access");
1045 return {fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()), 1079 if (use_unified_uniforms) {
1046 offset_imm / (4 * 4), (offset_imm / 4) % 4), 1080 return {fmt::format("cbufs[{}]", base_unified_offset + offset_imm / 4),
1081 Type::Uint};
1082 } else {
1083 return {fmt::format("{}[{}][{}]", GetConstBuffer(cbuf->GetIndex()),
1084 offset_imm / (4 * 4), (offset_imm / 4) % 4),
1085 Type::Uint};
1086 }
1087 }
1088
1089 // Indirect access
1090 if (use_unified_uniforms) {
1091 return {fmt::format("cbufs[{} + ({} >> 2)]", base_unified_offset,
1092 Visit(offset).AsUint()),
1047 Type::Uint}; 1093 Type::Uint};
1048 } 1094 }
1049 1095
1050 if (std::holds_alternative<OperationNode>(*offset)) { 1096 const std::string final_offset = code.GenerateTemporary();
1051 // Indirect access 1097 code.AddLine("uint {} = {} >> 2;", final_offset, Visit(offset).AsUint());
1052 const std::string final_offset = code.GenerateTemporary();
1053 code.AddLine("uint {} = {} >> 2;", final_offset, Visit(offset).AsUint());
1054 1098
1055 if (!device.HasComponentIndexingBug()) { 1099 if (!device.HasComponentIndexingBug()) {
1056 return {fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()), 1100 return {fmt::format("{}[{} >> 2][{} & 3]", GetConstBuffer(cbuf->GetIndex()),
1057 final_offset, final_offset), 1101 final_offset, final_offset),
1058 Type::Uint}; 1102 Type::Uint};
1059 }
1060
1061 // AMD's proprietary GLSL compiler emits broken code for variable component access.
1062 // To bypass this driver bug, generate 4 ifs, one per component.
1063 const std::string pack = code.GenerateTemporary();
1064 code.AddLine("uvec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()),
1065 final_offset);
1066
1067 const std::string result = code.GenerateTemporary();
1068 code.AddLine("uint {};", result);
1069 for (u32 swizzle = 0; swizzle < 4; ++swizzle) {
1070 code.AddLine("if (({} & 3) == {}) {} = {}{};", final_offset, swizzle, result,
1071 pack, GetSwizzle(swizzle));
1072 }
1073 return {result, Type::Uint};
1074 } 1103 }
1075 1104
1076 UNREACHABLE_MSG("Unmanaged offset node type"); 1105 // AMD's proprietary GLSL compiler emits broken code for variable component access.
1106 // To bypass this driver bug, generate 4 ifs, one per component.
1107 const std::string pack = code.GenerateTemporary();
1108 code.AddLine("uvec4 {} = {}[{} >> 2];", pack, GetConstBuffer(cbuf->GetIndex()),
1109 final_offset);
1110
1111 const std::string result = code.GenerateTemporary();
1112 code.AddLine("uint {};", result);
1113 for (u32 swizzle = 0; swizzle < 4; ++swizzle) {
1114 code.AddLine("if (({} & 3) == {}) {} = {}{};", final_offset, swizzle, result, pack,
1115 GetSwizzle(swizzle));
1116 }
1117 return {result, Type::Uint};
1077 } 1118 }
1078 1119
1079 if (const auto gmem = std::get_if<GmemNode>(&*node)) { 1120 if (const auto gmem = std::get_if<GmemNode>(&*node)) {
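The unified addressing above flattens a (cbuf index, byte offset) pair into one scalar index; a worked example, assuming the 64 KiB Maxwell const buffer limit so MAX_CONSTBUFFER_SCALARS is 16384:

#include <cstdint>
#include <iostream>

constexpr std::uint32_t MAX_CONSTBUFFER_SCALARS = 0x10000 / sizeof(std::uint32_t);

// Each const buffer occupies a fixed 16384-scalar window inside cbufs[].
constexpr std::uint32_t UnifiedIndex(std::uint32_t cbuf_index, std::uint32_t byte_offset) {
    return cbuf_index * MAX_CONSTBUFFER_SCALARS + byte_offset / 4;
}

int main() {
    // c3[0x20] (byte offset 32, scalar 8) lands at cbufs[3 * 16384 + 8].
    std::cout << UnifiedIndex(3, 0x20) << '\n'; // 49160
}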
@@ -2344,7 +2385,12 @@ private:
2344 return {}; 2385 return {};
2345 } 2386 }
2346 2387
2347 Expression MemoryBarrierGL(Operation) { 2388 Expression MemoryBarrierGroup(Operation) {
2389 code.AddLine("groupMemoryBarrier();");
2390 return {};
2391 }
2392
2393 Expression MemoryBarrierGlobal(Operation) {
2348 code.AddLine("memoryBarrier();"); 2394 code.AddLine("memoryBarrier();");
2349 return {}; 2395 return {};
2350 } 2396 }
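Splitting MemoryBarrierGL into the two operations above mirrors GLSL's two barrier scopes; a minimal sketch of the mapping (the enum is illustrative, not yuzu's OperationCode):

#include <string_view>

enum class MemoryBarrierKind { Group, Global };

// groupMemoryBarrier() orders memory accesses for the local work group only;
// memoryBarrier() orders them for all shader invocations.
constexpr std::string_view EmitBarrier(MemoryBarrierKind kind) {
    return kind == MemoryBarrierKind::Group ? "groupMemoryBarrier();" : "memoryBarrier();";
}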
@@ -2591,7 +2637,8 @@ private:
2591 &GLSLDecompiler::ShuffleIndexed, 2637 &GLSLDecompiler::ShuffleIndexed,
2592 2638
2593 &GLSLDecompiler::Barrier, 2639 &GLSLDecompiler::Barrier,
2594 &GLSLDecompiler::MemoryBarrierGL, 2640 &GLSLDecompiler::MemoryBarrierGroup,
2641 &GLSLDecompiler::MemoryBarrierGlobal,
2595 }; 2642 };
2596 static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); 2643 static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
2597 2644
@@ -2704,6 +2751,7 @@ private:
2704 const std::string_view identifier; 2751 const std::string_view identifier;
2705 const std::string_view suffix; 2752 const std::string_view suffix;
2706 const Header header; 2753 const Header header;
2754 const bool use_unified_uniforms;
2707 std::unordered_map<u8, VaryingTFB> transform_feedback; 2755 std::unordered_map<u8, VaryingTFB> transform_feedback;
2708 2756
2709 ShaderWriter code; 2757 ShaderWriter code;
@@ -2899,7 +2947,7 @@ void GLSLDecompiler::DecompileAST() {
2899 2947
2900} // Anonymous namespace 2948} // Anonymous namespace
2901 2949
2902ShaderEntries MakeEntries(const VideoCommon::Shader::ShaderIR& ir) { 2950ShaderEntries MakeEntries(const Device& device, const ShaderIR& ir, ShaderType stage) {
2903 ShaderEntries entries; 2951 ShaderEntries entries;
2904 for (const auto& cbuf : ir.GetConstantBuffers()) { 2952 for (const auto& cbuf : ir.GetConstantBuffers()) {
2905 entries.const_buffers.emplace_back(cbuf.second.GetMaxOffset(), cbuf.second.IsIndirect(), 2953 entries.const_buffers.emplace_back(cbuf.second.GetMaxOffset(), cbuf.second.IsIndirect(),
@@ -2920,6 +2968,7 @@ ShaderEntries MakeEntries(const VideoCommon::Shader::ShaderIR& ir) {
2920 entries.clip_distances = (clip_distances[i] ? 1U : 0U) << i; 2968 entries.clip_distances = (clip_distances[i] ? 1U : 0U) << i;
2921 } 2969 }
2922 entries.shader_length = ir.GetLength(); 2970 entries.shader_length = ir.GetLength();
2971 entries.use_unified_uniforms = UseUnifiedUniforms(device, ir, stage);
2923 return entries; 2972 return entries;
2924} 2973}
2925 2974
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index e8a178764..451c9689a 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -53,11 +53,13 @@ struct ShaderEntries {
53 std::vector<GlobalMemoryEntry> global_memory_entries; 53 std::vector<GlobalMemoryEntry> global_memory_entries;
54 std::vector<SamplerEntry> samplers; 54 std::vector<SamplerEntry> samplers;
55 std::vector<ImageEntry> images; 55 std::vector<ImageEntry> images;
56 u32 clip_distances{};
57 std::size_t shader_length{}; 56 std::size_t shader_length{};
57 u32 clip_distances{};
58 bool use_unified_uniforms{};
58}; 59};
59 60
60ShaderEntries MakeEntries(const VideoCommon::Shader::ShaderIR& ir); 61ShaderEntries MakeEntries(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
62 Tegra::Engines::ShaderType stage);
61 63
62std::string DecompileShader(const Device& device, const VideoCommon::Shader::ShaderIR& ir, 64std::string DecompileShader(const Device& device, const VideoCommon::Shader::ShaderIR& ir,
63 const VideoCommon::Shader::Registry& registry, 65 const VideoCommon::Shader::Registry& registry,
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.cpp b/src/video_core/renderer_opengl/gl_texture_cache.cpp
index 94fbd2a22..61505879b 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_texture_cache.cpp
@@ -35,7 +35,7 @@ MICROPROFILE_DEFINE(OpenGL_Texture_Buffer_Copy, "OpenGL", "Texture Buffer Copy",
35namespace { 35namespace {
36 36
37struct FormatTuple { 37struct FormatTuple {
38 GLint internal_format; 38 GLenum internal_format;
39 GLenum format = GL_NONE; 39 GLenum format = GL_NONE;
40 GLenum type = GL_NONE; 40 GLenum type = GL_NONE;
41}; 41};
@@ -238,6 +238,12 @@ OGLTexture CreateTexture(const SurfaceParams& params, GLenum target, GLenum inte
238 return texture; 238 return texture;
239} 239}
240 240
241constexpr u32 EncodeSwizzle(SwizzleSource x_source, SwizzleSource y_source, SwizzleSource z_source,
242 SwizzleSource w_source) {
243 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
244 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
245}
246
241} // Anonymous namespace 247} // Anonymous namespace
242 248
243CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params, 249CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& params,
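The EncodeSwizzle helper added above packs four sources into one u32 cache key; a self-contained version, assuming yuzu's Tegra SwizzleSource values (Zero = 0, R = 2, G = 3, B = 4, A = 5):

#include <cstdint>
#include <iostream>

enum class SwizzleSource : std::uint32_t { Zero = 0, R = 2, G = 3, B = 4, A = 5 };

constexpr std::uint32_t EncodeSwizzleSketch(SwizzleSource x, SwizzleSource y, SwizzleSource z,
                                            SwizzleSource w) {
    return (static_cast<std::uint32_t>(x) << 24) | (static_cast<std::uint32_t>(y) << 16) |
           (static_cast<std::uint32_t>(z) << 8) | static_cast<std::uint32_t>(w);
}

int main() {
    // The identity swizzle RGBA packs to 0x02030405, one key in the view cache.
    std::cout << std::hex << EncodeSwizzleSketch(SwizzleSource::R, SwizzleSource::G,
                                                 SwizzleSource::B, SwizzleSource::A) << '\n';
}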
@@ -257,9 +263,14 @@ CachedSurface::CachedSurface(const GPUVAddr gpu_addr, const SurfaceParams& param
257 target = GetTextureTarget(params.target); 263 target = GetTextureTarget(params.target);
258 texture = CreateTexture(params, target, internal_format, texture_buffer); 264 texture = CreateTexture(params, target, internal_format, texture_buffer);
259 DecorateSurfaceName(); 265 DecorateSurfaceName();
260 main_view = CreateViewInner( 266
261 ViewParams(params.target, 0, params.is_layered ? params.depth : 1, 0, params.num_levels), 267 u32 num_layers = 1;
262 true); 268 if (params.is_layered || params.target == SurfaceTarget::Texture3D) {
269 num_layers = params.depth;
270 }
271
272 main_view =
273 CreateViewInner(ViewParams(params.target, 0, num_layers, 0, params.num_levels), true);
263} 274}
264 275
265CachedSurface::~CachedSurface() = default; 276CachedSurface::~CachedSurface() = default;
@@ -381,7 +392,7 @@ void CachedSurface::DecorateSurfaceName() {
381} 392}
382 393
383void CachedSurfaceView::DecorateViewName(GPUVAddr gpu_addr, std::string prefix) { 394void CachedSurfaceView::DecorateViewName(GPUVAddr gpu_addr, std::string prefix) {
384 LabelGLObject(GL_TEXTURE, texture_view.handle, gpu_addr, prefix); 395 LabelGLObject(GL_TEXTURE, main_view.handle, gpu_addr, prefix);
385} 396}
386 397
387View CachedSurface::CreateView(const ViewParams& view_key) { 398View CachedSurface::CreateView(const ViewParams& view_key) {
@@ -397,32 +408,33 @@ View CachedSurface::CreateViewInner(const ViewParams& view_key, const bool is_pr
397} 408}
398 409
399CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& params, 410CachedSurfaceView::CachedSurfaceView(CachedSurface& surface, const ViewParams& params,
400 const bool is_proxy) 411 bool is_proxy)
401 : VideoCommon::ViewBase(params), surface{surface}, is_proxy{is_proxy} { 412 : VideoCommon::ViewBase(params), surface{surface}, format{surface.internal_format},
402 target = GetTextureTarget(params.target); 413 target{GetTextureTarget(params.target)}, is_proxy{is_proxy} {
403 format = GetFormatTuple(surface.GetSurfaceParams().pixel_format).internal_format;
404 if (!is_proxy) { 414 if (!is_proxy) {
405 texture_view = CreateTextureView(); 415 main_view = CreateTextureView();
406 } 416 }
407 swizzle = EncodeSwizzle(SwizzleSource::R, SwizzleSource::G, SwizzleSource::B, SwizzleSource::A);
408} 417}
409 418
410CachedSurfaceView::~CachedSurfaceView() = default; 419CachedSurfaceView::~CachedSurfaceView() = default;
411 420
412void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const { 421void CachedSurfaceView::Attach(GLenum attachment, GLenum fb_target) const {
413 ASSERT(params.num_levels == 1); 422 ASSERT(params.num_levels == 1);
414 423
424 if (params.target == SurfaceTarget::Texture3D) {
425 if (params.num_layers > 1) {
426 ASSERT(params.base_layer == 0);
427 glFramebufferTexture(fb_target, attachment, surface.texture.handle, params.base_level);
428 } else {
429 glFramebufferTexture3D(fb_target, attachment, target, surface.texture.handle,
430 params.base_level, params.base_layer);
431 }
432 return;
433 }
434
415 if (params.num_layers > 1) { 435 if (params.num_layers > 1) {
416 // Layered framebuffer attachments
417 UNIMPLEMENTED_IF(params.base_layer != 0); 436 UNIMPLEMENTED_IF(params.base_layer != 0);
418 437 glFramebufferTexture(fb_target, attachment, GetTexture(), 0);
419 switch (params.target) {
420 case SurfaceTarget::Texture2DArray:
421 glFramebufferTexture(target, attachment, GetTexture(), 0);
422 break;
423 default:
424 UNIMPLEMENTED();
425 }
426 return; 438 return;
427 } 439 }
428 440
@@ -430,16 +442,16 @@ void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
430 const GLuint texture = surface.GetTexture(); 442 const GLuint texture = surface.GetTexture();
431 switch (surface.GetSurfaceParams().target) { 443 switch (surface.GetSurfaceParams().target) {
432 case SurfaceTarget::Texture1D: 444 case SurfaceTarget::Texture1D:
433 glFramebufferTexture1D(target, attachment, view_target, texture, params.base_level); 445 glFramebufferTexture1D(fb_target, attachment, view_target, texture, params.base_level);
434 break; 446 break;
435 case SurfaceTarget::Texture2D: 447 case SurfaceTarget::Texture2D:
436 glFramebufferTexture2D(target, attachment, view_target, texture, params.base_level); 448 glFramebufferTexture2D(fb_target, attachment, view_target, texture, params.base_level);
437 break; 449 break;
438 case SurfaceTarget::Texture1DArray: 450 case SurfaceTarget::Texture1DArray:
439 case SurfaceTarget::Texture2DArray: 451 case SurfaceTarget::Texture2DArray:
440 case SurfaceTarget::TextureCubemap: 452 case SurfaceTarget::TextureCubemap:
441 case SurfaceTarget::TextureCubeArray: 453 case SurfaceTarget::TextureCubeArray:
442 glFramebufferTextureLayer(target, attachment, texture, params.base_level, 454 glFramebufferTextureLayer(fb_target, attachment, texture, params.base_level,
443 params.base_layer); 455 params.base_layer);
444 break; 456 break;
445 default: 457 default:
@@ -447,35 +459,62 @@ void CachedSurfaceView::Attach(GLenum attachment, GLenum target) const {
447 } 459 }
448} 460}
449 461
450void CachedSurfaceView::ApplySwizzle(SwizzleSource x_source, SwizzleSource y_source, 462GLuint CachedSurfaceView::GetTexture(SwizzleSource x_source, SwizzleSource y_source,
451 SwizzleSource z_source, SwizzleSource w_source) { 463 SwizzleSource z_source, SwizzleSource w_source) {
452 u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source); 464 if (GetSurfaceParams().IsBuffer()) {
453 if (new_swizzle == swizzle) 465 return GetTexture();
454 return; 466 }
455 swizzle = new_swizzle; 467 const u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
456 const std::array gl_swizzle = {GetSwizzleSource(x_source), GetSwizzleSource(y_source), 468 if (current_swizzle == new_swizzle) {
457 GetSwizzleSource(z_source), GetSwizzleSource(w_source)}; 469 return current_view;
458 const GLuint handle = GetTexture(); 470 }
459 const PixelFormat format = surface.GetSurfaceParams().pixel_format; 471 current_swizzle = new_swizzle;
460 switch (format) { 472
473 const auto [entry, is_cache_miss] = view_cache.try_emplace(new_swizzle);
474 OGLTextureView& view = entry->second;
475 if (!is_cache_miss) {
476 current_view = view.handle;
477 return view.handle;
478 }
479 view = CreateTextureView();
480 current_view = view.handle;
481
482 std::array swizzle{x_source, y_source, z_source, w_source};
483
484 switch (const PixelFormat format = GetSurfaceParams().pixel_format) {
461 case PixelFormat::Z24S8: 485 case PixelFormat::Z24S8:
462 case PixelFormat::Z32FS8: 486 case PixelFormat::Z32FS8:
463 case PixelFormat::S8Z24: 487 case PixelFormat::S8Z24:
464 glTextureParameteri(handle, GL_DEPTH_STENCIL_TEXTURE_MODE, 488 UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
489 glTextureParameteri(view.handle, GL_DEPTH_STENCIL_TEXTURE_MODE,
465 GetComponent(format, x_source == SwizzleSource::R)); 490 GetComponent(format, x_source == SwizzleSource::R));
466 break; 491
467 default: 492 // Make sure we sample the first component
468 glTextureParameteriv(handle, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle.data()); 493 std::transform(swizzle.begin(), swizzle.end(), swizzle.begin(), [](SwizzleSource value) {
494 return value == SwizzleSource::G ? SwizzleSource::R : value;
495 });
496 [[fallthrough]];
497 default: {
498 const std::array gl_swizzle = {GetSwizzleSource(swizzle[0]), GetSwizzleSource(swizzle[1]),
499 GetSwizzleSource(swizzle[2]), GetSwizzleSource(swizzle[3])};
500 glTextureParameteriv(view.handle, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle.data());
469 break; 501 break;
470 } 502 }
503 }
504 return view.handle;
471} 505}
472 506
473OGLTextureView CachedSurfaceView::CreateTextureView() const { 507OGLTextureView CachedSurfaceView::CreateTextureView() const {
474 OGLTextureView texture_view; 508 OGLTextureView texture_view;
475 texture_view.Create(); 509 texture_view.Create();
476 510
477 glTextureView(texture_view.handle, target, surface.texture.handle, format, params.base_level, 511 if (target == GL_TEXTURE_3D) {
478 params.num_levels, params.base_layer, params.num_layers); 512 glTextureView(texture_view.handle, target, surface.texture.handle, format,
513 params.base_level, params.num_levels, 0, 1);
514 } else {
515 glTextureView(texture_view.handle, target, surface.texture.handle, format,
516 params.base_level, params.num_levels, params.base_layer, params.num_layers);
517 }
479 ApplyTextureDefaults(surface.GetSurfaceParams(), texture_view.handle); 518 ApplyTextureDefaults(surface.GetSurfaceParams(), texture_view.handle);
480 519
481 return texture_view; 520 return texture_view;
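GetTexture above memoizes one texture view per swizzle key; the pattern in isolation, with OGLTextureView and the creation step reduced to stand-ins:

#include <cstdint>
#include <unordered_map>

struct OGLTextureView {
    std::uint32_t handle = 0;
};

class ViewCache {
public:
    std::uint32_t Get(std::uint32_t swizzle) {
        // try_emplace reports in one lookup whether this swizzle was cached.
        const auto [entry, is_cache_miss] = views.try_emplace(swizzle);
        if (is_cache_miss) {
            entry->second = Create(swizzle); // build the view once and memoize it
        }
        return entry->second.handle;
    }

private:
    static OGLTextureView Create(std::uint32_t swizzle) {
        return {swizzle + 1}; // placeholder for the glTextureView + swizzle setup
    }

    std::unordered_map<std::uint32_t, OGLTextureView> views;
};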
@@ -518,8 +557,8 @@ void TextureCacheOpenGL::ImageBlit(View& src_view, View& dst_view,
518 const Tegra::Engines::Fermi2D::Config& copy_config) { 557 const Tegra::Engines::Fermi2D::Config& copy_config) {
519 const auto& src_params{src_view->GetSurfaceParams()}; 558 const auto& src_params{src_view->GetSurfaceParams()};
520 const auto& dst_params{dst_view->GetSurfaceParams()}; 559 const auto& dst_params{dst_view->GetSurfaceParams()};
521 UNIMPLEMENTED_IF(src_params.target == SurfaceTarget::Texture3D); 560 UNIMPLEMENTED_IF(src_params.depth != 1);
522 UNIMPLEMENTED_IF(dst_params.target == SurfaceTarget::Texture3D); 561 UNIMPLEMENTED_IF(dst_params.depth != 1);
523 562
524 state_tracker.NotifyScissor0(); 563 state_tracker.NotifyScissor0();
525 state_tracker.NotifyFramebuffer(); 564 state_tracker.NotifyFramebuffer();
diff --git a/src/video_core/renderer_opengl/gl_texture_cache.h b/src/video_core/renderer_opengl/gl_texture_cache.h
index 02d9981a1..bfc4ddf5d 100644
--- a/src/video_core/renderer_opengl/gl_texture_cache.h
+++ b/src/video_core/renderer_opengl/gl_texture_cache.h
@@ -80,10 +80,12 @@ public:
80 explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy); 80 explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
81 ~CachedSurfaceView(); 81 ~CachedSurfaceView();
82 82
83 /// Attaches this texture view to the current bound GL_DRAW_FRAMEBUFFER 83 /// @brief Attaches this texture view to the currently bound fb_target framebuffer
84 void Attach(GLenum attachment, GLenum target) const; 84 /// @param attachment Attachment to bind textures to
85 /// @param fb_target Framebuffer target to attach to (e.g. DRAW_FRAMEBUFFER)
86 void Attach(GLenum attachment, GLenum fb_target) const;
85 87
86 void ApplySwizzle(Tegra::Texture::SwizzleSource x_source, 88 GLuint GetTexture(Tegra::Texture::SwizzleSource x_source,
87 Tegra::Texture::SwizzleSource y_source, 89 Tegra::Texture::SwizzleSource y_source,
88 Tegra::Texture::SwizzleSource z_source, 90 Tegra::Texture::SwizzleSource z_source,
89 Tegra::Texture::SwizzleSource w_source); 91 Tegra::Texture::SwizzleSource w_source);
@@ -98,7 +100,7 @@ public:
98 if (is_proxy) { 100 if (is_proxy) {
99 return surface.GetTexture(); 101 return surface.GetTexture();
100 } 102 }
101 return texture_view.handle; 103 return main_view.handle;
102 } 104 }
103 105
104 GLenum GetFormat() const { 106 GLenum GetFormat() const {
@@ -110,23 +112,19 @@ public:
110 } 112 }
111 113
112private: 114private:
113 u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source,
114 Tegra::Texture::SwizzleSource y_source,
115 Tegra::Texture::SwizzleSource z_source,
116 Tegra::Texture::SwizzleSource w_source) const {
117 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
118 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
119 }
120
121 OGLTextureView CreateTextureView() const; 115 OGLTextureView CreateTextureView() const;
122 116
123 CachedSurface& surface; 117 CachedSurface& surface;
124 GLenum target{}; 118 const GLenum format;
125 GLenum format{}; 119 const GLenum target;
120 const bool is_proxy;
121
122 std::unordered_map<u32, OGLTextureView> view_cache;
123 OGLTextureView main_view;
126 124
127 OGLTextureView texture_view; 125 // Use an invalid default so it always fails the comparison test
128 u32 swizzle{}; 126 u32 current_swizzle = 0xffffffff;
129 bool is_proxy{}; 127 GLuint current_view = 0;
130}; 128};
131 129
132class TextureCacheOpenGL final : public TextureCacheBase { 130class TextureCacheOpenGL final : public TextureCacheBase {
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 6b489e6db..6214fcbc3 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -751,8 +751,9 @@ void RendererOpenGL::RenderScreenshot() {
751} 751}
752 752
753bool RendererOpenGL::Init() { 753bool RendererOpenGL::Init() {
754 if (GLAD_GL_KHR_debug) { 754 if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
755 glEnable(GL_DEBUG_OUTPUT); 755 glEnable(GL_DEBUG_OUTPUT);
756 glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
756 glDebugMessageCallback(DebugHandler, nullptr); 757 glDebugMessageCallback(DebugHandler, nullptr);
757 } 758 }
758 759
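The hunk above gates GL debug output behind the new renderer_debug setting and makes it synchronous; a minimal sketch of that setup, where the handler body is illustrative (the real DebugHandler lives elsewhere in this file) and the callback signature is the standard KHR_debug GLDEBUGPROC:

#include <cstdio>
#include <glad/glad.h>

static void APIENTRY DebugHandlerSketch(GLenum source, GLenum type, GLuint id, GLenum severity,
                                        GLsizei length, const GLchar* message, const void*) {
    std::fprintf(stderr, "GL debug [%u]: %.*s\n", id, static_cast<int>(length), message);
}

void EnableGlDebugOutput() {
    if (!GLAD_GL_KHR_debug) {
        return; // extension not present, nothing to enable
    }
    glEnable(GL_DEBUG_OUTPUT);
    // Synchronous output makes the callback fire on the thread issuing the bad
    // GL call, so a debugger breakpoint lands on the offending command.
    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
    glDebugMessageCallback(DebugHandlerSketch, nullptr);
}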
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 568744e3c..424278816 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -71,8 +71,7 @@ void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept {
71 const u32 topology_index = static_cast<u32>(regs.draw.topology.Value()); 71 const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
72 72
73 u32 packed_front_face = PackFrontFace(regs.front_face); 73 u32 packed_front_face = PackFrontFace(regs.front_face);
74 if (regs.screen_y_control.triangle_rast_flip != 0 && 74 if (regs.screen_y_control.triangle_rast_flip != 0) {
75 regs.viewport_transform[0].scale_y > 0.0f) {
76 // Flip front face 75 // Flip front face
77 packed_front_face = 1 - packed_front_face; 76 packed_front_face = 1 - packed_front_face;
78 } 77 }
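The simplified flip above now toggles the packed winding whenever triangle_rast_flip is set, with no viewport-scale condition; a worked example, assuming PackFrontFace maps the two windings onto {0, 1}:

#include <cstdint>
#include <iostream>

std::uint32_t FlipIfNeeded(std::uint32_t packed_front_face, bool triangle_rast_flip) {
    // 1 - packed toggles 0 <-> 1; without the flag the value passes through.
    return triangle_rast_flip ? 1 - packed_front_face : packed_front_face;
}

int main() {
    std::cout << FlipIfNeeded(0, true) << FlipIfNeeded(1, true) << FlipIfNeeded(1, false) << '\n';
    // prints 101: both windings toggle under the flip, and pass through otherwise
}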
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 2871035f5..62e950d31 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -149,7 +149,7 @@ struct FormatTuple {
149 {VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16F 149 {VK_FORMAT_R16_SFLOAT, Attachable | Storage}, // R16F
150 {VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16U 150 {VK_FORMAT_R16_UNORM, Attachable | Storage}, // R16U
151 {VK_FORMAT_UNDEFINED}, // R16S 151 {VK_FORMAT_UNDEFINED}, // R16S
152 {VK_FORMAT_UNDEFINED}, // R16UI 152 {VK_FORMAT_R16_UINT, Attachable | Storage}, // R16UI
153 {VK_FORMAT_UNDEFINED}, // R16I 153 {VK_FORMAT_UNDEFINED}, // R16I
154 {VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // RG16 154 {VK_FORMAT_R16G16_UNORM, Attachable | Storage}, // RG16
155 {VK_FORMAT_R16G16_SFLOAT, Attachable | Storage}, // RG16F 155 {VK_FORMAT_R16G16_SFLOAT, Attachable | Storage}, // RG16F
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 8e1b46277..281bf9ac3 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -53,8 +53,9 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
53 }; 53 };
54 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size()); 54 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
55 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size()); 55 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
56 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size()); 56 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
57 add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size()); 57 add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
58 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
58 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size()); 59 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
59 60
60 VkDescriptorSetLayoutCreateInfo ci; 61 VkDescriptorSetLayoutCreateInfo ci;
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index 890fd52cf..9259b618d 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -42,6 +42,7 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
42 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60}, 42 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
43 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64}, 43 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
44 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64}, 44 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
45 {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
45 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}}; 46 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
46 47
47 VkDescriptorPoolCreateInfo ci; 48 VkDescriptorPoolCreateInfo ci;
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 750e5a0ca..9fd8ac3f6 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -73,76 +73,79 @@ VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType
73 73
74std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties( 74std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
75 vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) { 75 vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
76 static constexpr std::array formats{VK_FORMAT_A8B8G8R8_UNORM_PACK32, 76 static constexpr std::array formats{
77 VK_FORMAT_A8B8G8R8_UINT_PACK32, 77 VK_FORMAT_A8B8G8R8_UNORM_PACK32,
78 VK_FORMAT_A8B8G8R8_SNORM_PACK32, 78 VK_FORMAT_A8B8G8R8_UINT_PACK32,
79 VK_FORMAT_A8B8G8R8_SRGB_PACK32, 79 VK_FORMAT_A8B8G8R8_SNORM_PACK32,
80 VK_FORMAT_B5G6R5_UNORM_PACK16, 80 VK_FORMAT_A8B8G8R8_SRGB_PACK32,
81 VK_FORMAT_A2B10G10R10_UNORM_PACK32, 81 VK_FORMAT_B5G6R5_UNORM_PACK16,
82 VK_FORMAT_A1R5G5B5_UNORM_PACK16, 82 VK_FORMAT_A2B10G10R10_UNORM_PACK32,
83 VK_FORMAT_R32G32B32A32_SFLOAT, 83 VK_FORMAT_A1R5G5B5_UNORM_PACK16,
84 VK_FORMAT_R32G32B32A32_UINT, 84 VK_FORMAT_R32G32B32A32_SFLOAT,
85 VK_FORMAT_R32G32_SFLOAT, 85 VK_FORMAT_R32G32B32A32_UINT,
86 VK_FORMAT_R32G32_UINT, 86 VK_FORMAT_R32G32_SFLOAT,
87 VK_FORMAT_R16G16B16A16_UINT, 87 VK_FORMAT_R32G32_UINT,
88 VK_FORMAT_R16G16B16A16_SNORM, 88 VK_FORMAT_R16G16B16A16_UINT,
89 VK_FORMAT_R16G16B16A16_UNORM, 89 VK_FORMAT_R16G16B16A16_SNORM,
90 VK_FORMAT_R16G16_UNORM, 90 VK_FORMAT_R16G16B16A16_UNORM,
91 VK_FORMAT_R16G16_SNORM, 91 VK_FORMAT_R16G16_UNORM,
92 VK_FORMAT_R16G16_SFLOAT, 92 VK_FORMAT_R16G16_SNORM,
93 VK_FORMAT_R16_UNORM, 93 VK_FORMAT_R16G16_SFLOAT,
94 VK_FORMAT_R8G8B8A8_SRGB, 94 VK_FORMAT_R16_UNORM,
95 VK_FORMAT_R8G8_UNORM, 95 VK_FORMAT_R16_UINT,
96 VK_FORMAT_R8G8_SNORM, 96 VK_FORMAT_R8G8B8A8_SRGB,
97 VK_FORMAT_R8G8_UINT, 97 VK_FORMAT_R8G8_UNORM,
98 VK_FORMAT_R8_UNORM, 98 VK_FORMAT_R8G8_SNORM,
99 VK_FORMAT_R8_UINT, 99 VK_FORMAT_R8G8_UINT,
100 VK_FORMAT_B10G11R11_UFLOAT_PACK32, 100 VK_FORMAT_R8_UNORM,
101 VK_FORMAT_R32_SFLOAT, 101 VK_FORMAT_R8_UINT,
102 VK_FORMAT_R32_UINT, 102 VK_FORMAT_B10G11R11_UFLOAT_PACK32,
103 VK_FORMAT_R32_SINT, 103 VK_FORMAT_R32_SFLOAT,
104 VK_FORMAT_R16_SFLOAT, 104 VK_FORMAT_R32_UINT,
105 VK_FORMAT_R16G16B16A16_SFLOAT, 105 VK_FORMAT_R32_SINT,
106 VK_FORMAT_B8G8R8A8_UNORM, 106 VK_FORMAT_R16_SFLOAT,
107 VK_FORMAT_B8G8R8A8_SRGB, 107 VK_FORMAT_R16G16B16A16_SFLOAT,
108 VK_FORMAT_R4G4B4A4_UNORM_PACK16, 108 VK_FORMAT_B8G8R8A8_UNORM,
109 VK_FORMAT_D32_SFLOAT, 109 VK_FORMAT_B8G8R8A8_SRGB,
110 VK_FORMAT_D16_UNORM, 110 VK_FORMAT_R4G4B4A4_UNORM_PACK16,
111 VK_FORMAT_D16_UNORM_S8_UINT, 111 VK_FORMAT_D32_SFLOAT,
112 VK_FORMAT_D24_UNORM_S8_UINT, 112 VK_FORMAT_D16_UNORM,
113 VK_FORMAT_D32_SFLOAT_S8_UINT, 113 VK_FORMAT_D16_UNORM_S8_UINT,
114 VK_FORMAT_BC1_RGBA_UNORM_BLOCK, 114 VK_FORMAT_D24_UNORM_S8_UINT,
115 VK_FORMAT_BC2_UNORM_BLOCK, 115 VK_FORMAT_D32_SFLOAT_S8_UINT,
116 VK_FORMAT_BC3_UNORM_BLOCK, 116 VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
117 VK_FORMAT_BC4_UNORM_BLOCK, 117 VK_FORMAT_BC2_UNORM_BLOCK,
118 VK_FORMAT_BC5_UNORM_BLOCK, 118 VK_FORMAT_BC3_UNORM_BLOCK,
119 VK_FORMAT_BC5_SNORM_BLOCK, 119 VK_FORMAT_BC4_UNORM_BLOCK,
120 VK_FORMAT_BC7_UNORM_BLOCK, 120 VK_FORMAT_BC5_UNORM_BLOCK,
121 VK_FORMAT_BC6H_UFLOAT_BLOCK, 121 VK_FORMAT_BC5_SNORM_BLOCK,
122 VK_FORMAT_BC6H_SFLOAT_BLOCK, 122 VK_FORMAT_BC7_UNORM_BLOCK,
123 VK_FORMAT_BC1_RGBA_SRGB_BLOCK, 123 VK_FORMAT_BC6H_UFLOAT_BLOCK,
124 VK_FORMAT_BC2_SRGB_BLOCK, 124 VK_FORMAT_BC6H_SFLOAT_BLOCK,
125 VK_FORMAT_BC3_SRGB_BLOCK, 125 VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
126 VK_FORMAT_BC7_SRGB_BLOCK, 126 VK_FORMAT_BC2_SRGB_BLOCK,
127 VK_FORMAT_ASTC_4x4_SRGB_BLOCK, 127 VK_FORMAT_BC3_SRGB_BLOCK,
128 VK_FORMAT_ASTC_8x8_SRGB_BLOCK, 128 VK_FORMAT_BC7_SRGB_BLOCK,
129 VK_FORMAT_ASTC_8x5_SRGB_BLOCK, 129 VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
130 VK_FORMAT_ASTC_5x4_SRGB_BLOCK, 130 VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
131 VK_FORMAT_ASTC_5x5_UNORM_BLOCK, 131 VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
132 VK_FORMAT_ASTC_5x5_SRGB_BLOCK, 132 VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
133 VK_FORMAT_ASTC_10x8_UNORM_BLOCK, 133 VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
134 VK_FORMAT_ASTC_10x8_SRGB_BLOCK, 134 VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
135 VK_FORMAT_ASTC_6x6_UNORM_BLOCK, 135 VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
136 VK_FORMAT_ASTC_6x6_SRGB_BLOCK, 136 VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
137 VK_FORMAT_ASTC_10x10_UNORM_BLOCK, 137 VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
138 VK_FORMAT_ASTC_10x10_SRGB_BLOCK, 138 VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
139 VK_FORMAT_ASTC_12x12_UNORM_BLOCK, 139 VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
140 VK_FORMAT_ASTC_12x12_SRGB_BLOCK, 140 VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
141 VK_FORMAT_ASTC_8x6_UNORM_BLOCK, 141 VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
142 VK_FORMAT_ASTC_8x6_SRGB_BLOCK, 142 VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
143 VK_FORMAT_ASTC_6x5_UNORM_BLOCK, 143 VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
144 VK_FORMAT_ASTC_6x5_SRGB_BLOCK, 144 VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
145 VK_FORMAT_E5B9G9R9_UFLOAT_PACK32}; 145 VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
146 VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
147 VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
148 };
146 std::unordered_map<VkFormat, VkFormatProperties> format_properties; 149 std::unordered_map<VkFormat, VkFormatProperties> format_properties;
147 for (const auto format : formats) { 150 for (const auto format : formats) {
148 format_properties.emplace(format, physical.GetFormatProperties(format)); 151 format_properties.emplace(format, physical.GetFormatProperties(format));
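Pre-querying every format into an unordered_map, as above, turns later feature checks into O(1) lookups; a pared-down sketch where VkFormat and the feature flags are plain integers:

#include <cstdint>
#include <unordered_map>

using FormatStub = std::uint32_t;
using FeatureFlags = std::uint32_t;

// True when the cached properties for `format` contain every wanted feature bit.
bool IsFormatSupported(const std::unordered_map<FormatStub, FeatureFlags>& table,
                       FormatStub format, FeatureFlags wanted) {
    const auto it = table.find(format);
    return it != table.end() && (it->second & wanted) == wanted;
}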
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index a5c7b7945..b8ccf164f 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -45,6 +45,7 @@ constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
45constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; 45constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
46constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; 46constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
47constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; 47constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
48constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
48constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 49constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
49 50
50constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ 51constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
@@ -104,8 +105,9 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
104 u32 binding = base_binding; 105 u32 binding = base_binding;
105 AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers); 106 AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
106 AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers); 107 AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
107 AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers); 108 AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
108 AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers); 109 AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
110 AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
109 AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images); 111 AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
110 return binding; 112 return binding;
111} 113}
@@ -312,7 +314,9 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
312 ASSERT(point_size != 0.0f); 314 ASSERT(point_size != 0.0f);
313 } 315 }
314 for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) { 316 for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
315 specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].Type(); 317 const auto& attribute = fixed_state.vertex_input.attributes[i];
318 specialization.enabled_attributes[i] = attribute.enabled.Value() != 0;
319 specialization.attribute_types[i] = attribute.Type();
316 } 320 }
317 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one; 321 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
318 322
@@ -375,16 +379,17 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
375 return; 379 return;
376 } 380 }
377 381
378 if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) { 382 if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
379 // Nvidia has a bug where updating multiple uniform texels at once causes the driver to 383 descriptor_type == STORAGE_TEXEL_BUFFER) {
380 // crash. 384 // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
385 // Note: Fixed in driver Windows 443.24, Linux 440.66.15
381 for (u32 i = 0; i < count; ++i) { 386 for (u32 i = 0; i < count; ++i) {
382 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 387 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
383 entry.dstBinding = binding + i; 388 entry.dstBinding = binding + i;
384 entry.dstArrayElement = 0; 389 entry.dstArrayElement = 0;
385 entry.descriptorCount = 1; 390 entry.descriptorCount = 1;
386 entry.descriptorType = descriptor_type; 391 entry.descriptorType = descriptor_type;
387 entry.offset = offset + i * entry_size; 392 entry.offset = static_cast<std::size_t>(offset + i * entry_size);
388 entry.stride = entry_size; 393 entry.stride = entry_size;
389 } 394 }
390 } else if (count > 0) { 395 } else if (count > 0) {
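The workaround above sidesteps the Nvidia texel-buffer crash by emitting one single-descriptor template entry per binding instead of one batched entry; the loop in isolation, with VkDescriptorUpdateTemplateEntry pared down to the fields that matter here:

#include <cstddef>
#include <cstdint>
#include <vector>

struct EntrySketch {
    std::uint32_t dstBinding;
    std::uint32_t descriptorCount;
    std::size_t offset;
    std::size_t stride;
};

// One entry per descriptor: `count` entries of descriptorCount == 1, each with
// its own binding slot and offset, rather than one entry covering them all.
void AddTexelEntries(std::vector<EntrySketch>& entries, std::uint32_t binding,
                     std::uint32_t count, std::size_t base_offset, std::size_t entry_size) {
    for (std::uint32_t i = 0; i < count; ++i) {
        entries.push_back({binding + i, 1, base_offset + i * entry_size, entry_size});
    }
}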
@@ -405,8 +410,9 @@ void FillDescriptorUpdateTemplateEntries(
405 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) { 410 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
406 AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers); 411 AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
407 AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers); 412 AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
408 AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers); 413 AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
409 AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers); 414 AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
415 AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
410 AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images); 416 AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
411} 417}
412 418
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index be5b77fae..19b8f9da3 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -468,8 +468,9 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
468 const auto& entries = pipeline.GetEntries(); 468 const auto& entries = pipeline.GetEntries();
469 SetupComputeConstBuffers(entries); 469 SetupComputeConstBuffers(entries);
470 SetupComputeGlobalBuffers(entries); 470 SetupComputeGlobalBuffers(entries);
471 SetupComputeTexelBuffers(entries); 471 SetupComputeUniformTexels(entries);
472 SetupComputeTextures(entries); 472 SetupComputeTextures(entries);
473 SetupComputeStorageTexels(entries);
473 SetupComputeImages(entries); 474 SetupComputeImages(entries);
474 475
475 buffer_cache.Unmap(); 476 buffer_cache.Unmap();
@@ -715,7 +716,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
715 if (!view) { 716 if (!view) {
716 return false; 717 return false;
717 } 718 }
718 key.views.push_back(view->GetHandle()); 719 key.views.push_back(view->GetAttachment());
719 key.width = std::min(key.width, view->GetWidth()); 720 key.width = std::min(key.width, view->GetWidth());
720 key.height = std::min(key.height, view->GetHeight()); 721 key.height = std::min(key.height, view->GetHeight());
721 key.layers = std::min(key.layers, view->GetNumLayers()); 722 key.layers = std::min(key.layers, view->GetNumLayers());
@@ -787,8 +788,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
787 const auto& entries = shader->GetEntries(); 788 const auto& entries = shader->GetEntries();
788 SetupGraphicsConstBuffers(entries, stage); 789 SetupGraphicsConstBuffers(entries, stage);
789 SetupGraphicsGlobalBuffers(entries, stage); 790 SetupGraphicsGlobalBuffers(entries, stage);
790 SetupGraphicsTexelBuffers(entries, stage); 791 SetupGraphicsUniformTexels(entries, stage);
791 SetupGraphicsTextures(entries, stage); 792 SetupGraphicsTextures(entries, stage);
793 SetupGraphicsStorageTexels(entries, stage);
792 SetupGraphicsImages(entries, stage); 794 SetupGraphicsImages(entries, stage);
793 } 795 }
794 texture_cache.GuardSamplers(false); 796 texture_cache.GuardSamplers(false);
@@ -838,6 +840,10 @@ void RasterizerVulkan::BeginTransformFeedback() {
838 if (regs.tfb_enabled == 0) { 840 if (regs.tfb_enabled == 0) {
839 return; 841 return;
840 } 842 }
843 if (!device.IsExtTransformFeedbackSupported()) {
 844 LOG_ERROR(Render_Vulkan, "Transform feedback is used but not supported");
845 return;
846 }
841 847
842 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || 848 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
843 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || 849 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
@@ -866,6 +872,9 @@ void RasterizerVulkan::EndTransformFeedback() {
866 if (regs.tfb_enabled == 0) { 872 if (regs.tfb_enabled == 0) {
867 return; 873 return;
868 } 874 }
875 if (!device.IsExtTransformFeedbackSupported()) {
876 return;
877 }
869 878
870 scheduler.Record( 879 scheduler.Record(
871 [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); }); 880 [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
@@ -877,14 +886,10 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
877 886
878 for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) { 887 for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
879 const auto& attrib = regs.vertex_attrib_format[index]; 888 const auto& attrib = regs.vertex_attrib_format[index];
880 if (!attrib.IsValid()) { 889 if (attrib.IsConstant()) {
881 vertex_input.SetAttribute(index, false, 0, 0, {}, {}); 890 vertex_input.SetAttribute(index, false, 0, 0, {}, {});
882 continue; 891 continue;
883 } 892 }
884
885 [[maybe_unused]] const auto& buffer = regs.vertex_array[attrib.buffer];
886 ASSERT(buffer.IsEnabled());
887
888 vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(), 893 vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(),
889 attrib.size.Value()); 894 attrib.size.Value());
890 } 895 }
@@ -980,12 +985,12 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries,
980 } 985 }
981} 986}
982 987
983void RasterizerVulkan::SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage) { 988void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
984 MICROPROFILE_SCOPE(Vulkan_Textures); 989 MICROPROFILE_SCOPE(Vulkan_Textures);
985 const auto& gpu = system.GPU().Maxwell3D(); 990 const auto& gpu = system.GPU().Maxwell3D();
986 for (const auto& entry : entries.texel_buffers) { 991 for (const auto& entry : entries.uniform_texels) {
987 const auto image = GetTextureInfo(gpu, entry, stage).tic; 992 const auto image = GetTextureInfo(gpu, entry, stage).tic;
988 SetupTexelBuffer(image, entry); 993 SetupUniformTexels(image, entry);
989 } 994 }
990} 995}
991 996
@@ -1000,6 +1005,15 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
1000 } 1005 }
1001} 1006}
1002 1007
1008void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
1009 MICROPROFILE_SCOPE(Vulkan_Textures);
1010 const auto& gpu = system.GPU().Maxwell3D();
1011 for (const auto& entry : entries.storage_texels) {
1012 const auto image = GetTextureInfo(gpu, entry, stage).tic;
1013 SetupStorageTexel(image, entry);
1014 }
1015}
1016
1003void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) { 1017void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
1004 MICROPROFILE_SCOPE(Vulkan_Images); 1018 MICROPROFILE_SCOPE(Vulkan_Images);
1005 const auto& gpu = system.GPU().Maxwell3D(); 1019 const auto& gpu = system.GPU().Maxwell3D();
@@ -1032,12 +1046,12 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
1032 } 1046 }
1033} 1047}
1034 1048
1035void RasterizerVulkan::SetupComputeTexelBuffers(const ShaderEntries& entries) { 1049void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
1036 MICROPROFILE_SCOPE(Vulkan_Textures); 1050 MICROPROFILE_SCOPE(Vulkan_Textures);
1037 const auto& gpu = system.GPU().KeplerCompute(); 1051 const auto& gpu = system.GPU().KeplerCompute();
1038 for (const auto& entry : entries.texel_buffers) { 1052 for (const auto& entry : entries.uniform_texels) {
1039 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic; 1053 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
1040 SetupTexelBuffer(image, entry); 1054 SetupUniformTexels(image, entry);
1041 } 1055 }
1042} 1056}
1043 1057
@@ -1052,6 +1066,15 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
1052 } 1066 }
1053} 1067}
1054 1068
1069void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
1070 MICROPROFILE_SCOPE(Vulkan_Textures);
1071 const auto& gpu = system.GPU().KeplerCompute();
1072 for (const auto& entry : entries.storage_texels) {
1073 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
1074 SetupStorageTexel(image, entry);
1075 }
1076}
1077
1055void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) { 1078void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
1056 MICROPROFILE_SCOPE(Vulkan_Images); 1079 MICROPROFILE_SCOPE(Vulkan_Images);
1057 const auto& gpu = system.GPU().KeplerCompute(); 1080 const auto& gpu = system.GPU().KeplerCompute();
@@ -1101,8 +1124,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
1101 update_descriptor_queue.AddBuffer(buffer, offset, size); 1124 update_descriptor_queue.AddBuffer(buffer, offset, size);
1102} 1125}
1103 1126
1104void RasterizerVulkan::SetupTexelBuffer(const Tegra::Texture::TICEntry& tic, 1127void RasterizerVulkan::SetupUniformTexels(const Tegra::Texture::TICEntry& tic,
1105 const TexelBufferEntry& entry) { 1128 const UniformTexelEntry& entry) {
1106 const auto view = texture_cache.GetTextureSurface(tic, entry); 1129 const auto view = texture_cache.GetTextureSurface(tic, entry);
1107 ASSERT(view->IsBufferView()); 1130 ASSERT(view->IsBufferView());
1108 1131
@@ -1114,8 +1137,8 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
1114 auto view = texture_cache.GetTextureSurface(texture.tic, entry); 1137 auto view = texture_cache.GetTextureSurface(texture.tic, entry);
1115 ASSERT(!view->IsBufferView()); 1138 ASSERT(!view->IsBufferView());
1116 1139
1117 const auto image_view = view->GetHandle(texture.tic.x_source, texture.tic.y_source, 1140 const VkImageView image_view = view->GetImageView(texture.tic.x_source, texture.tic.y_source,
1118 texture.tic.z_source, texture.tic.w_source); 1141 texture.tic.z_source, texture.tic.w_source);
1119 const auto sampler = sampler_cache.GetSampler(texture.tsc); 1142 const auto sampler = sampler_cache.GetSampler(texture.tsc);
1120 update_descriptor_queue.AddSampledImage(sampler, image_view); 1143 update_descriptor_queue.AddSampledImage(sampler, image_view);
1121 1144
@@ -1124,6 +1147,14 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
1124 sampled_views.push_back(ImageView{std::move(view), image_layout}); 1147 sampled_views.push_back(ImageView{std::move(view), image_layout});
1125} 1148}
1126 1149
1150void RasterizerVulkan::SetupStorageTexel(const Tegra::Texture::TICEntry& tic,
1151 const StorageTexelEntry& entry) {
1152 const auto view = texture_cache.GetImageSurface(tic, entry);
1153 ASSERT(view->IsBufferView());
1154
1155 update_descriptor_queue.AddTexelBuffer(view->GetBufferView());
1156}
1157
1127void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) { 1158void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
1128 auto view = texture_cache.GetImageSurface(tic, entry); 1159 auto view = texture_cache.GetImageSurface(tic, entry);
1129 1160
@@ -1133,7 +1164,8 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima
1133 1164
1134 UNIMPLEMENTED_IF(tic.IsBuffer()); 1165 UNIMPLEMENTED_IF(tic.IsBuffer());
1135 1166
1136 const auto image_view = view->GetHandle(tic.x_source, tic.y_source, tic.z_source, tic.w_source); 1167 const VkImageView image_view =
1168 view->GetImageView(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
1137 update_descriptor_queue.AddImage(image_view); 1169 update_descriptor_queue.AddImage(image_view);
1138 1170
1139 const auto image_layout = update_descriptor_queue.GetLastImageLayout(); 1171 const auto image_layout = update_descriptor_queue.GetLastImageLayout();
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0ed0e48c6..04be37a5e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -193,12 +193,15 @@ private:
193 /// Setup global buffers in the graphics pipeline. 193 /// Setup global buffers in the graphics pipeline.
194 void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage); 194 void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage);
195 195
196 /// Setup texel buffers in the graphics pipeline. 196 /// Setup uniform texels in the graphics pipeline.
197 void SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage); 197 void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
198 198
199 /// Setup textures in the graphics pipeline. 199 /// Setup textures in the graphics pipeline.
200 void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage); 200 void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
201 201
202 /// Setup storage texels in the graphics pipeline.
203 void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
204
202 /// Setup images in the graphics pipeline. 205 /// Setup images in the graphics pipeline.
203 void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage); 206 void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
204 207
@@ -209,11 +212,14 @@ private:
209 void SetupComputeGlobalBuffers(const ShaderEntries& entries); 212 void SetupComputeGlobalBuffers(const ShaderEntries& entries);
210 213
211 /// Setup texel buffers in the compute pipeline. 214 /// Setup texel buffers in the compute pipeline.
212 void SetupComputeTexelBuffers(const ShaderEntries& entries); 215 void SetupComputeUniformTexels(const ShaderEntries& entries);
213 216
214 /// Setup textures in the compute pipeline. 217 /// Setup textures in the compute pipeline.
215 void SetupComputeTextures(const ShaderEntries& entries); 218 void SetupComputeTextures(const ShaderEntries& entries);
216 219
220 /// Setup storage texels in the compute pipeline.
221 void SetupComputeStorageTexels(const ShaderEntries& entries);
222
217 /// Setup images in the compute pipeline. 223 /// Setup images in the compute pipeline.
218 void SetupComputeImages(const ShaderEntries& entries); 224 void SetupComputeImages(const ShaderEntries& entries);
219 225
@@ -222,10 +228,12 @@ private:
222 228
223 void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address); 229 void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address);
224 230
225 void SetupTexelBuffer(const Tegra::Texture::TICEntry& image, const TexelBufferEntry& entry); 231 void SetupUniformTexels(const Tegra::Texture::TICEntry& image, const UniformTexelEntry& entry);
226 232
227 void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry); 233 void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry);
228 234
235 void SetupStorageTexel(const Tegra::Texture::TICEntry& tic, const StorageTexelEntry& entry);
236
229 void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry); 237 void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);
230 238
231 void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs); 239 void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index 890f34a2c..97429cc59 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -400,8 +400,9 @@ private:
400 u32 binding = specialization.base_binding; 400 u32 binding = specialization.base_binding;
401 binding = DeclareConstantBuffers(binding); 401 binding = DeclareConstantBuffers(binding);
402 binding = DeclareGlobalBuffers(binding); 402 binding = DeclareGlobalBuffers(binding);
403 binding = DeclareTexelBuffers(binding); 403 binding = DeclareUniformTexels(binding);
404 binding = DeclareSamplers(binding); 404 binding = DeclareSamplers(binding);
405 binding = DeclareStorageTexels(binding);
405 binding = DeclareImages(binding); 406 binding = DeclareImages(binding);
406 407
407 const Id main = OpFunction(t_void, {}, TypeFunction(t_void)); 408 const Id main = OpFunction(t_void, {}, TypeFunction(t_void));
@@ -741,8 +742,10 @@ private:
741 if (!IsGenericAttribute(index)) { 742 if (!IsGenericAttribute(index)) {
742 continue; 743 continue;
743 } 744 }
744
745 const u32 location = GetGenericAttributeLocation(index); 745 const u32 location = GetGenericAttributeLocation(index);
746 if (!IsAttributeEnabled(location)) {
747 continue;
748 }
746 const auto type_descriptor = GetAttributeType(location); 749 const auto type_descriptor = GetAttributeType(location);
747 Id type; 750 Id type;
748 if (IsInputAttributeArray()) { 751 if (IsInputAttributeArray()) {
@@ -887,7 +890,7 @@ private:
887 return binding; 890 return binding;
888 } 891 }
889 892
890 u32 DeclareTexelBuffers(u32 binding) { 893 u32 DeclareUniformTexels(u32 binding) {
891 for (const auto& sampler : ir.GetSamplers()) { 894 for (const auto& sampler : ir.GetSamplers()) {
892 if (!sampler.is_buffer) { 895 if (!sampler.is_buffer) {
893 continue; 896 continue;
@@ -908,7 +911,7 @@ private:
908 Decorate(id, spv::Decoration::Binding, binding++); 911 Decorate(id, spv::Decoration::Binding, binding++);
909 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 912 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
910 913
911 texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id}); 914 uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
912 } 915 }
913 return binding; 916 return binding;
914 } 917 }
@@ -943,31 +946,48 @@ private:
943 return binding; 946 return binding;
944 } 947 }
945 948
946 u32 DeclareImages(u32 binding) { 949 u32 DeclareStorageTexels(u32 binding) {
947 for (const auto& image : ir.GetImages()) { 950 for (const auto& image : ir.GetImages()) {
948 const auto [dim, arrayed] = GetImageDim(image); 951 if (image.type != Tegra::Shader::ImageType::TextureBuffer) {
949 constexpr int depth = 0; 952 continue;
950 constexpr bool ms = false;
951 constexpr int sampled = 2; // This won't be accessed with a sampler
952 constexpr auto format = spv::ImageFormat::Unknown;
953 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
954 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
955 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
956 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
957
958 Decorate(id, spv::Decoration::Binding, binding++);
959 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
960 if (image.is_read && !image.is_written) {
961 Decorate(id, spv::Decoration::NonWritable);
962 } else if (image.is_written && !image.is_read) {
963 Decorate(id, spv::Decoration::NonReadable);
964 } 953 }
954 DeclareImage(image, binding);
955 }
956 return binding;
957 }
965 958
966 images.emplace(image.index, StorageImage{image_type, id}); 959 u32 DeclareImages(u32 binding) {
960 for (const auto& image : ir.GetImages()) {
961 if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
962 continue;
963 }
964 DeclareImage(image, binding);
967 } 965 }
968 return binding; 966 return binding;
969 } 967 }
970 968
969 void DeclareImage(const Image& image, u32& binding) {
970 const auto [dim, arrayed] = GetImageDim(image);
971 constexpr int depth = 0;
972 constexpr bool ms = false;
973 constexpr int sampled = 2; // This won't be accessed with a sampler
974 const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
975 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
976 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
977 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
978 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
979
980 Decorate(id, spv::Decoration::Binding, binding++);
981 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
982 if (image.is_read && !image.is_written) {
983 Decorate(id, spv::Decoration::NonWritable);
984 } else if (image.is_written && !image.is_read) {
985 Decorate(id, spv::Decoration::NonReadable);
986 }
987
988 images.emplace(image.index, StorageImage{image_type, id});
989 }
990
971 bool IsRenderTargetEnabled(u32 rt) const { 991 bool IsRenderTargetEnabled(u32 rt) const {
972 for (u32 component = 0; component < 4; ++component) { 992 for (u32 component = 0; component < 4; ++component) {
973 if (header.ps.IsColorComponentOutputEnabled(rt, component)) { 993 if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
@@ -986,6 +1006,10 @@ private:
986 return stage == ShaderType::TesselationControl; 1006 return stage == ShaderType::TesselationControl;
987 } 1007 }
988 1008
1009 bool IsAttributeEnabled(u32 location) const {
1010 return stage != ShaderType::Vertex || specialization.enabled_attributes[location];
1011 }
1012
989 u32 GetNumInputVertices() const { 1013 u32 GetNumInputVertices() const {
990 switch (stage) { 1014 switch (stage) {
991 case ShaderType::Geometry: 1015 case ShaderType::Geometry:
@@ -1201,16 +1225,20 @@ private:
1201 UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); 1225 UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element);
1202 return {v_float_zero, Type::Float}; 1226 return {v_float_zero, Type::Float};
1203 default: 1227 default:
1204 if (IsGenericAttribute(attribute)) { 1228 if (!IsGenericAttribute(attribute)) {
1205 const u32 location = GetGenericAttributeLocation(attribute); 1229 break;
1206 const auto type_descriptor = GetAttributeType(location);
1207 const Type type = type_descriptor.type;
1208 const Id attribute_id = input_attributes.at(attribute);
1209 const std::vector elements = {element};
1210 const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements);
1211 return {OpLoad(GetTypeDefinition(type), pointer), type};
1212 } 1230 }
1213 break; 1231 const u32 location = GetGenericAttributeLocation(attribute);
1232 if (!IsAttributeEnabled(location)) {
1233 // Disabled attributes (also known as constant attributes) always return zero.
1234 return {v_float_zero, Type::Float};
1235 }
1236 const auto type_descriptor = GetAttributeType(location);
1237 const Type type = type_descriptor.type;
1238 const Id attribute_id = input_attributes.at(attribute);
1239 const std::vector elements = {element};
1240 const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements);
1241 return {OpLoad(GetTypeDefinition(type), pointer), type};
1214 } 1242 }
1215 UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute)); 1243 UNIMPLEMENTED_MSG("Unhandled input attribute: {}", static_cast<u32>(attribute));
1216 return {v_float_zero, Type::Float}; 1244 return {v_float_zero, Type::Float};
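
The hunks above make disabled vertex attributes (constant attributes) read as zero instead of declaring an input variable for them. A small sketch of the gating logic, assuming a 32-attribute limit and the specialization bitset introduced in the header change further below:

    #include <bitset>
    #include <cstddef>
    #include <cstdint>

    constexpr std::size_t NumVertexAttributes = 32;

    struct Specialization {
        std::bitset<NumVertexAttributes> enabled_attributes;
    };

    // Hypothetical stand-in for IsAttributeEnabled: only the vertex stage reads
    // raw vertex attributes, so only it consults the bitset; later stages always
    // consume whatever the previous stage wrote.
    bool IsAttributeEnabled(bool is_vertex_stage, const Specialization& spec,
                            std::uint32_t location) {
        return !is_vertex_stage || spec.enabled_attributes[location];
    }
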
@@ -1246,7 +1274,7 @@ private:
1246 } else { 1274 } else {
1247 UNREACHABLE_MSG("Unmanaged offset node type"); 1275 UNREACHABLE_MSG("Unmanaged offset node type");
1248 } 1276 }
1249 pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index, 1277 pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index,
1250 buffer_element); 1278 buffer_element);
1251 } 1279 }
1252 return {OpLoad(t_float, pointer), Type::Float}; 1280 return {OpLoad(t_float, pointer), Type::Float};
@@ -1601,7 +1629,7 @@ private:
1601 1629
1602 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b); 1630 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
1603 const Id carry = OpCompositeExtract(t_uint, result, 1); 1631 const Id carry = OpCompositeExtract(t_uint, result, 1);
1604 return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool}; 1632 return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool};
1605 } 1633 }
1606 1634
1607 Expression LogicalAssign(Operation operation) { 1635 Expression LogicalAssign(Operation operation) {
@@ -1664,7 +1692,7 @@ private:
1664 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1692 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1665 const u32 index = meta.sampler.index; 1693 const u32 index = meta.sampler.index;
1666 if (meta.sampler.is_buffer) { 1694 if (meta.sampler.is_buffer) {
1667 const auto& entry = texel_buffers.at(index); 1695 const auto& entry = uniform_texels.at(index);
1668 return OpLoad(entry.image_type, entry.image); 1696 return OpLoad(entry.image_type, entry.image);
1669 } else { 1697 } else {
1670 const auto& entry = sampled_images.at(index); 1698 const auto& entry = sampled_images.at(index);
@@ -1941,39 +1969,20 @@ private:
1941 return {}; 1969 return {};
1942 } 1970 }
1943 1971
1944 Expression AtomicImageAdd(Operation operation) { 1972 template <Id (Module::*func)(Id, Id, Id, Id, Id)>
1945 UNIMPLEMENTED(); 1973 Expression AtomicImage(Operation operation) {
1946 return {}; 1974 const auto& meta{std::get<MetaImage>(operation.GetMeta())};
1947 } 1975 ASSERT(meta.values.size() == 1);
1948
1949 Expression AtomicImageMin(Operation operation) {
1950 UNIMPLEMENTED();
1951 return {};
1952 }
1953
1954 Expression AtomicImageMax(Operation operation) {
1955 UNIMPLEMENTED();
1956 return {};
1957 }
1958
1959 Expression AtomicImageAnd(Operation operation) {
1960 UNIMPLEMENTED();
1961 return {};
1962 }
1963
1964 Expression AtomicImageOr(Operation operation) {
1965 UNIMPLEMENTED();
1966 return {};
1967 }
1968 1976
1969 Expression AtomicImageXor(Operation operation) { 1977 const Id coordinate = GetCoordinates(operation, Type::Int);
1970 UNIMPLEMENTED(); 1978 const Id image = images.at(meta.image.index).image;
1971 return {}; 1979 const Id sample = v_uint_zero;
1972 } 1980 const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);
1973 1981
1974 Expression AtomicImageExchange(Operation operation) { 1982 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
1975 UNIMPLEMENTED(); 1983 const Id semantics = v_uint_zero;
1976 return {}; 1984 const Id value = AsUint(Visit(meta.values[0]));
1985 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
1977 } 1986 }
1978 1987
1979 template <Id (Module::*func)(Id, Id, Id, Id, Id)> 1988 template <Id (Module::*func)(Id, Id, Id, Id, Id)>
@@ -1988,7 +1997,7 @@ private:
1988 return {v_float_zero, Type::Float}; 1997 return {v_float_zero, Type::Float};
1989 } 1998 }
1990 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device)); 1999 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
1991 const Id semantics = Constant(t_uint, 0); 2000 const Id semantics = v_uint_zero;
1992 const Id value = AsUint(Visit(operation[1])); 2001 const Id value = AsUint(Visit(operation[1]));
1993 2002
1994 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint}; 2003 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
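
The five stubbed AtomicImage* handlers collapse into one template: every SPIR-V image atomic first materializes a texel pointer (OpImageTexelPointer into the Image storage class) and then issues a generic atomic with device scope and relaxed semantics, so only the opcode differs. A toy sketch of the member-pointer dispatch, with a stand-in Module in place of the real SPIR-V emitter:

    #include <cstdint>

    using Id = std::uint32_t;

    // Stand-in emitters sharing the (result type, pointer, scope, semantics,
    // value) shape of the real Module::OpAtomic* methods.
    struct Module {
        Id OpAtomicIAdd(Id, Id, Id, Id, Id) { return ++last; }
        Id OpAtomicAnd(Id, Id, Id, Id, Id) { return ++last; }
        Id last = 0;
    };

    // One body serves every image atomic; the opcode is a template argument.
    template <Id (Module::*Emit)(Id, Id, Id, Id, Id)>
    Id AtomicImage(Module& m, Id t_uint, Id pointer, Id scope, Id semantics, Id value) {
        return (m.*Emit)(t_uint, pointer, scope, semantics, value);
    }

    // Usage: AtomicImage<&Module::OpAtomicIAdd>(m, t_uint, ptr, scope, sem, value);
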
@@ -2215,8 +2224,8 @@ private:
2215 return {}; 2224 return {};
2216 } 2225 }
2217 2226
2218 Expression MemoryBarrierGL(Operation) { 2227 template <spv::Scope scope>
2219 const auto scope = spv::Scope::Device; 2228 Expression MemoryBarrier(Operation) {
2220 const auto semantics = 2229 const auto semantics =
2221 spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory | 2230 spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
2222 spv::MemorySemanticsMask::WorkgroupMemory | 2231 spv::MemorySemanticsMask::WorkgroupMemory |
@@ -2612,11 +2621,11 @@ private:
2612 2621
2613 &SPIRVDecompiler::ImageLoad, 2622 &SPIRVDecompiler::ImageLoad,
2614 &SPIRVDecompiler::ImageStore, 2623 &SPIRVDecompiler::ImageStore,
2615 &SPIRVDecompiler::AtomicImageAdd, 2624 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>,
2616 &SPIRVDecompiler::AtomicImageAnd, 2625 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>,
2617 &SPIRVDecompiler::AtomicImageOr, 2626 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>,
2618 &SPIRVDecompiler::AtomicImageXor, 2627 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>,
2619 &SPIRVDecompiler::AtomicImageExchange, 2628 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>,
2620 2629
2621 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>, 2630 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
2622 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>, 2631 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
@@ -2681,7 +2690,8 @@ private:
2681 &SPIRVDecompiler::ShuffleIndexed, 2690 &SPIRVDecompiler::ShuffleIndexed,
2682 2691
2683 &SPIRVDecompiler::Barrier, 2692 &SPIRVDecompiler::Barrier,
2684 &SPIRVDecompiler::MemoryBarrierGL, 2693 &SPIRVDecompiler::MemoryBarrier<spv::Scope::Workgroup>,
2694 &SPIRVDecompiler::MemoryBarrier<spv::Scope::Device>,
2685 }; 2695 };
2686 static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); 2696 static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount));
2687 2697
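
The decompiler dispatches through an array of member function pointers indexed by OperationCode, so splitting MemoryBarrierGL into workgroup- and device-scoped variants means one extra table slot plus the static_assert that keeps the table in sync with the enum. A compressed sketch of the pattern (names assumed):

    #include <array>
    #include <cstddef>

    enum class OperationCode { Barrier, MemoryBarrierGroup, MemoryBarrierGlobal, Amount };

    struct Decompiler {
        void Barrier() {}
        template <int Scope>
        void MemoryBarrier() {} // scope baked in at compile time, as in the diff

        using Handler = void (Decompiler::*)();
        static constexpr std::array<Handler, 3> table{
            &Decompiler::Barrier,
            &Decompiler::MemoryBarrier<0>, // workgroup scope
            &Decompiler::MemoryBarrier<1>, // device scope
        };
    };
    static_assert(Decompiler::table.size() == static_cast<std::size_t>(OperationCode::Amount));
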
@@ -2757,8 +2767,11 @@ private:
2757 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); 2767 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
2758 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); 2768 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct);
2759 2769
2770 const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint);
2771
2760 const Id v_float_zero = Constant(t_float, 0.0f); 2772 const Id v_float_zero = Constant(t_float, 0.0f);
2761 const Id v_float_one = Constant(t_float, 1.0f); 2773 const Id v_float_one = Constant(t_float, 1.0f);
2774 const Id v_uint_zero = Constant(t_uint, 0);
2762 2775
2763 // Nvidia uses these defaults for varyings (e.g. position and generic attributes) 2776 // Nvidia uses these defaults for varyings (e.g. position and generic attributes)
2764 const Id v_varying_default = 2777 const Id v_varying_default =
@@ -2783,15 +2796,16 @@ private:
2783 std::unordered_map<u8, GenericVaryingDescription> output_attributes; 2796 std::unordered_map<u8, GenericVaryingDescription> output_attributes;
2784 std::map<u32, Id> constant_buffers; 2797 std::map<u32, Id> constant_buffers;
2785 std::map<GlobalMemoryBase, Id> global_buffers; 2798 std::map<GlobalMemoryBase, Id> global_buffers;
2786 std::map<u32, TexelBuffer> texel_buffers; 2799 std::map<u32, TexelBuffer> uniform_texels;
2787 std::map<u32, SampledImage> sampled_images; 2800 std::map<u32, SampledImage> sampled_images;
2801 std::map<u32, TexelBuffer> storage_texels;
2788 std::map<u32, StorageImage> images; 2802 std::map<u32, StorageImage> images;
2789 2803
2804 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2790 Id instance_index{}; 2805 Id instance_index{};
2791 Id vertex_index{}; 2806 Id vertex_index{};
2792 Id base_instance{}; 2807 Id base_instance{};
2793 Id base_vertex{}; 2808 Id base_vertex{};
2794 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2795 Id frag_depth{}; 2809 Id frag_depth{};
2796 Id frag_coord{}; 2810 Id frag_coord{};
2797 Id front_facing{}; 2811 Id front_facing{};
@@ -3047,13 +3061,17 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
3047 } 3061 }
3048 for (const auto& sampler : ir.GetSamplers()) { 3062 for (const auto& sampler : ir.GetSamplers()) {
3049 if (sampler.is_buffer) { 3063 if (sampler.is_buffer) {
3050 entries.texel_buffers.emplace_back(sampler); 3064 entries.uniform_texels.emplace_back(sampler);
3051 } else { 3065 } else {
3052 entries.samplers.emplace_back(sampler); 3066 entries.samplers.emplace_back(sampler);
3053 } 3067 }
3054 } 3068 }
3055 for (const auto& image : ir.GetImages()) { 3069 for (const auto& image : ir.GetImages()) {
3056 entries.images.emplace_back(image); 3070 if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
3071 entries.storage_texels.emplace_back(image);
3072 } else {
3073 entries.images.emplace_back(image);
3074 }
3057 } 3075 }
3058 for (const auto& attribute : ir.GetInputAttributes()) { 3076 for (const auto& attribute : ir.GetInputAttributes()) {
3059 if (IsGenericAttribute(attribute)) { 3077 if (IsGenericAttribute(attribute)) {
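
GenerateShaderEntries now partitions both IR lists by their buffer-ness: buffer samplers become uniform texels, TextureBuffer images become storage texels, and everything else keeps its old category. A reduced sketch of the split, with the type check simplified to a single flag:

    #include <vector>

    struct Image {
        bool is_texture_buffer; // stand-in for type == ImageType::TextureBuffer
    };

    struct Entries {
        std::vector<Image> storage_texels;
        std::vector<Image> images;
    };

    Entries Partition(const std::vector<Image>& input) {
        Entries out;
        for (const Image& image : input) {
            (image.is_texture_buffer ? out.storage_texels : out.images).push_back(image);
        }
        return out;
    }
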
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index f4c05ac3c..2b0e90396 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -21,8 +21,9 @@ class VKDevice;
21namespace Vulkan { 21namespace Vulkan {
22 22
23using Maxwell = Tegra::Engines::Maxwell3D::Regs; 23using Maxwell = Tegra::Engines::Maxwell3D::Regs;
24using TexelBufferEntry = VideoCommon::Shader::Sampler; 24using UniformTexelEntry = VideoCommon::Shader::Sampler;
25using SamplerEntry = VideoCommon::Shader::Sampler; 25using SamplerEntry = VideoCommon::Shader::Sampler;
26using StorageTexelEntry = VideoCommon::Shader::Image;
26using ImageEntry = VideoCommon::Shader::Image; 27using ImageEntry = VideoCommon::Shader::Image;
27 28
28constexpr u32 DESCRIPTOR_SET = 0; 29constexpr u32 DESCRIPTOR_SET = 0;
@@ -66,13 +67,15 @@ private:
66struct ShaderEntries { 67struct ShaderEntries {
67 u32 NumBindings() const { 68 u32 NumBindings() const {
68 return static_cast<u32>(const_buffers.size() + global_buffers.size() + 69 return static_cast<u32>(const_buffers.size() + global_buffers.size() +
69 texel_buffers.size() + samplers.size() + images.size()); 70 uniform_texels.size() + samplers.size() + storage_texels.size() +
71 images.size());
70 } 72 }
71 73
72 std::vector<ConstBufferEntry> const_buffers; 74 std::vector<ConstBufferEntry> const_buffers;
73 std::vector<GlobalBufferEntry> global_buffers; 75 std::vector<GlobalBufferEntry> global_buffers;
74 std::vector<TexelBufferEntry> texel_buffers; 76 std::vector<UniformTexelEntry> uniform_texels;
75 std::vector<SamplerEntry> samplers; 77 std::vector<SamplerEntry> samplers;
78 std::vector<StorageTexelEntry> storage_texels;
76 std::vector<ImageEntry> images; 79 std::vector<ImageEntry> images;
77 std::set<u32> attributes; 80 std::set<u32> attributes;
78 std::array<bool, Maxwell::NumClipDistances> clip_distances{}; 81 std::array<bool, Maxwell::NumClipDistances> clip_distances{};
@@ -88,7 +91,8 @@ struct Specialization final {
88 u32 shared_memory_size{}; 91 u32 shared_memory_size{};
89 92
90 // Graphics specific 93 // Graphics specific
91 std::optional<float> point_size{}; 94 std::optional<float> point_size;
95 std::bitset<Maxwell::NumVertexAttributes> enabled_attributes;
92 std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{}; 96 std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{};
93 bool ndc_minus_one_to_one{}; 97 bool ndc_minus_one_to_one{};
94}; 98};
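
NumBindings has to count all six descriptor categories because bindings are assigned contiguously in declaration order, and the next stage starts where the previous one ended. A sketch of that bookkeeping under those assumed conventions:

    #include <cstddef>
    #include <cstdint>

    struct ShaderEntries {
        std::size_t const_buffers = 0, global_buffers = 0, uniform_texels = 0;
        std::size_t samplers = 0, storage_texels = 0, images = 0;

        std::uint32_t NumBindings() const {
            return static_cast<std::uint32_t>(const_buffers + global_buffers + uniform_texels +
                                              samplers + storage_texels + images);
        }
    };

    // Each stage's base_binding is the running total of earlier stages.
    std::uint32_t NextBaseBinding(std::uint32_t base, const ShaderEntries& entries) {
        return base + entries.NumBindings();
    }
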
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 55f43e61b..430031665 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -100,8 +100,8 @@ vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
100 ci.pNext = nullptr; 100 ci.pNext = nullptr;
101 ci.flags = 0; 101 ci.flags = 0;
102 ci.size = static_cast<VkDeviceSize>(host_memory_size); 102 ci.size = static_cast<VkDeviceSize>(host_memory_size);
103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | 103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
104 VK_BUFFER_USAGE_TRANSFER_DST_BIT; 104 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
106 ci.queueFamilyIndexCount = 0; 106 ci.queueFamilyIndexCount = 0;
107 ci.pQueueFamilyIndices = nullptr; 107 ci.pQueueFamilyIndices = nullptr;
@@ -167,6 +167,7 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
167 ci.extent = {params.width, params.height, 1}; 167 ci.extent = {params.width, params.height, 1};
168 break; 168 break;
169 case SurfaceTarget::Texture3D: 169 case SurfaceTarget::Texture3D:
170 ci.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
170 ci.extent = {params.width, params.height, params.depth}; 171 ci.extent = {params.width, params.height, params.depth};
171 break; 172 break;
172 case SurfaceTarget::TextureBuffer: 173 case SurfaceTarget::TextureBuffer:
@@ -176,6 +177,12 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
176 return ci; 177 return ci;
177} 178}
178 179
180u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source, Tegra::Texture::SwizzleSource y_source,
181 Tegra::Texture::SwizzleSource z_source, Tegra::Texture::SwizzleSource w_source) {
182 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
183 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
184}
185
179} // Anonymous namespace 186} // Anonymous namespace
180 187
181CachedSurface::CachedSurface(Core::System& system, const VKDevice& device, 188CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
@@ -203,9 +210,11 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
203 } 210 }
204 211
205 // TODO(Rodrigo): Move this to a virtual function. 212 // TODO(Rodrigo): Move this to a virtual function.
206 main_view = CreateViewInner( 213 u32 num_layers = 1;
207 ViewParams(params.target, 0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels), 214 if (params.is_layered || params.target == SurfaceTarget::Texture3D) {
208 true); 215 num_layers = params.depth;
216 }
217 main_view = CreateView(ViewParams(params.target, 0, num_layers, 0, params.num_levels));
209} 218}
210 219
211CachedSurface::~CachedSurface() = default; 220CachedSurface::~CachedSurface() = default;
@@ -253,12 +262,8 @@ void CachedSurface::DecorateSurfaceName() {
253} 262}
254 263
255View CachedSurface::CreateView(const ViewParams& params) { 264View CachedSurface::CreateView(const ViewParams& params) {
256 return CreateViewInner(params, false);
257}
258
259View CachedSurface::CreateViewInner(const ViewParams& params, bool is_proxy) {
260 // TODO(Rodrigo): Add name decorations 265 // TODO(Rodrigo): Add name decorations
261 return views[params] = std::make_shared<CachedSurfaceView>(device, *this, params, is_proxy); 266 return views[params] = std::make_shared<CachedSurfaceView>(device, *this, params);
262} 267}
263 268
264void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) { 269void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
@@ -342,38 +347,44 @@ VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
342} 347}
343 348
344CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surface, 349CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surface,
345 const ViewParams& params, bool is_proxy) 350 const ViewParams& params)
346 : VideoCommon::ViewBase{params}, params{surface.GetSurfaceParams()}, 351 : VideoCommon::ViewBase{params}, params{surface.GetSurfaceParams()},
347 image{surface.GetImageHandle()}, buffer_view{surface.GetBufferViewHandle()}, 352 image{surface.GetImageHandle()}, buffer_view{surface.GetBufferViewHandle()},
348 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface}, 353 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
349 base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level}, 354 base_level{params.base_level}, num_levels{params.num_levels},
350 num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target) 355 image_view_type{image ? GetImageViewType(params.target) : VK_IMAGE_VIEW_TYPE_1D} {
351 : VK_IMAGE_VIEW_TYPE_1D} {} 356 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
357 base_layer = 0;
358 num_layers = 1;
359 base_slice = params.base_layer;
360 num_slices = params.num_layers;
361 } else {
362 base_layer = params.base_layer;
363 num_layers = params.num_layers;
364 }
365}
352 366
353CachedSurfaceView::~CachedSurfaceView() = default; 367CachedSurfaceView::~CachedSurfaceView() = default;
354 368
355VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source, 369VkImageView CachedSurfaceView::GetImageView(SwizzleSource x_source, SwizzleSource y_source,
356 SwizzleSource z_source, SwizzleSource w_source) { 370 SwizzleSource z_source, SwizzleSource w_source) {
357 const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source); 371 const u32 new_swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
358 if (last_image_view && last_swizzle == swizzle) { 372 if (last_image_view && last_swizzle == new_swizzle) {
359 return last_image_view; 373 return last_image_view;
360 } 374 }
361 last_swizzle = swizzle; 375 last_swizzle = new_swizzle;
362 376
363 const auto [entry, is_cache_miss] = view_cache.try_emplace(swizzle); 377 const auto [entry, is_cache_miss] = view_cache.try_emplace(new_swizzle);
364 auto& image_view = entry->second; 378 auto& image_view = entry->second;
365 if (!is_cache_miss) { 379 if (!is_cache_miss) {
366 return last_image_view = *image_view; 380 return last_image_view = *image_view;
367 } 381 }
368 382
369 auto swizzle_x = MaxwellToVK::SwizzleSource(x_source); 383 std::array swizzle{MaxwellToVK::SwizzleSource(x_source), MaxwellToVK::SwizzleSource(y_source),
370 auto swizzle_y = MaxwellToVK::SwizzleSource(y_source); 384 MaxwellToVK::SwizzleSource(z_source), MaxwellToVK::SwizzleSource(w_source)};
371 auto swizzle_z = MaxwellToVK::SwizzleSource(z_source);
372 auto swizzle_w = MaxwellToVK::SwizzleSource(w_source);
373
374 if (params.pixel_format == VideoCore::Surface::PixelFormat::A1B5G5R5U) { 385 if (params.pixel_format == VideoCore::Surface::PixelFormat::A1B5G5R5U) {
375 // A1B5G5R5 is implemented as A1R5G5B5, we have to change the swizzle here. 386 // A1B5G5R5 is implemented as A1R5G5B5, we have to change the swizzle here.
376 std::swap(swizzle_x, swizzle_z); 387 std::swap(swizzle[0], swizzle[2]);
377 } 388 }
378 389
379 // Games can sample depth or stencil values on textures. This is decided by the swizzle value on 390 // Games can sample depth or stencil values on textures. This is decided by the swizzle value on
@@ -395,11 +406,16 @@ VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y
395 UNIMPLEMENTED(); 406 UNIMPLEMENTED();
396 } 407 }
397 408
398 // Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity 409 // Make sure we sample the first component
399 swizzle_x = VK_COMPONENT_SWIZZLE_R; 410 std::transform(
400 swizzle_y = VK_COMPONENT_SWIZZLE_G; 411 swizzle.begin(), swizzle.end(), swizzle.begin(), [](VkComponentSwizzle component) {
401 swizzle_z = VK_COMPONENT_SWIZZLE_B; 412 return component == VK_COMPONENT_SWIZZLE_G ? VK_COMPONENT_SWIZZLE_R : component;
402 swizzle_w = VK_COMPONENT_SWIZZLE_A; 413 });
414 }
415
416 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
417 ASSERT(base_slice == 0);
418 ASSERT(num_slices == params.depth);
403 } 419 }
404 420
405 VkImageViewCreateInfo ci; 421 VkImageViewCreateInfo ci;
@@ -409,7 +425,7 @@ VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y
409 ci.image = surface.GetImageHandle(); 425 ci.image = surface.GetImageHandle();
410 ci.viewType = image_view_type; 426 ci.viewType = image_view_type;
411 ci.format = surface.GetImage().GetFormat(); 427 ci.format = surface.GetImage().GetFormat();
412 ci.components = {swizzle_x, swizzle_y, swizzle_z, swizzle_w}; 428 ci.components = {swizzle[0], swizzle[1], swizzle[2], swizzle[3]};
413 ci.subresourceRange.aspectMask = aspect; 429 ci.subresourceRange.aspectMask = aspect;
414 ci.subresourceRange.baseMipLevel = base_level; 430 ci.subresourceRange.baseMipLevel = base_level;
415 ci.subresourceRange.levelCount = num_levels; 431 ci.subresourceRange.levelCount = num_levels;
@@ -420,6 +436,35 @@ VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y
420 return last_image_view = *image_view; 436 return last_image_view = *image_view;
421} 437}
422 438
439VkImageView CachedSurfaceView::GetAttachment() {
440 if (render_target) {
441 return *render_target;
442 }
443
444 VkImageViewCreateInfo ci;
445 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
446 ci.pNext = nullptr;
447 ci.flags = 0;
448 ci.image = surface.GetImageHandle();
449 ci.format = surface.GetImage().GetFormat();
450 ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
451 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
452 ci.subresourceRange.aspectMask = aspect_mask;
453 ci.subresourceRange.baseMipLevel = base_level;
454 ci.subresourceRange.levelCount = num_levels;
455 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
456 ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
457 ci.subresourceRange.baseArrayLayer = base_slice;
458 ci.subresourceRange.layerCount = num_slices;
459 } else {
460 ci.viewType = image_view_type;
461 ci.subresourceRange.baseArrayLayer = base_layer;
462 ci.subresourceRange.layerCount = num_layers;
463 }
464 render_target = device.GetLogical().CreateImageView(ci);
465 return *render_target;
466}
467
423VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 468VKTextureCache::VKTextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
424 const VKDevice& device, VKResourceManager& resource_manager, 469 const VKDevice& device, VKResourceManager& resource_manager,
425 VKMemoryManager& memory_manager, VKScheduler& scheduler, 470 VKMemoryManager& memory_manager, VKScheduler& scheduler,
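
GetImageView memoizes on the packed swizzle: the four sources encode into one u32 key, the view cache maps keys to image views, and the last hit is remembered so back-to-back draws with an unchanged swizzle skip the hash lookup entirely. A self-contained sketch of the caching scheme, with Handle standing in for VkImageView:

    #include <cstdint>
    #include <unordered_map>

    using Handle = std::uint64_t; // stand-in for VkImageView

    std::uint32_t EncodeSwizzle(std::uint8_t x, std::uint8_t y, std::uint8_t z, std::uint8_t w) {
        return (std::uint32_t{x} << 24) | (std::uint32_t{y} << 16) | (std::uint32_t{z} << 8) | w;
    }

    struct ViewCache {
        Handle Get(std::uint8_t x, std::uint8_t y, std::uint8_t z, std::uint8_t w) {
            const std::uint32_t key = EncodeSwizzle(x, y, z, w);
            if (last_view != 0 && key == last_key) {
                return last_view; // fast path: same swizzle as the previous draw
            }
            last_key = key;
            auto [it, inserted] = views.try_emplace(key);
            if (inserted) {
                it->second = Create(); // placeholder for vkCreateImageView
            }
            return last_view = it->second;
        }
        Handle Create() { return ++next_handle; }

        std::uint32_t last_key = 0;
        Handle last_view = 0;
        Handle next_handle = 0;
        std::unordered_map<std::uint32_t, Handle> views;
    };
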
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index f211ccb1e..807e26c8a 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -91,7 +91,6 @@ protected:
91 void DecorateSurfaceName(); 91 void DecorateSurfaceName();
92 92
93 View CreateView(const ViewParams& params) override; 93 View CreateView(const ViewParams& params) override;
94 View CreateViewInner(const ViewParams& params, bool is_proxy);
95 94
96private: 95private:
97 void UploadBuffer(const std::vector<u8>& staging_buffer); 96 void UploadBuffer(const std::vector<u8>& staging_buffer);
@@ -120,23 +119,20 @@ private:
120class CachedSurfaceView final : public VideoCommon::ViewBase { 119class CachedSurfaceView final : public VideoCommon::ViewBase {
121public: 120public:
122 explicit CachedSurfaceView(const VKDevice& device, CachedSurface& surface, 121 explicit CachedSurfaceView(const VKDevice& device, CachedSurface& surface,
123 const ViewParams& params, bool is_proxy); 122 const ViewParams& params);
124 ~CachedSurfaceView(); 123 ~CachedSurfaceView();
125 124
126 VkImageView GetHandle(Tegra::Texture::SwizzleSource x_source, 125 VkImageView GetImageView(Tegra::Texture::SwizzleSource x_source,
127 Tegra::Texture::SwizzleSource y_source, 126 Tegra::Texture::SwizzleSource y_source,
128 Tegra::Texture::SwizzleSource z_source, 127 Tegra::Texture::SwizzleSource z_source,
129 Tegra::Texture::SwizzleSource w_source); 128 Tegra::Texture::SwizzleSource w_source);
129
130 VkImageView GetAttachment();
130 131
131 bool IsSameSurface(const CachedSurfaceView& rhs) const { 132 bool IsSameSurface(const CachedSurfaceView& rhs) const {
132 return &surface == &rhs.surface; 133 return &surface == &rhs.surface;
133 } 134 }
134 135
135 VkImageView GetHandle() {
136 return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G,
137 Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A);
138 }
139
140 u32 GetWidth() const { 136 u32 GetWidth() const {
141 return params.GetMipWidth(base_level); 137 return params.GetMipWidth(base_level);
142 } 138 }
@@ -180,14 +176,6 @@ public:
180 } 176 }
181 177
182private: 178private:
183 static u32 EncodeSwizzle(Tegra::Texture::SwizzleSource x_source,
184 Tegra::Texture::SwizzleSource y_source,
185 Tegra::Texture::SwizzleSource z_source,
186 Tegra::Texture::SwizzleSource w_source) {
187 return (static_cast<u32>(x_source) << 24) | (static_cast<u32>(y_source) << 16) |
188 (static_cast<u32>(z_source) << 8) | static_cast<u32>(w_source);
189 }
190
191 // Store a copy of these values to avoid double dereference when reading them 179 // Store a copy of these values to avoid double dereference when reading them
192 const SurfaceParams params; 180 const SurfaceParams params;
193 const VkImage image; 181 const VkImage image;
@@ -196,15 +184,18 @@ private:
196 184
197 const VKDevice& device; 185 const VKDevice& device;
198 CachedSurface& surface; 186 CachedSurface& surface;
199 const u32 base_layer;
200 const u32 num_layers;
201 const u32 base_level; 187 const u32 base_level;
202 const u32 num_levels; 188 const u32 num_levels;
203 const VkImageViewType image_view_type; 189 const VkImageViewType image_view_type;
190 u32 base_layer = 0;
191 u32 num_layers = 0;
192 u32 base_slice = 0;
193 u32 num_slices = 0;
204 194
205 VkImageView last_image_view = nullptr; 195 VkImageView last_image_view = nullptr;
206 u32 last_swizzle = 0; 196 u32 last_swizzle = 0;
207 197
198 vk::ImageView render_target;
208 std::unordered_map<u32, vk::ImageView> view_cache; 199 std::unordered_map<u32, vk::ImageView> view_cache;
209}; 200};
210 201
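
With the 2D_ARRAY_COMPATIBLE flag set on 3D images (see the vk_texture_cache.cpp hunk above), GetAttachment can render into depth slices through a 2D or 2D-array view. A sketch of just that branch, assuming the slice/layer split introduced in this header:

    #include <vulkan/vulkan.h>

    VkImageViewCreateInfo MakeAttachmentInfo(VkImage image, VkFormat format,
                                             VkImageViewType image_view_type,
                                             VkImageAspectFlags aspect_mask,
                                             uint32_t base_level, uint32_t num_levels,
                                             uint32_t base_layer, uint32_t num_layers,
                                             uint32_t base_slice, uint32_t num_slices) {
        VkImageViewCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        ci.image = image;
        ci.format = format;
        ci.subresourceRange.aspectMask = aspect_mask;
        ci.subresourceRange.baseMipLevel = base_level;
        ci.subresourceRange.levelCount = num_levels;
        if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
            // Address depth slices of the 3D image as layers of a 2D (array) view.
            ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
            ci.subresourceRange.baseArrayLayer = base_slice;
            ci.subresourceRange.layerCount = num_slices;
        } else {
            ci.viewType = image_view_type;
            ci.subresourceRange.baseArrayLayer = base_layer;
            ci.subresourceRange.layerCount = num_layers;
        }
        return ci;
    }
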
diff --git a/src/video_core/shader/decode/other.cpp b/src/video_core/shader/decode/other.cpp
index 694b325e1..c0a8f233f 100644
--- a/src/video_core/shader/decode/other.cpp
+++ b/src/video_core/shader/decode/other.cpp
@@ -83,7 +83,7 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
83 return Operation(OperationCode::YNegate); 83 return Operation(OperationCode::YNegate);
84 case SystemVariable::InvocationInfo: 84 case SystemVariable::InvocationInfo:
85 LOG_WARNING(HW_GPU, "S2R instruction with InvocationInfo is incomplete"); 85 LOG_WARNING(HW_GPU, "S2R instruction with InvocationInfo is incomplete");
86 return Immediate(0U); 86 return Immediate(0x00ff'0000U);
87 case SystemVariable::WscaleFactorXY: 87 case SystemVariable::WscaleFactorXY:
88 UNIMPLEMENTED_MSG("S2R WscaleFactorXY is not implemented"); 88 UNIMPLEMENTED_MSG("S2R WscaleFactorXY is not implemented");
89 return Immediate(0U); 89 return Immediate(0U);
@@ -299,9 +299,19 @@ u32 ShaderIR::DecodeOther(NodeBlock& bb, u32 pc) {
299 break; 299 break;
300 } 300 }
301 case OpCode::Id::MEMBAR: { 301 case OpCode::Id::MEMBAR: {
302 UNIMPLEMENTED_IF(instr.membar.type != Tegra::Shader::MembarType::GL);
303 UNIMPLEMENTED_IF(instr.membar.unknown != Tegra::Shader::MembarUnknown::Default); 302 UNIMPLEMENTED_IF(instr.membar.unknown != Tegra::Shader::MembarUnknown::Default);
304 bb.push_back(Operation(OperationCode::MemoryBarrierGL)); 303 const OperationCode type = [instr] {
304 switch (instr.membar.type) {
305 case Tegra::Shader::MembarType::CTA:
306 return OperationCode::MemoryBarrierGroup;
307 case Tegra::Shader::MembarType::GL:
308 return OperationCode::MemoryBarrierGlobal;
309 default:
310 UNIMPLEMENTED_MSG("MEMBAR type={}", static_cast<int>(instr.membar.type.Value()));
311 return OperationCode::MemoryBarrierGlobal;
312 }
313 }();
314 bb.push_back(Operation(type));
305 break; 315 break;
306 } 316 }
307 case OpCode::Id::DEPBAR: { 317 case OpCode::Id::DEPBAR: {
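
MEMBAR now lowers each hardware scope to its own IR opcode instead of assuming GL, using an immediately invoked lambda to keep the result const. A sketch of the mapping, with the enums reduced to the cases the diff handles:

    enum class MembarType { CTA, GL, SYS };
    enum class OperationCode { MemoryBarrierGroup, MemoryBarrierGlobal };

    OperationCode TranslateMembar(MembarType type) {
        return [type] {
            switch (type) {
            case MembarType::CTA:
                return OperationCode::MemoryBarrierGroup; // workgroup scope
            case MembarType::GL:
                return OperationCode::MemoryBarrierGlobal; // device scope
            default:
                // Unhandled scopes fall back to the widest implemented barrier.
                return OperationCode::MemoryBarrierGlobal;
            }
        }();
    }
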
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index c06512413..c5e5165ff 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -233,8 +233,9 @@ enum class OperationCode {
233 ThreadLtMask, /// () -> uint 233 ThreadLtMask, /// () -> uint
234 ShuffleIndexed, /// (uint value, uint index) -> uint 234 ShuffleIndexed, /// (uint value, uint index) -> uint
235 235
236 Barrier, /// () -> void 236 Barrier, /// () -> void
237 MemoryBarrierGL, /// () -> void 237 MemoryBarrierGroup, /// () -> void
238 MemoryBarrierGlobal, /// () -> void
238 239
239 Amount, 240 Amount,
240}; 241};
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index 7032e0059..f476f03b0 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -41,7 +41,7 @@ struct Table {
41 ComponentType alpha_component; 41 ComponentType alpha_component;
42 bool is_srgb; 42 bool is_srgb;
43}; 43};
44constexpr std::array<Table, 77> DefinitionTable = {{ 44constexpr std::array<Table, 78> DefinitionTable = {{
45 {TextureFormat::A8R8G8B8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ABGR8U}, 45 {TextureFormat::A8R8G8B8, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::ABGR8U},
46 {TextureFormat::A8R8G8B8, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::ABGR8S}, 46 {TextureFormat::A8R8G8B8, C, SNORM, SNORM, SNORM, SNORM, PixelFormat::ABGR8S},
47 {TextureFormat::A8R8G8B8, C, UINT, UINT, UINT, UINT, PixelFormat::ABGR8UI}, 47 {TextureFormat::A8R8G8B8, C, UINT, UINT, UINT, UINT, PixelFormat::ABGR8UI},
@@ -98,6 +98,7 @@ constexpr std::array<Table, 77> DefinitionTable = {{
98 {TextureFormat::ZF32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::Z32F}, 98 {TextureFormat::ZF32, C, FLOAT, FLOAT, FLOAT, FLOAT, PixelFormat::Z32F},
99 {TextureFormat::Z16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::Z16}, 99 {TextureFormat::Z16, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::Z16},
100 {TextureFormat::S8Z24, C, UINT, UNORM, UNORM, UNORM, PixelFormat::S8Z24}, 100 {TextureFormat::S8Z24, C, UINT, UNORM, UNORM, UNORM, PixelFormat::S8Z24},
101 {TextureFormat::G24R8, C, UINT, UNORM, UNORM, UNORM, PixelFormat::S8Z24},
101 {TextureFormat::ZF32_X24S8, C, FLOAT, UINT, UNORM, UNORM, PixelFormat::Z32FS8}, 102 {TextureFormat::ZF32_X24S8, C, FLOAT, UINT, UNORM, UNORM, PixelFormat::Z32FS8},
102 103
103 {TextureFormat::DXT1, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT1}, 104 {TextureFormat::DXT1, C, UNORM, UNORM, UNORM, UNORM, PixelFormat::DXT1},
diff --git a/src/video_core/texture_cache/surface_base.cpp b/src/video_core/texture_cache/surface_base.cpp
index 715f39d0d..94d3a6ae5 100644
--- a/src/video_core/texture_cache/surface_base.cpp
+++ b/src/video_core/texture_cache/surface_base.cpp
@@ -248,12 +248,11 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
248 248
249 // Use an extra temporary buffer 249
250 auto& tmp_buffer = staging_cache.GetBuffer(1); 250 auto& tmp_buffer = staging_cache.GetBuffer(1);
251 // Special case for 3D Texture Segments
252 const bool must_read_current_data =
253 params.block_depth > 0 && params.target == VideoCore::Surface::SurfaceTarget::Texture2D;
254 tmp_buffer.resize(guest_memory_size); 251 tmp_buffer.resize(guest_memory_size);
255 host_ptr = tmp_buffer.data(); 252 host_ptr = tmp_buffer.data();
256 if (must_read_current_data) { 253
254 if (params.target == SurfaceTarget::Texture3D) {
255 // Special case for 3D texture segments
257 memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size); 256 memory_manager.ReadBlockUnsafe(gpu_addr, host_ptr, guest_memory_size);
258 } 257 }
259 258
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index 79e10ffbb..173f2edba 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -217,8 +217,8 @@ public:
217 } 217 }
218 218
219 bool IsProtected() const { 219 bool IsProtected() const {
220 // Only 3D Slices are to be protected 220 // Only 3D slices are to be protected
221 return is_target && params.block_depth > 0; 221 return is_target && params.target == SurfaceTarget::Texture3D;
222 } 222 }
223 223
224 bool IsRenderTarget() const { 224 bool IsRenderTarget() const {
@@ -250,6 +250,11 @@ public:
250 return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels)); 250 return GetView(ViewParams(overview_params.target, 0, num_layers, 0, params.num_levels));
251 } 251 }
252 252
253 TView Emplace3DView(u32 slice, u32 depth, u32 base_level, u32 num_levels) {
254 return GetView(ViewParams(VideoCore::Surface::SurfaceTarget::Texture3D, slice, depth,
255 base_level, num_levels));
256 }
257
253 std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params, 258 std::optional<TView> EmplaceIrregularView(const SurfaceParams& view_params,
254 const GPUVAddr view_addr, 259 const GPUVAddr view_addr,
255 const std::size_t candidate_size, const u32 mipmap, 260 const std::size_t candidate_size, const u32 mipmap,
@@ -272,8 +277,8 @@ public:
272 std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr, 277 std::optional<TView> EmplaceView(const SurfaceParams& view_params, const GPUVAddr view_addr,
273 const std::size_t candidate_size) { 278 const std::size_t candidate_size) {
274 if (params.target == SurfaceTarget::Texture3D || 279 if (params.target == SurfaceTarget::Texture3D ||
275 (params.num_levels == 1 && !params.is_layered) || 280 view_params.target == SurfaceTarget::Texture3D ||
276 view_params.target == SurfaceTarget::Texture3D) { 281 (params.num_levels == 1 && !params.is_layered)) {
277 return {}; 282 return {};
278 } 283 }
279 const auto layer_mipmap{GetLayerMipmap(view_addr)}; 284 const auto layer_mipmap{GetLayerMipmap(view_addr)};
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 884fabffe..0b2b2b8c4 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -215,10 +215,19 @@ SurfaceParams SurfaceParams::CreateForFramebuffer(Core::System& system, std::siz
215 params.num_levels = 1; 215 params.num_levels = 1;
216 params.emulated_levels = 1; 216 params.emulated_levels = 1;
217 217
218 const bool is_layered = config.layers > 1 && params.block_depth == 0; 218 if (config.memory_layout.is_3d != 0) {
219 params.is_layered = is_layered; 219 params.depth = config.layers.Value();
220 params.depth = is_layered ? config.layers.Value() : 1; 220 params.is_layered = false;
221 params.target = is_layered ? SurfaceTarget::Texture2DArray : SurfaceTarget::Texture2D; 221 params.target = SurfaceTarget::Texture3D;
222 } else if (config.layers > 1) {
223 params.depth = config.layers.Value();
224 params.is_layered = true;
225 params.target = SurfaceTarget::Texture2DArray;
226 } else {
227 params.depth = 1;
228 params.is_layered = false;
229 params.target = SurfaceTarget::Texture2D;
230 }
222 return params; 231 return params;
223} 232}
224 233
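
CreateForFramebuffer now classifies render targets three ways instead of two, treating a 3D memory layout as depth slices rather than layers. A condensed sketch of the decision, with field names assumed from the hunk:

    enum class SurfaceTarget { Texture2D, Texture2DArray, Texture3D };

    struct Classified {
        SurfaceTarget target;
        unsigned depth;
        bool is_layered;
    };

    Classified Classify(bool memory_layout_is_3d, unsigned layers) {
        if (memory_layout_is_3d) {
            return {SurfaceTarget::Texture3D, layers, false}; // layers act as depth slices
        }
        if (layers > 1) {
            return {SurfaceTarget::Texture2DArray, layers, true};
        }
        return {SurfaceTarget::Texture2D, 1, false};
    }
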
@@ -237,7 +246,7 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
237 params.width = config.width; 246 params.width = config.width;
238 params.height = config.height; 247 params.height = config.height;
239 params.pitch = config.pitch; 248 params.pitch = config.pitch;
240 // TODO(Rodrigo): Try to guess the surface target from depth and layer parameters 249 // TODO(Rodrigo): Try to guess texture arrays from parameters
241 params.target = SurfaceTarget::Texture2D; 250 params.target = SurfaceTarget::Texture2D;
242 params.depth = 1; 251 params.depth = 1;
243 params.num_levels = 1; 252 params.num_levels = 1;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index d6efc34b2..b543fc8c0 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -14,6 +14,7 @@
14#include <unordered_map> 14#include <unordered_map>
15#include <vector> 15#include <vector>
16 16
17#include <boost/container/small_vector.hpp>
17#include <boost/icl/interval_map.hpp> 18#include <boost/icl/interval_map.hpp>
18#include <boost/range/iterator_range.hpp> 19#include <boost/range/iterator_range.hpp>
19 20
@@ -53,6 +54,7 @@ using RenderTargetConfig = Tegra::Engines::Maxwell3D::Regs::RenderTargetConfig;
53 54
54template <typename TSurface, typename TView> 55template <typename TSurface, typename TView>
55class TextureCache { 56class TextureCache {
57 using VectorSurface = boost::container::small_vector<TSurface, 1>;
56 58
57public: 59public:
58 void InvalidateRegion(VAddr addr, std::size_t size) { 60 void InvalidateRegion(VAddr addr, std::size_t size) {
@@ -296,30 +298,30 @@ public:
296 const GPUVAddr src_gpu_addr = src_config.Address(); 298 const GPUVAddr src_gpu_addr = src_config.Address();
297 const GPUVAddr dst_gpu_addr = dst_config.Address(); 299 const GPUVAddr dst_gpu_addr = dst_config.Address();
298 DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr); 300 DeduceBestBlit(src_params, dst_params, src_gpu_addr, dst_gpu_addr);
299 const std::optional<VAddr> dst_cpu_addr = 301
300 system.GPU().MemoryManager().GpuToCpuAddress(dst_gpu_addr); 302 const auto& memory_manager = system.GPU().MemoryManager();
301 const std::optional<VAddr> src_cpu_addr = 303 const std::optional<VAddr> dst_cpu_addr = memory_manager.GpuToCpuAddress(dst_gpu_addr);
302 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); 304 const std::optional<VAddr> src_cpu_addr = memory_manager.GpuToCpuAddress(src_gpu_addr);
303 std::pair<TSurface, TView> dst_surface = 305 std::pair dst_surface = GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
304 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false); 306 TView src_surface = GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false).second;
305 std::pair<TSurface, TView> src_surface = 307 ImageBlit(src_surface, dst_surface.second, copy_config);
306 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
307 ImageBlit(src_surface.second, dst_surface.second, copy_config);
308 dst_surface.first->MarkAsModified(true, Tick()); 308 dst_surface.first->MarkAsModified(true, Tick());
309 } 309 }
310 310
311 TSurface TryFindFramebufferSurface(VAddr addr) { 311 TSurface TryFindFramebufferSurface(VAddr addr) const {
312 if (!addr) { 312 if (!addr) {
313 return nullptr; 313 return nullptr;
314 } 314 }
315 const VAddr page = addr >> registry_page_bits; 315 const VAddr page = addr >> registry_page_bits;
316 std::vector<TSurface>& list = registry[page]; 316 const auto it = registry.find(page);
317 for (auto& surface : list) { 317 if (it == registry.end()) {
318 if (surface->GetCpuAddr() == addr) { 318 return nullptr;
319 return surface;
320 }
321 } 319 }
322 return nullptr; 320 const auto& list = it->second;
321 const auto found = std::find_if(list.begin(), list.end(), [addr](const auto& surface) {
322 return surface->GetCpuAddr() == addr;
323 });
324 return found != list.end() ? *found : nullptr;
323 } 325 }
324 326
325 u64 Tick() { 327 u64 Tick() {
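
TryFindFramebufferSurface is now const-correct: registry.find() replaces operator[], so a lookup miss no longer inserts an empty page list as a side effect. A self-contained sketch of the lookup, with an assumed page granularity:

    #include <algorithm>
    #include <cstdint>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct Surface {
        std::uintptr_t cpu_addr;
    };
    using SurfacePtr = std::shared_ptr<Surface>;
    using Registry = std::unordered_map<std::uintptr_t, std::vector<SurfacePtr>>;

    constexpr std::uintptr_t registry_page_bits = 20; // assumed page size

    SurfacePtr TryFind(const Registry& registry, std::uintptr_t addr) {
        if (addr == 0) {
            return nullptr;
        }
        const auto it = registry.find(addr >> registry_page_bits);
        if (it == registry.end()) {
            return nullptr;
        }
        const auto& list = it->second;
        const auto found = std::find_if(list.begin(), list.end(), [addr](const SurfacePtr& s) {
            return s->cpu_addr == addr;
        });
        return found != list.end() ? *found : nullptr;
    }
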
@@ -498,18 +500,18 @@ private:
498 * @param untopological Indicates to the recycler that the texture has no way 500 * @param untopological Indicates to the recycler that the texture has no way
499 * to match the overlaps due to topological reasons. 501 * to match the overlaps due to topological reasons.
500 **/ 502 **/
501 RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params, 503 RecycleStrategy PickStrategy(VectorSurface& overlaps, const SurfaceParams& params,
502 const GPUVAddr gpu_addr, const MatchTopologyResult untopological) { 504 const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
503 if (Settings::IsGPULevelExtreme()) { 505 if (Settings::IsGPULevelExtreme()) {
504 return RecycleStrategy::Flush; 506 return RecycleStrategy::Flush;
505 } 507 }
506 // 3D Textures decision 508 // 3D Textures decision
507 if (params.block_depth > 1 || params.target == SurfaceTarget::Texture3D) { 509 if (params.target == SurfaceTarget::Texture3D) {
508 return RecycleStrategy::Flush; 510 return RecycleStrategy::Flush;
509 } 511 }
510 for (const auto& s : overlaps) { 512 for (const auto& s : overlaps) {
511 const auto& s_params = s->GetSurfaceParams(); 513 const auto& s_params = s->GetSurfaceParams();
512 if (s_params.block_depth > 1 || s_params.target == SurfaceTarget::Texture3D) { 514 if (s_params.target == SurfaceTarget::Texture3D) {
513 return RecycleStrategy::Flush; 515 return RecycleStrategy::Flush;
514 } 516 }
515 } 517 }
@@ -538,9 +540,8 @@ private:
538 * @param untopological Indicates to the recycler that the texture has no way to match the 540 * @param untopological Indicates to the recycler that the texture has no way to match the
539 * overlaps due to topological reasons. 541 * overlaps due to topological reasons.
540 **/ 542 **/
541 std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, 543 std::pair<TSurface, TView> RecycleSurface(VectorSurface& overlaps, const SurfaceParams& params,
542 const SurfaceParams& params, const GPUVAddr gpu_addr, 544 const GPUVAddr gpu_addr, const bool preserve_contents,
543 const bool preserve_contents,
544 const MatchTopologyResult untopological) { 545 const MatchTopologyResult untopological) {
545 const bool do_load = preserve_contents && Settings::IsGPULevelExtreme(); 546 const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
546 for (auto& surface : overlaps) { 547 for (auto& surface : overlaps) {
@@ -650,47 +651,65 @@ private:
650 * @param params The parameters on the new surface. 651 * @param params The parameters on the new surface.
651 * @param gpu_addr The starting address of the new surface. 652 * @param gpu_addr The starting address of the new surface.
652 **/ 653 **/
653 std::optional<std::pair<TSurface, TView>> TryReconstructSurface(std::vector<TSurface>& overlaps, 654 std::optional<std::pair<TSurface, TView>> TryReconstructSurface(VectorSurface& overlaps,
654 const SurfaceParams& params, 655 const SurfaceParams& params,
655 const GPUVAddr gpu_addr) { 656 GPUVAddr gpu_addr) {
656 if (params.target == SurfaceTarget::Texture3D) { 657 if (params.target == SurfaceTarget::Texture3D) {
657 return {}; 658 return std::nullopt;
658 } 659 }
659 bool modified = false; 660 const auto test_modified = [](TSurface& surface) { return surface->IsModified(); };
660 TSurface new_surface = GetUncachedSurface(gpu_addr, params); 661 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
661 u32 passed_tests = 0; 662
663 if (std::none_of(overlaps.begin(), overlaps.end(), test_modified)) {
664 LoadSurface(new_surface);
665 for (const auto& surface : overlaps) {
666 Unregister(surface);
667 }
668 Register(new_surface);
669 return {{new_surface, new_surface->GetMainView()}};
670 }
671
672 std::size_t passed_tests = 0;
662 for (auto& surface : overlaps) { 673 for (auto& surface : overlaps) {
663 const SurfaceParams& src_params = surface->GetSurfaceParams(); 674 const SurfaceParams& src_params = surface->GetSurfaceParams();
664 if (src_params.is_layered || src_params.num_levels > 1) { 675 const auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
665 // We send this cases to recycle as they are more complex to handle
666 return {};
667 }
668 const std::size_t candidate_size = surface->GetSizeInBytes();
669 auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
670 if (!mipmap_layer) { 676 if (!mipmap_layer) {
671 continue; 677 continue;
672 } 678 }
673 const auto [layer, mipmap] = *mipmap_layer; 679 const auto [base_layer, base_mipmap] = *mipmap_layer;
674 if (new_surface->GetMipmapSize(mipmap) != candidate_size) { 680 if (new_surface->GetMipmapSize(base_mipmap) != surface->GetMipmapSize(0)) {
675 continue; 681 continue;
676 } 682 }
677 modified |= surface->IsModified(); 683 ++passed_tests;
678 // Now we got all the data set up 684
679 const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap); 685 // Copy all mipmaps and layers
680 const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap); 686 const u32 block_width = params.GetDefaultBlockWidth();
681 const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, width, height, 1); 687 const u32 block_height = params.GetDefaultBlockHeight();
682 passed_tests++; 688 for (u32 mipmap = base_mipmap; mipmap < base_mipmap + src_params.num_levels; ++mipmap) {
683 ImageCopy(surface, new_surface, copy_params); 689 const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap);
690 const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap);
691 if (width < block_width || height < block_height) {
692 // Current APIs forbid copying small compressed textures, avoid errors
693 break;
694 }
695 const CopyParams copy_params(0, 0, 0, 0, 0, base_layer, 0, mipmap, width, height,
696 src_params.depth);
697 ImageCopy(surface, new_surface, copy_params);
698 }
684 } 699 }
685 if (passed_tests == 0) { 700 if (passed_tests == 0) {
686 return {}; 701 return std::nullopt;
702 }
703 if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) {
687 // In Accurate GPU all tests should pass, else we recycle 704 // In Accurate GPU all tests should pass, else we recycle
688 } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) { 705 return std::nullopt;
689 return {};
690 } 706 }
707
708 const bool modified = std::any_of(overlaps.begin(), overlaps.end(), test_modified);
691 for (const auto& surface : overlaps) { 709 for (const auto& surface : overlaps) {
692 Unregister(surface); 710 Unregister(surface);
693 } 711 }
712
694 new_surface->MarkAsModified(modified, Tick()); 713 new_surface->MarkAsModified(modified, Tick());
695 Register(new_surface); 714 Register(new_surface);
696 return {{new_surface, new_surface->GetMainView()}}; 715 return {{new_surface, new_surface->GetMainView()}};
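
The rewritten TryReconstructSurface adds a fast path: when no overlapping surface carries modifications (std::none_of over the test_modified predicate), the merged surface can simply be reloaded from guest memory instead of issuing one GPU copy per overlap. When copies are needed, the per-mipmap loop also stops before emitting copies smaller than one compressed block, since current graphics APIs reject such copies. A minimal sketch of the predicate-driven decision, with Surface standing in for the cache's TSurface type:

    // Sketch only: Surface and its helpers stand in for the cache's TSurface.
    #include <algorithm>
    #include <memory>
    #include <vector>

    struct Surface {
        bool modified = false;
        bool IsModified() const { return modified; }
    };
    using SurfacePtr = std::shared_ptr<Surface>;

    // True when reloading from guest memory is safe: no overlap holds data
    // newer than what guest memory already contains.
    bool CanReloadFromMemory(const std::vector<SurfacePtr>& overlaps) {
        const auto test_modified = [](const SurfacePtr& s) { return s->IsModified(); };
        return std::none_of(overlaps.begin(), overlaps.end(), test_modified);
    }

The same predicate is reused at the end of the function with std::any_of, deciding whether the reconstructed surface must be marked as modified.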
@@ -708,53 +727,11 @@ private:
708 * @param preserve_contents Indicates whether the new surface should be loaded from memory or 727 * @param preserve_contents Indicates whether the new surface should be loaded from memory or
709 * left blank. 728 * left blank.
710 */ 729 */
711 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, 730 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(VectorSurface& overlaps,
712 const SurfaceParams& params, 731 const SurfaceParams& params,
713 const GPUVAddr gpu_addr, 732 GPUVAddr gpu_addr, VAddr cpu_addr,
714 const VAddr cpu_addr,
715 bool preserve_contents) { 733 bool preserve_contents) {
716 if (params.target == SurfaceTarget::Texture3D) { 734 if (params.target != SurfaceTarget::Texture3D) {
717 bool failed = false;
718 if (params.num_levels > 1) {
719 // We can't handle mipmaps in 3D textures yet, so fall back to the LLE approach
720 return std::nullopt;
721 }
722 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
723 bool modified = false;
724 for (auto& surface : overlaps) {
725 const SurfaceParams& src_params = surface->GetSurfaceParams();
726 if (src_params.target != SurfaceTarget::Texture2D) {
727 failed = true;
728 break;
729 }
730 if (src_params.height != params.height) {
731 failed = true;
732 break;
733 }
734 if (src_params.block_depth != params.block_depth ||
735 src_params.block_height != params.block_height) {
736 failed = true;
737 break;
738 }
739 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
740 const auto offsets = params.GetBlockOffsetXYZ(offset);
741 const auto z = std::get<2>(offsets);
742 modified |= surface->IsModified();
743 const CopyParams copy_params(0, 0, 0, 0, 0, z, 0, 0, params.width, params.height,
744 1);
745 ImageCopy(surface, new_surface, copy_params);
746 }
747 if (failed) {
748 return std::nullopt;
749 }
750 for (const auto& surface : overlaps) {
751 Unregister(surface);
752 }
753 new_surface->MarkAsModified(modified, Tick());
754 Register(new_surface);
755 auto view = new_surface->GetMainView();
756 return {{std::move(new_surface), view}};
757 } else {
758 for (const auto& surface : overlaps) { 735 for (const auto& surface : overlaps) {
759 if (!surface->MatchTarget(params.target)) { 736 if (!surface->MatchTarget(params.target)) {
760 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) { 737 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
@@ -770,11 +747,60 @@ private:
770 continue; 747 continue;
771 } 748 }
772 if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) { 749 if (surface->MatchesStructure(params) == MatchStructureResult::FullMatch) {
773 return {{surface, surface->GetMainView()}}; 750 return std::make_pair(surface, surface->GetMainView());
774 } 751 }
775 } 752 }
776 return InitializeSurface(gpu_addr, params, preserve_contents); 753 return InitializeSurface(gpu_addr, params, preserve_contents);
777 } 754 }
755
756 if (params.num_levels > 1) {
757 // We can't handle mipmaps in 3D textures yet, so fall back to the LLE approach
758 return std::nullopt;
759 }
760
761 if (overlaps.size() == 1) {
762 const auto& surface = overlaps[0];
763 const SurfaceParams& overlap_params = surface->GetSurfaceParams();
764 // Don't attempt to render to textures with more than one level for now
765 // The texture has to be at or to the right of the sampled address if we want to render to it
766 if (overlap_params.num_levels == 1 && cpu_addr >= surface->GetCpuAddr()) {
767 const u32 offset = static_cast<u32>(cpu_addr - surface->GetCpuAddr());
768 const u32 slice = std::get<2>(params.GetBlockOffsetXYZ(offset));
769 if (slice < overlap_params.depth) {
770 auto view = surface->Emplace3DView(slice, params.depth, 0, 1);
771 return std::make_pair(std::move(surface), std::move(view));
772 }
773 }
774 }
775
776 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
777 bool modified = false;
778
779 for (auto& surface : overlaps) {
780 const SurfaceParams& src_params = surface->GetSurfaceParams();
781 if (src_params.target != SurfaceTarget::Texture2D ||
782 src_params.height != params.height ||
783 src_params.block_depth != params.block_depth ||
784 src_params.block_height != params.block_height) {
785 return std::nullopt;
786 }
787 modified |= surface->IsModified();
788
789 const u32 offset = static_cast<u32>(surface->GetCpuAddr() - cpu_addr);
790 const u32 slice = std::get<2>(params.GetBlockOffsetXYZ(offset));
791 const u32 width = params.width;
792 const u32 height = params.height;
793 const CopyParams copy_params(0, 0, 0, 0, 0, slice, 0, 0, width, height, 1);
794 ImageCopy(surface, new_surface, copy_params);
795 }
796 for (const auto& surface : overlaps) {
797 Unregister(surface);
798 }
799 new_surface->MarkAsModified(modified, Tick());
800 Register(new_surface);
801
802 TView view = new_surface->GetMainView();
803 return std::make_pair(std::move(new_surface), std::move(view));
778 } 804 }
779 805
780 /** 806 /**
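
The 3D path in Manage3DSurfaces reconstructs a volume texture from 2D overlaps by translating each overlap's CPU-address offset into a Z slice, either to emplace a view into an existing 3D surface or to ImageCopy each 2D surface into the right slice of a new one. A minimal sketch of the offset-to-slice mapping, assuming a plain linear layout (the real cache uses SurfaceParams::GetBlockOffsetXYZ, which also accounts for block-linear tiling):

    #include <cstdint>

    // Hypothetical helper: which Z slice of a linear 3D texture contains
    // the byte at 'offset' from the texture's base address?
    std::uint32_t OffsetToSlice(std::uint32_t offset, std::uint32_t width,
                                std::uint32_t height, std::uint32_t bytes_per_pixel) {
        const std::uint32_t slice_size = width * height * bytes_per_pixel;
        return offset / slice_size;
    }

Each 2D overlap is then copied into the slice computed from its distance to the base address, which is why the copy passes the computed slice as its destination Z while every other origin coordinate stays zero.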
@@ -810,7 +836,7 @@ private:
810 TSurface& current_surface = iter->second; 836 TSurface& current_surface = iter->second;
811 const auto topological_result = current_surface->MatchesTopology(params); 837 const auto topological_result = current_surface->MatchesTopology(params);
812 if (topological_result != MatchTopologyResult::FullMatch) { 838 if (topological_result != MatchTopologyResult::FullMatch) {
813 std::vector<TSurface> overlaps{current_surface}; 839 VectorSurface overlaps{current_surface};
814 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, 840 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
815 topological_result); 841 topological_result);
816 } 842 }
@@ -852,7 +878,7 @@ private:
852 } 878 }
853 } 879 }
854 880
855 // Check if it's a 3D texture 881 // Manage 3D textures
856 if (params.block_depth > 0) { 882 if (params.block_depth > 0) {
857 auto surface = 883 auto surface =
858 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents); 884 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
@@ -868,12 +894,9 @@ private:
868 // two things either the candidate surface is a supertexture of the overlap 894 // two things either the candidate surface is a supertexture of the overlap
869 // or they don't match in any known way. 895 // or they don't match in any known way.
870 if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) { 896 if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) {
871 if (current_surface->GetGpuAddr() == gpu_addr) { 897 const std::optional view = TryReconstructSurface(overlaps, params, gpu_addr);
872 std::optional<std::pair<TSurface, TView>> view = 898 if (view) {
873 TryReconstructSurface(overlaps, params, gpu_addr); 899 return *view;
874 if (view) {
875 return *view;
876 }
877 } 900 }
878 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, 901 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
879 MatchTopologyResult::FullMatch); 902 MatchTopologyResult::FullMatch);
@@ -991,7 +1014,9 @@ private:
991 params.target = target; 1014 params.target = target;
992 params.is_tiled = false; 1015 params.is_tiled = false;
993 params.srgb_conversion = false; 1016 params.srgb_conversion = false;
994 params.is_layered = false; 1017 params.is_layered =
1018 target == SurfaceTarget::Texture1DArray || target == SurfaceTarget::Texture2DArray ||
1019 target == SurfaceTarget::TextureCubemap || target == SurfaceTarget::TextureCubeArray;
995 params.block_width = 0; 1020 params.block_width = 0;
996 params.block_height = 0; 1021 params.block_height = 0;
997 params.block_depth = 0; 1022 params.block_depth = 0;
@@ -1124,23 +1149,25 @@ private:
1124 } 1149 }
1125 } 1150 }
1126 1151
1127 std::vector<TSurface> GetSurfacesInRegion(const VAddr cpu_addr, const std::size_t size) { 1152 VectorSurface GetSurfacesInRegion(const VAddr cpu_addr, const std::size_t size) {
1128 if (size == 0) { 1153 if (size == 0) {
1129 return {}; 1154 return {};
1130 } 1155 }
1131 const VAddr cpu_addr_end = cpu_addr + size; 1156 const VAddr cpu_addr_end = cpu_addr + size;
1132 VAddr start = cpu_addr >> registry_page_bits;
1133 const VAddr end = (cpu_addr_end - 1) >> registry_page_bits; 1157 const VAddr end = (cpu_addr_end - 1) >> registry_page_bits;
1134 std::vector<TSurface> surfaces; 1158 VectorSurface surfaces;
1135 while (start <= end) { 1159 for (VAddr start = cpu_addr >> registry_page_bits; start <= end; ++start) {
1136 std::vector<TSurface>& list = registry[start]; 1160 const auto it = registry.find(start);
1137 for (auto& surface : list) { 1161 if (it == registry.end()) {
1138 if (!surface->IsPicked() && surface->Overlaps(cpu_addr, cpu_addr_end)) { 1162 continue;
1139 surface->MarkAsPicked(true); 1163 }
1140 surfaces.push_back(surface); 1164 for (auto& surface : it->second) {
1165 if (surface->IsPicked() || !surface->Overlaps(cpu_addr, cpu_addr_end)) {
1166 continue;
1141 } 1167 }
1168 surface->MarkAsPicked(true);
1169 surfaces.push_back(surface);
1142 } 1170 }
1143 start++;
1144 } 1171 }
1145 for (auto& surface : surfaces) { 1172 for (auto& surface : surfaces) {
1146 surface->MarkAsPicked(false); 1173 surface->MarkAsPicked(false);
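
GetSurfacesInRegion now probes the page registry with find() rather than operator[]. On a map type, operator[] default-constructs and inserts an empty entry for every page that has no surfaces, so a lookup-heavy path slowly fills the registry with dead entries; find() leaves the container untouched on a miss. A minimal sketch of the difference, using a stand-in registry type:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    using PageMap = std::unordered_map<std::uint64_t, std::vector<int>>;

    void VisitPage(PageMap& registry, std::uint64_t page) {
        // registry[page] would insert an empty vector on a miss and grow the map.
        const auto it = registry.find(page); // a miss leaves the map unchanged
        if (it == registry.end()) {
            return;
        }
        for (const int surface : it->second) {
            (void)surface; // visit each surface registered on this page
        }
    }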
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index 1adf8932b..1f5e43043 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -106,6 +106,9 @@ public:
106 format.setVersion(4, 3); 106 format.setVersion(4, 3);
107 format.setProfile(QSurfaceFormat::CompatibilityProfile); 107 format.setProfile(QSurfaceFormat::CompatibilityProfile);
108 format.setOption(QSurfaceFormat::FormatOption::DeprecatedFunctions); 108 format.setOption(QSurfaceFormat::FormatOption::DeprecatedFunctions);
109 if (Settings::values.renderer_debug) {
110 format.setOption(QSurfaceFormat::FormatOption::DebugContext);
111 }
109 // TODO: expose a setting for buffer value (ie default/single/double/triple) 112 // TODO: expose a setting for buffer value (ie default/single/double/triple)
110 format.setSwapBehavior(QSurfaceFormat::DefaultSwapBehavior); 113 format.setSwapBehavior(QSurfaceFormat::DefaultSwapBehavior);
111 format.setSwapInterval(0); 114 format.setSwapInterval(0);
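
Requesting QSurfaceFormat::DebugContext only asks the driver for a debug-capable context; the messages still have to be consumed somewhere. A hedged sketch of one way to do that with Qt's own logger, assuming a current context that supports KHR_debug:

    #include <QObject>
    #include <QOpenGLDebugLogger>
    #include <QOpenGLDebugMessage>
    #include <QtDebug>

    // Attach Qt's GL debug logger; call with the debug context current.
    void AttachGLLogger(QObject* parent) {
        auto* logger = new QOpenGLDebugLogger(parent);
        if (logger->initialize()) { // fails without a debug-capable context
            QObject::connect(logger, &QOpenGLDebugLogger::messageLogged,
                             [](const QOpenGLDebugMessage& message) { qDebug() << message; });
            logger->startLogging();
        }
    }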
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index b08b87426..7e9073cc3 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -533,6 +533,8 @@ void Config::ReadDebuggingValues() {
533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool(); 533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
534 Settings::values.disable_cpu_opt = 534 Settings::values.disable_cpu_opt =
535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool(); 535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool();
536 Settings::values.disable_macro_jit =
537 ReadSetting(QStringLiteral("disable_macro_jit"), false).toBool();
536 538
537 qt_config->endGroup(); 539 qt_config->endGroup();
538} 540}
@@ -1011,6 +1013,7 @@ void Config::SaveDebuggingValues() {
1011 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false); 1013 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
1012 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false); 1014 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
1013 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false); 1015 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false);
1016 WriteSetting(QStringLiteral("disable_macro_jit"), Settings::values.disable_macro_jit, false);
1014 1017
1015 qt_config->endGroup(); 1018 qt_config->endGroup();
1016} 1019}
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index c2026763e..2c77441fd 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -39,6 +39,8 @@ void ConfigureDebug::SetConfiguration() {
39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt); 39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt);
40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn()); 40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn());
41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug); 41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug);
42 ui->disable_macro_jit->setEnabled(!Core::System::GetInstance().IsPoweredOn());
43 ui->disable_macro_jit->setChecked(Settings::values.disable_macro_jit);
42} 44}
43 45
44void ConfigureDebug::ApplyConfiguration() { 46void ConfigureDebug::ApplyConfiguration() {
@@ -51,6 +53,7 @@ void ConfigureDebug::ApplyConfiguration() {
51 Settings::values.quest_flag = ui->quest_flag->isChecked(); 53 Settings::values.quest_flag = ui->quest_flag->isChecked();
52 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked(); 54 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked();
53 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked(); 55 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked();
56 Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
54 Debugger::ToggleConsole(); 57 Debugger::ToggleConsole();
55 Log::Filter filter; 58 Log::Filter filter;
56 filter.ParseFilterString(Settings::values.log_filter); 59 filter.ParseFilterString(Settings::values.log_filter);
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index e0d4c4a44..46f0208c6 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -148,6 +148,19 @@
148 </property> 148 </property>
149 </widget> 149 </widget>
150 </item> 150 </item>
151 <item>
152 <widget class="QCheckBox" name="disable_macro_jit">
153 <property name="enabled">
154 <bool>true</bool>
155 </property>
156 <property name="whatsThis">
157 <string>When checked, it disables the macro Just-In-Time compiler. Enabling this makes games run slower.</string>
158 </property>
159 <property name="text">
160 <string>Disable Macro JIT</string>
161 </property>
162 </widget>
163 </item>
151 </layout> 164 </layout>
152 </widget> 165 </widget>
153 </item> 166 </item>
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index e4eb5594b..a05fa64ba 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -480,7 +480,9 @@ void ConfigureInputPlayer::RestoreDefaults() {
480 SetAnalogButton(params, analogs_param[analog_id], analog_sub_buttons[sub_button_id]); 480 SetAnalogButton(params, analogs_param[analog_id], analog_sub_buttons[sub_button_id]);
481 } 481 }
482 } 482 }
483
483 UpdateButtonLabels(); 484 UpdateButtonLabels();
485 ApplyConfiguration();
484} 486}
485 487
486void ConfigureInputPlayer::ClearAll() { 488void ConfigureInputPlayer::ClearAll() {
@@ -505,6 +507,7 @@ void ConfigureInputPlayer::ClearAll() {
505 } 507 }
506 508
507 UpdateButtonLabels(); 509 UpdateButtonLabels();
510 ApplyConfiguration();
508} 511}
509 512
510void ConfigureInputPlayer::UpdateButtonLabels() { 513void ConfigureInputPlayer::UpdateButtonLabels() {
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index c20d48c42..7240270f5 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -432,6 +432,8 @@ void Config::ReadValues() {
432 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false); 432 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
433 Settings::values.disable_cpu_opt = 433 Settings::values.disable_cpu_opt =
434 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false); 434 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false);
435 Settings::values.disable_macro_jit =
436 sdl2_config->GetBoolean("Debugging", "disable_macro_jit", false);
435 437
436 const auto title_list = sdl2_config->Get("AddOns", "title_ids", ""); 438 const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
437 std::stringstream ss(title_list); 439 std::stringstream ss(title_list);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index abc6e6e65..6f53e9659 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -291,6 +291,8 @@ quest_flag =
291# Determines whether or not JIT CPU optimizations are enabled 291# Determines whether or not JIT CPU optimizations are enabled
292# false: Optimizations Enabled, true: Optimizations Disabled 292# false: Optimizations Enabled, true: Optimizations Disabled
293disable_cpu_opt = 293disable_cpu_opt =
294# Determines whether or not the macro JIT compiler is enabled
295disable_macro_jit = false
294 296
295[WebService] 297[WebService]
296# Whether or not to enable telemetry 298# Whether or not to enable telemetry
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
index 411e7e647..09cc0a3b5 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_gl.cpp
@@ -98,6 +98,9 @@ EmuWindow_SDL2_GL::EmuWindow_SDL2_GL(Core::System& system, bool fullscreen)
98 SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8); 98 SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
99 SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 0); 99 SDL_GL_SetAttribute(SDL_GL_ALPHA_SIZE, 0);
100 SDL_GL_SetAttribute(SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1); 100 SDL_GL_SetAttribute(SDL_GL_SHARE_WITH_CURRENT_CONTEXT, 1);
101 if (Settings::values.renderer_debug) {
102 SDL_GL_SetAttribute(SDL_GL_CONTEXT_FLAGS, SDL_GL_CONTEXT_DEBUG_FLAG);
103 }
101 SDL_GL_SetSwapInterval(0); 104 SDL_GL_SetSwapInterval(0);
102 105
103 std::string window_title = fmt::format("yuzu {} | {}-{}", Common::g_build_fullname, 106 std::string window_title = fmt::format("yuzu {} | {}-{}", Common::g_build_fullname,
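
As with the Qt path, SDL_GL_CONTEXT_DEBUG_FLAG only requests a debug context; to surface the driver's messages, a GL 4.3 / KHR_debug callback has to be registered once the context is current. A minimal sketch, assuming the GL function pointers are already loaded (yuzu loads GL through glad, which is assumed here):

    #include <cstdio>
    #include <glad/glad.h>

    // Print every debug message the driver emits.
    static void APIENTRY OnGLDebugMessage(GLenum source, GLenum type, GLuint id,
                                          GLenum severity, GLsizei length,
                                          const GLchar* message, const void* user_param) {
        std::fprintf(stderr, "GL debug [id=%u]: %.*s\n", id,
                     static_cast<int>(length), message);
    }

    void EnableGLDebugOutput() {
        glEnable(GL_DEBUG_OUTPUT);
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); // report at the offending call
        glDebugMessageCallback(OnGLDebugMessage, nullptr);
    }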