Diffstat (limited to 'src')
-rw-r--r--  src/core/hle/kernel/readable_event.cpp                      2
-rw-r--r--  src/core/settings.h                                         1
-rw-r--r--  src/video_core/CMakeLists.txt                               8
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp                      17
-rw-r--r--  src/video_core/engines/maxwell_3d.h                        18
-rw-r--r--  src/video_core/macro/macro.cpp                             45
-rw-r--r--  src/video_core/macro/macro.h                              128
-rw-r--r--  src/video_core/macro/macro_interpreter.cpp (renamed from src/video_core/macro_interpreter.cpp)  198
-rw-r--r--  src/video_core/macro/macro_interpreter.h (renamed from src/video_core/macro_interpreter.h)       51
-rw-r--r--  src/video_core/macro/macro_jit_x64.cpp                    640
-rw-r--r--  src/video_core/macro/macro_jit_x64.h                      100
-rw-r--r--  src/video_core/renderer_opengl/gl_device.cpp                12
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp    14
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp           6
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp       3
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp        1
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp        16
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp            55
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h              16
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp    137
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.h        9
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp          4
-rw-r--r--  src/video_core/texture_cache/texture_cache.h                71
-rw-r--r--  src/yuzu/configuration/config.cpp                            3
-rw-r--r--  src/yuzu/configuration/configure_debug.cpp                   3
-rw-r--r--  src/yuzu/configuration/configure_debug.ui                   13
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp            3
-rw-r--r--  src/yuzu_cmd/config.cpp                                      2
-rw-r--r--  src/yuzu_cmd/default_ini.h                                   2
29 files changed, 1264 insertions(+), 314 deletions(-)
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 00860fcbd..ef5e19e63 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -38,7 +38,7 @@ void ReadableEvent::Clear() {
 
 ResultCode ReadableEvent::Reset() {
     if (!is_signaled) {
-        LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
+        LOG_TRACE(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
                   GetObjectId(), GetTypeName(), GetName());
         return ERR_INVALID_STATE;
     }
diff --git a/src/core/settings.h b/src/core/settings.h
index 78eb33737..36cd66fd4 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -474,6 +474,7 @@ struct Values {
     bool reporting_services;
     bool quest_flag;
     bool disable_cpu_opt;
+    bool disable_macro_jit;
 
     // BCAT
     std::string bcat_backend;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index d6ee82836..2bf8d68ce 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -25,6 +25,12 @@ add_library(video_core STATIC
     engines/shader_bytecode.h
     engines/shader_header.h
     engines/shader_type.h
+    macro/macro.cpp
+    macro/macro.h
+    macro/macro_interpreter.cpp
+    macro/macro_interpreter.h
+    macro/macro_jit_x64.cpp
+    macro/macro_jit_x64.h
     fence_manager.h
     gpu.cpp
     gpu.h
@@ -36,8 +42,6 @@ add_library(video_core STATIC
     gpu_thread.h
     guest_driver.cpp
     guest_driver.h
-    macro_interpreter.cpp
-    macro_interpreter.h
     memory_manager.cpp
     memory_manager.h
     morton.cpp
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 13ef2e42d..e46b153f9 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -25,9 +25,8 @@ constexpr u32 MacroRegistersStart = 0xE00;
 Maxwell3D::Maxwell3D(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
                      MemoryManager& memory_manager)
     : system{system}, rasterizer{rasterizer}, memory_manager{memory_manager},
-      macro_interpreter{*this}, upload_state{memory_manager, regs.upload} {
+      macro_engine{GetMacroEngine(*this)}, upload_state{memory_manager, regs.upload} {
     dirty.flags.flip();
-
     InitializeRegisterDefaults();
 }
 
@@ -120,7 +119,7 @@ void Maxwell3D::InitializeRegisterDefaults() {
     mme_inline[MAXWELL3D_REG_INDEX(index_array.count)] = true;
 }
 
-void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters) {
+void Maxwell3D::CallMacroMethod(u32 method, const std::vector<u32>& parameters) {
     // Reset the current macro.
     executing_macro = 0;
 
@@ -129,7 +128,7 @@ void Maxwell3D::CallMacroMethod(u32 method, std::size_t num_parameters, const u3
         ((method - MacroRegistersStart) >> 1) % static_cast<u32>(macro_positions.size());
 
     // Execute the current macro.
-    macro_interpreter.Execute(macro_positions[entry], num_parameters, parameters);
+    macro_engine->Execute(macro_positions[entry], parameters);
     if (mme_draw.current_mode != MMEDrawMode::Undefined) {
         FlushMMEInlineDraw();
     }
@@ -165,7 +164,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
 
     // Call the macro when there are no more parameters in the command buffer
     if (is_last_call) {
-        CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+        CallMacroMethod(executing_macro, macro_params);
         macro_params.clear();
     }
     return;
@@ -201,7 +200,7 @@ void Maxwell3D::CallMethod(u32 method, u32 method_argument, bool is_last_call) {
         break;
     }
     case MAXWELL3D_REG_INDEX(macros.data): {
-        ProcessMacroUpload(arg);
+        macro_engine->AddCode(regs.macros.upload_address, arg);
         break;
     }
     case MAXWELL3D_REG_INDEX(macros.bind): {
@@ -310,7 +309,7 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
 
     // Call the macro when there are no more parameters in the command buffer
     if (amount == methods_pending) {
-        CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
+        CallMacroMethod(executing_macro, macro_params);
         macro_params.clear();
     }
     return;
@@ -424,9 +423,7 @@ void Maxwell3D::FlushMMEInlineDraw() {
 }
 
 void Maxwell3D::ProcessMacroUpload(u32 data) {
-    ASSERT_MSG(regs.macros.upload_address < macro_memory.size(),
-               "upload_address exceeded macro_memory size!");
-    macro_memory[regs.macros.upload_address++] = data;
+    macro_engine->AddCode(regs.macros.upload_address++, data);
 }
 
 void Maxwell3D::ProcessMacroBind(u32 data) {
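
The entry lookup in CallMacroMethod above is compact enough to restate on its own. A standalone sketch of the same computation (illustrative only; the helper name is made up, the constant matches the one in maxwell_3d.cpp):

    #include <cstddef>
    #include <cstdint>

    // Macro methods start at 0xE00 and come in pairs, so halving the offset
    // selects the uploaded macro slot; the modulo wraps exactly like
    // CallMacroMethod does over macro_positions.
    constexpr std::uint32_t MacroRegistersStart = 0xE00;

    std::uint32_t MacroEntry(std::uint32_t method, std::size_t num_positions) {
        return ((method - MacroRegistersStart) >> 1) %
               static_cast<std::uint32_t>(num_positions);
    }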
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 05dd6b39b..b827b112f 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -23,7 +23,7 @@
 #include "video_core/engines/engine_upload.h"
 #include "video_core/engines/shader_type.h"
 #include "video_core/gpu.h"
-#include "video_core/macro_interpreter.h"
+#include "video_core/macro/macro.h"
 #include "video_core/textures/texture.h"
 
 namespace Core {
@@ -1411,15 +1411,6 @@ public:
 
     const VideoCore::GuestDriverProfile& AccessGuestDriverProfile() const override;
 
-    /// Memory for macro code - it's undetermined how big this is, however 1MB is much larger than
-    /// we've seen used.
-    using MacroMemory = std::array<u32, 0x40000>;
-
-    /// Gets a reference to macro memory.
-    const MacroMemory& GetMacroMemory() const {
-        return macro_memory;
-    }
-
     bool ShouldExecute() const {
         return execute_on;
     }
@@ -1468,16 +1459,13 @@ private:
 
     std::array<bool, Regs::NUM_REGS> mme_inline{};
 
-    /// Memory for macro code
-    MacroMemory macro_memory;
-
     /// Macro method that is currently being executed / being fed parameters.
     u32 executing_macro = 0;
     /// Parameters that have been submitted to the macro call so far.
     std::vector<u32> macro_params;
 
     /// Interpreter for the macro codes uploaded to the GPU.
-    MacroInterpreter macro_interpreter;
+    std::unique_ptr<MacroEngine> macro_engine;
 
     static constexpr u32 null_cb_data = 0xFFFFFFFF;
     struct {
@@ -1506,7 +1494,7 @@ private:
      * @param num_parameters Number of arguments
      * @param parameters Arguments to the method call
      */
-    void CallMacroMethod(u32 method, std::size_t num_parameters, const u32* parameters);
+    void CallMacroMethod(u32 method, const std::vector<u32>& parameters);
 
     /// Handles writes to the macro uploading register.
     void ProcessMacroUpload(u32 data);
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
new file mode 100644
index 000000000..89077a2d8
--- /dev/null
+++ b/src/video_core/macro/macro.cpp
@@ -0,0 +1,45 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/settings.h"
+#include "video_core/macro/macro.h"
+#include "video_core/macro/macro_interpreter.h"
+#include "video_core/macro/macro_jit_x64.h"
+
+namespace Tegra {
+
+void MacroEngine::AddCode(u32 method, u32 data) {
+    uploaded_macro_code[method].push_back(data);
+}
+
+void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
+    auto compiled_macro = macro_cache.find(method);
+    if (compiled_macro != macro_cache.end()) {
+        compiled_macro->second->Execute(parameters, method);
+    } else {
+        // Macro not compiled, check if it's uploaded and if so, compile it
+        auto macro_code = uploaded_macro_code.find(method);
+        if (macro_code == uploaded_macro_code.end()) {
+            UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
+            return;
+        }
+        macro_cache[method] = Compile(macro_code->second);
+        macro_cache[method]->Execute(parameters, method);
+    }
+}
+
+std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
+    if (Settings::values.disable_macro_jit) {
+        return std::make_unique<MacroInterpreter>(maxwell3d);
+    }
+#ifdef ARCHITECTURE_x86_64
+    return std::make_unique<MacroJITx64>(maxwell3d);
+#else
+    return std::make_unique<MacroInterpreter>(maxwell3d);
+#endif
+}
+
+} // namespace Tegra
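
A note on the flow above: AddCode only accumulates code words, and compilation is deferred to the first Execute for that method, after which the CachedMacro is reused from the cache. A minimal caller, assuming only the interfaces declared in macro.h (the helper name and method slot 0 are illustrative, not part of the commit):

    #include <vector>
    #include "video_core/engines/maxwell_3d.h"
    #include "video_core/macro/macro.h"

    void RunUploadedMacro(Tegra::Engines::Maxwell3D& maxwell3d,
                          const std::vector<u32>& macro_code,
                          const std::vector<u32>& parameters) {
        const auto engine = Tegra::GetMacroEngine(maxwell3d);
        // Upload the macro word by word, as ProcessMacroUpload does.
        for (const u32 word : macro_code) {
            engine->AddCode(/*method=*/0, word);
        }
        // The first call compiles (JIT on x86_64 unless disabled in the
        // settings); later calls hit the cache.
        engine->Execute(/*method=*/0, parameters);
    }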
diff --git a/src/video_core/macro/macro.h b/src/video_core/macro/macro.h
new file mode 100644
index 000000000..b76ed891f
--- /dev/null
+++ b/src/video_core/macro/macro.h
@@ -0,0 +1,128 @@
+// Copyright 2020 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+#include "common/bit_field.h"
+#include "common/common_types.h"
+
+namespace Tegra {
+namespace Engines {
+class Maxwell3D;
+}
+namespace Macro {
+constexpr std::size_t NUM_MACRO_REGISTERS = 8;
+enum class Operation : u32 {
+    ALU = 0,
+    AddImmediate = 1,
+    ExtractInsert = 2,
+    ExtractShiftLeftImmediate = 3,
+    ExtractShiftLeftRegister = 4,
+    Read = 5,
+    Unused = 6, // This operation doesn't seem to be a valid encoding.
+    Branch = 7,
+};
+
+enum class ALUOperation : u32 {
+    Add = 0,
+    AddWithCarry = 1,
+    Subtract = 2,
+    SubtractWithBorrow = 3,
+    // Operations 4-7 don't seem to be valid encodings.
+    Xor = 8,
+    Or = 9,
+    And = 10,
+    AndNot = 11,
+    Nand = 12
+};
+
+enum class ResultOperation : u32 {
+    IgnoreAndFetch = 0,
+    Move = 1,
+    MoveAndSetMethod = 2,
+    FetchAndSend = 3,
+    MoveAndSend = 4,
+    FetchAndSetMethod = 5,
+    MoveAndSetMethodFetchAndSend = 6,
+    MoveAndSetMethodSend = 7
+};
+
+enum class BranchCondition : u32 {
+    Zero = 0,
+    NotZero = 1,
+};
+
+union Opcode {
+    u32 raw;
+    BitField<0, 3, Operation> operation;
+    BitField<4, 3, ResultOperation> result_operation;
+    BitField<4, 1, BranchCondition> branch_condition;
+    // If set on a branch, then the branch doesn't have a delay slot.
+    BitField<5, 1, u32> branch_annul;
+    BitField<7, 1, u32> is_exit;
+    BitField<8, 3, u32> dst;
+    BitField<11, 3, u32> src_a;
+    BitField<14, 3, u32> src_b;
+    // The signed immediate overlaps the second source operand and the alu operation.
+    BitField<14, 18, s32> immediate;
+
+    BitField<17, 5, ALUOperation> alu_operation;
+
+    // Bitfield instructions data
+    BitField<17, 5, u32> bf_src_bit;
+    BitField<22, 5, u32> bf_size;
+    BitField<27, 5, u32> bf_dst_bit;
+
+    u32 GetBitfieldMask() const {
+        return (1 << bf_size) - 1;
+    }
+
+    s32 GetBranchTarget() const {
+        return static_cast<s32>(immediate * sizeof(u32));
+    }
+};
+
+union MethodAddress {
+    u32 raw;
+    BitField<0, 12, u32> address;
+    BitField<12, 6, u32> increment;
+};
+
+} // namespace Macro
+
+class CachedMacro {
+public:
+    virtual ~CachedMacro() = default;
+    /**
+     * Executes the macro code with the specified input parameters.
+     * @param parameters The input parameters of the macro
+     * @param method The method that invoked the macro
+     */
+    virtual void Execute(const std::vector<u32>& parameters, u32 method) = 0;
+};
+
+class MacroEngine {
+public:
+    virtual ~MacroEngine() = default;
+
+    // Store the uploaded macro code so it can be compiled when it's called.
+    void AddCode(u32 method, u32 data);
+
+    // Compiles the macro if it's not in the cache, and executes the compiled macro
+    void Execute(u32 method, const std::vector<u32>& parameters);
+
+protected:
+    virtual std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) = 0;
+
+private:
+    std::unordered_map<u32, std::unique_ptr<CachedMacro>> macro_cache;
+    std::unordered_map<u32, std::vector<u32>> uploaded_macro_code;
+};
+
+std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
+
+} // namespace Tegra
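
Since every field of Opcode overlaps one 32-bit word, a short decoding sketch may help. It uses only the union as declared above; the function name and the control flow are hypothetical:

    #include "video_core/macro/macro.h"

    void DescribeOpcode(u32 raw) {
        Tegra::Macro::Opcode op{};
        op.raw = raw;
        switch (op.operation) {
        case Tegra::Macro::Operation::Branch:
            // For branches, bits 4..5 are condition and annul; the target is
            // the signed immediate scaled from words to bytes.
            (void)op.GetBranchTarget();
            break;
        case Tegra::Macro::Operation::ExtractInsert:
            // For bitfield ops, bits 17..31 carry source bit, size, and
            // destination bit; the mask is (1 << bf_size) - 1.
            (void)op.GetBitfieldMask();
            break;
        default:
            // ALU ops reuse bits 17..21 as the ALU operation instead.
            break;
        }
    }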
diff --git a/src/video_core/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
index 947364928..5edff27aa 100644
--- a/src/video_core/macro_interpreter.cpp
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -1,4 +1,4 @@
-// Copyright 2018 yuzu Emulator Project
+// Copyright 2020 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
@@ -6,109 +6,46 @@
 #include "common/logging/log.h"
 #include "common/microprofile.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/macro_interpreter.h"
+#include "video_core/macro/macro_interpreter.h"
 
 MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
 
 namespace Tegra {
-namespace {
-enum class Operation : u32 {
-    ALU = 0,
-    AddImmediate = 1,
-    ExtractInsert = 2,
-    ExtractShiftLeftImmediate = 3,
-    ExtractShiftLeftRegister = 4,
-    Read = 5,
-    Unused = 6, // This operation doesn't seem to be a valid encoding.
-    Branch = 7,
-};
-} // Anonymous namespace
-
-enum class MacroInterpreter::ALUOperation : u32 {
-    Add = 0,
-    AddWithCarry = 1,
-    Subtract = 2,
-    SubtractWithBorrow = 3,
-    // Operations 4-7 don't seem to be valid encodings.
-    Xor = 8,
-    Or = 9,
-    And = 10,
-    AndNot = 11,
-    Nand = 12
-};
-
-enum class MacroInterpreter::ResultOperation : u32 {
-    IgnoreAndFetch = 0,
-    Move = 1,
-    MoveAndSetMethod = 2,
-    FetchAndSend = 3,
-    MoveAndSend = 4,
-    FetchAndSetMethod = 5,
-    MoveAndSetMethodFetchAndSend = 6,
-    MoveAndSetMethodSend = 7
-};
-
-enum class MacroInterpreter::BranchCondition : u32 {
-    Zero = 0,
-    NotZero = 1,
-};
-
-union MacroInterpreter::Opcode {
-    u32 raw;
-    BitField<0, 3, Operation> operation;
-    BitField<4, 3, ResultOperation> result_operation;
-    BitField<4, 1, BranchCondition> branch_condition;
-    // If set on a branch, then the branch doesn't have a delay slot.
-    BitField<5, 1, u32> branch_annul;
-    BitField<7, 1, u32> is_exit;
-    BitField<8, 3, u32> dst;
-    BitField<11, 3, u32> src_a;
-    BitField<14, 3, u32> src_b;
-    // The signed immediate overlaps the second source operand and the alu operation.
-    BitField<14, 18, s32> immediate;
-
-    BitField<17, 5, ALUOperation> alu_operation;
-
-    // Bitfield instructions data
-    BitField<17, 5, u32> bf_src_bit;
-    BitField<22, 5, u32> bf_size;
-    BitField<27, 5, u32> bf_dst_bit;
-
-    u32 GetBitfieldMask() const {
-        return (1 << bf_size) - 1;
-    }
-
-    s32 GetBranchTarget() const {
-        return static_cast<s32>(immediate * sizeof(u32));
-    }
-};
-
 MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
 
-void MacroInterpreter::Execute(u32 offset, std::size_t num_parameters, const u32* parameters) {
+std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
+    return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
+}
+
+MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
+                                           const std::vector<u32>& code)
+    : maxwell3d(maxwell3d), code(code) {}
+
+void MacroInterpreterImpl::Execute(const std::vector<u32>& parameters, u32 method) {
     MICROPROFILE_SCOPE(MacroInterp);
     Reset();
 
     registers[1] = parameters[0];
+    num_parameters = parameters.size();
 
     if (num_parameters > parameters_capacity) {
         parameters_capacity = num_parameters;
         this->parameters = std::make_unique<u32[]>(num_parameters);
     }
-    std::memcpy(this->parameters.get(), parameters, num_parameters * sizeof(u32));
+    std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
     this->num_parameters = num_parameters;
 
     // Execute the code until we hit an exit condition.
     bool keep_executing = true;
     while (keep_executing) {
-        keep_executing = Step(offset, false);
+        keep_executing = Step(false);
     }
 
     // Assert that the macro used all the input parameters
     ASSERT(next_parameter_index == num_parameters);
 }
 
-void MacroInterpreter::Reset() {
+void MacroInterpreterImpl::Reset() {
     registers = {};
     pc = 0;
     delayed_pc = {};
@@ -120,10 +57,10 @@ void MacroInterpreter::Reset() {
     carry_flag = false;
 }
 
-bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
+bool MacroInterpreterImpl::Step(bool is_delay_slot) {
     u32 base_address = pc;
 
-    Opcode opcode = GetOpcode(offset);
+    Macro::Opcode opcode = GetOpcode();
     pc += 4;
 
     // Update the program counter if we were delayed
@@ -134,18 +71,18 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
     }
 
     switch (opcode.operation) {
-    case Operation::ALU: {
+    case Macro::Operation::ALU: {
         u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
                                   GetRegister(opcode.src_b));
         ProcessResult(opcode.result_operation, opcode.dst, result);
         break;
     }
-    case Operation::AddImmediate: {
+    case Macro::Operation::AddImmediate: {
         ProcessResult(opcode.result_operation, opcode.dst,
                       GetRegister(opcode.src_a) + opcode.immediate);
         break;
     }
-    case Operation::ExtractInsert: {
+    case Macro::Operation::ExtractInsert: {
         u32 dst = GetRegister(opcode.src_a);
         u32 src = GetRegister(opcode.src_b);
 
@@ -155,7 +92,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
         ProcessResult(opcode.result_operation, opcode.dst, dst);
         break;
     }
-    case Operation::ExtractShiftLeftImmediate: {
+    case Macro::Operation::ExtractShiftLeftImmediate: {
         u32 dst = GetRegister(opcode.src_a);
         u32 src = GetRegister(opcode.src_b);
 
@@ -164,7 +101,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
         ProcessResult(opcode.result_operation, opcode.dst, result);
         break;
     }
-    case Operation::ExtractShiftLeftRegister: {
+    case Macro::Operation::ExtractShiftLeftRegister: {
         u32 dst = GetRegister(opcode.src_a);
         u32 src = GetRegister(opcode.src_b);
 
@@ -173,12 +110,12 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
         ProcessResult(opcode.result_operation, opcode.dst, result);
         break;
     }
-    case Operation::Read: {
+    case Macro::Operation::Read: {
         u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
         ProcessResult(opcode.result_operation, opcode.dst, result);
         break;
     }
-    case Operation::Branch: {
+    case Macro::Operation::Branch: {
         ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
         u32 value = GetRegister(opcode.src_a);
         bool taken = EvaluateBranchCondition(opcode.branch_condition, value);
@@ -191,7 +128,7 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
 
             delayed_pc = base_address + opcode.GetBranchTarget();
             // Execute one more instruction due to the delay slot.
-            return Step(offset, true);
+            return Step(true);
         }
         break;
     }
@@ -204,51 +141,44 @@ bool MacroInterpreter::Step(u32 offset, bool is_delay_slot) {
     // cause an exit if it's executed inside a delay slot.
     if (opcode.is_exit && !is_delay_slot) {
         // Exit has a delay slot, execute the next instruction
-        Step(offset, true);
+        Step(true);
         return false;
     }
 
     return true;
 }
 
-MacroInterpreter::Opcode MacroInterpreter::GetOpcode(u32 offset) const {
-    const auto& macro_memory{maxwell3d.GetMacroMemory()};
-    ASSERT((pc % sizeof(u32)) == 0);
-    ASSERT((pc + offset) < macro_memory.size() * sizeof(u32));
-    return {macro_memory[offset + pc / sizeof(u32)]};
-}
-
-u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b) {
+u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
     switch (operation) {
-    case ALUOperation::Add: {
+    case Macro::ALUOperation::Add: {
         const u64 result{static_cast<u64>(src_a) + src_b};
         carry_flag = result > 0xffffffff;
         return static_cast<u32>(result);
     }
-    case ALUOperation::AddWithCarry: {
+    case Macro::ALUOperation::AddWithCarry: {
         const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)};
         carry_flag = result > 0xffffffff;
         return static_cast<u32>(result);
     }
-    case ALUOperation::Subtract: {
+    case Macro::ALUOperation::Subtract: {
         const u64 result{static_cast<u64>(src_a) - src_b};
         carry_flag = result < 0x100000000;
         return static_cast<u32>(result);
     }
-    case ALUOperation::SubtractWithBorrow: {
+    case Macro::ALUOperation::SubtractWithBorrow: {
         const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)};
         carry_flag = result < 0x100000000;
         return static_cast<u32>(result);
     }
-    case ALUOperation::Xor:
+    case Macro::ALUOperation::Xor:
         return src_a ^ src_b;
-    case ALUOperation::Or:
+    case Macro::ALUOperation::Or:
         return src_a | src_b;
-    case ALUOperation::And:
+    case Macro::ALUOperation::And:
         return src_a & src_b;
-    case ALUOperation::AndNot:
+    case Macro::ALUOperation::AndNot:
         return src_a & ~src_b;
-    case ALUOperation::Nand:
+    case Macro::ALUOperation::Nand:
         return ~(src_a & src_b);
 
     default:
@@ -257,43 +187,43 @@ u32 MacroInterpreter::GetALUResult(ALUOperation operation, u32 src_a, u32 src_b)
     }
 }
 
-void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 result) {
+void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
     switch (operation) {
-    case ResultOperation::IgnoreAndFetch:
+    case Macro::ResultOperation::IgnoreAndFetch:
         // Fetch parameter and ignore result.
         SetRegister(reg, FetchParameter());
         break;
-    case ResultOperation::Move:
+    case Macro::ResultOperation::Move:
         // Move result.
         SetRegister(reg, result);
         break;
-    case ResultOperation::MoveAndSetMethod:
+    case Macro::ResultOperation::MoveAndSetMethod:
         // Move result and use as Method Address.
         SetRegister(reg, result);
         SetMethodAddress(result);
         break;
-    case ResultOperation::FetchAndSend:
+    case Macro::ResultOperation::FetchAndSend:
         // Fetch parameter and send result.
         SetRegister(reg, FetchParameter());
         Send(result);
         break;
-    case ResultOperation::MoveAndSend:
+    case Macro::ResultOperation::MoveAndSend:
         // Move and send result.
         SetRegister(reg, result);
         Send(result);
         break;
-    case ResultOperation::FetchAndSetMethod:
+    case Macro::ResultOperation::FetchAndSetMethod:
         // Fetch parameter and use result as Method Address.
         SetRegister(reg, FetchParameter());
         SetMethodAddress(result);
         break;
-    case ResultOperation::MoveAndSetMethodFetchAndSend:
+    case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
         // Move result and use as Method Address, then fetch and send parameter.
         SetRegister(reg, result);
         SetMethodAddress(result);
         Send(FetchParameter());
         break;
-    case ResultOperation::MoveAndSetMethodSend:
+    case Macro::ResultOperation::MoveAndSetMethodSend:
         // Move result and use as Method Address, then send bits 12:17 of result.
         SetRegister(reg, result);
         SetMethodAddress(result);
@@ -304,16 +234,28 @@ void MacroInterpreter::ProcessResult(ResultOperation operation, u32 reg, u32 res
     }
 }
 
-u32 MacroInterpreter::FetchParameter() {
-    ASSERT(next_parameter_index < num_parameters);
-    return parameters[next_parameter_index++];
+bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
+    switch (cond) {
+    case Macro::BranchCondition::Zero:
+        return value == 0;
+    case Macro::BranchCondition::NotZero:
+        return value != 0;
+    }
+    UNREACHABLE();
+    return true;
 }
 
-u32 MacroInterpreter::GetRegister(u32 register_id) const {
+Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
+    ASSERT((pc % sizeof(u32)) == 0);
+    ASSERT(pc < code.size() * sizeof(u32));
+    return {code[pc / sizeof(u32)]};
+}
+
+u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
     return registers.at(register_id);
 }
 
-void MacroInterpreter::SetRegister(u32 register_id, u32 value) {
+void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
     // Register 0 is hardwired as the zero register.
     // Ensure no writes to it actually occur.
     if (register_id == 0) {
@@ -323,30 +265,24 @@ void MacroInterpreter::SetRegister(u32 register_id, u32 value) {
     registers.at(register_id) = value;
 }
 
-void MacroInterpreter::SetMethodAddress(u32 address) {
+void MacroInterpreterImpl::SetMethodAddress(u32 address) {
     method_address.raw = address;
 }
 
-void MacroInterpreter::Send(u32 value) {
+void MacroInterpreterImpl::Send(u32 value) {
     maxwell3d.CallMethodFromMME(method_address.address, value);
     // Increment the method address by the method increment.
     method_address.address.Assign(method_address.address.Value() +
                                   method_address.increment.Value());
 }
 
-u32 MacroInterpreter::Read(u32 method) const {
+u32 MacroInterpreterImpl::Read(u32 method) const {
     return maxwell3d.GetRegisterValue(method);
 }
 
-bool MacroInterpreter::EvaluateBranchCondition(BranchCondition cond, u32 value) const {
-    switch (cond) {
-    case BranchCondition::Zero:
-        return value == 0;
-    case BranchCondition::NotZero:
-        return value != 0;
-    }
-    UNREACHABLE();
-    return true;
+u32 MacroInterpreterImpl::FetchParameter() {
+    ASSERT(next_parameter_index < num_parameters);
+    return parameters[next_parameter_index++];
 }
 
 } // namespace Tegra
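
One subtlety in GetALUResult above is worth restating: the carry flag doubles as a no-borrow flag for subtraction, which is the inverse of the x86 convention. The same arithmetic outside the class (illustrative only; the function names are made up):

    #include <cstdint>

    // Add: carry set when the 64-bit sum no longer fits in 32 bits.
    //   0xFFFFFFFF + 1 -> result 0, carry true
    std::uint32_t AddWithFlag(std::uint32_t a, std::uint32_t b, bool& carry) {
        const std::uint64_t result = std::uint64_t{a} + b;
        carry = result > 0xffffffff;
        return static_cast<std::uint32_t>(result);
    }

    // Subtract: carry set when no borrow occurred, i.e. the 64-bit
    // difference stayed below 2^32.
    //   0 - 1 -> result 0xFFFFFFFF, carry false
    std::uint32_t SubWithFlag(std::uint32_t a, std::uint32_t b, bool& carry) {
        const std::uint64_t result = std::uint64_t{a} - b;
        carry = result < 0x100000000;
        return static_cast<std::uint32_t>(result);
    }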
diff --git a/src/video_core/macro_interpreter.h b/src/video_core/macro/macro_interpreter.h
index 631146d89..90217fc89 100644
--- a/src/video_core/macro_interpreter.h
+++ b/src/video_core/macro/macro_interpreter.h
@@ -1,44 +1,37 @@
-// Copyright 2018 yuzu Emulator Project
+// Copyright 2020 yuzu Emulator Project
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
 #pragma once
-
 #include <array>
 #include <optional>
-
+#include <vector>
 #include "common/bit_field.h"
 #include "common/common_types.h"
+#include "video_core/macro/macro.h"
 
 namespace Tegra {
 namespace Engines {
 class Maxwell3D;
 }
 
-class MacroInterpreter final {
+class MacroInterpreter final : public MacroEngine {
 public:
     explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
 
-    /**
-     * Executes the macro code with the specified input parameters.
-     * @param offset Offset to start execution at.
-     * @param parameters The parameters of the macro.
-     */
-    void Execute(u32 offset, std::size_t num_parameters, const u32* parameters);
+protected:
+    std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
 
 private:
-    enum class ALUOperation : u32;
-    enum class BranchCondition : u32;
-    enum class ResultOperation : u32;
-
-    union Opcode;
+    Engines::Maxwell3D& maxwell3d;
+};
 
-    union MethodAddress {
-        u32 raw;
-        BitField<0, 12, u32> address;
-        BitField<12, 6, u32> increment;
-    };
+class MacroInterpreterImpl : public CachedMacro {
+public:
+    MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
+    void Execute(const std::vector<u32>& parameters, u32 method) override;
 
+private:
     /// Resets the execution engine state, zeroing registers, etc.
     void Reset();
 
@@ -49,20 +42,20 @@ private:
      * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
      * previous instruction.
      */
-    bool Step(u32 offset, bool is_delay_slot);
+    bool Step(bool is_delay_slot);
 
     /// Calculates the result of an ALU operation. src_a OP src_b;
-    u32 GetALUResult(ALUOperation operation, u32 src_a, u32 src_b);
+    u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
 
     /// Performs the result operation on the input result and stores it in the specified register
     /// (if necessary).
-    void ProcessResult(ResultOperation operation, u32 reg, u32 result);
+    void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
 
     /// Evaluates the branch condition and returns whether the branch should be taken or not.
-    bool EvaluateBranchCondition(BranchCondition cond, u32 value) const;
+    bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
 
     /// Reads an opcode at the current program counter location.
-    Opcode GetOpcode(u32 offset) const;
+    Macro::Opcode GetOpcode() const;
 
     /// Returns the specified register's value. Register 0 is hardcoded to always return 0.
     u32 GetRegister(u32 register_id) const;
@@ -89,13 +82,11 @@ private:
     /// Program counter to execute at after the delay slot is executed.
     std::optional<u32> delayed_pc;
 
-    static constexpr std::size_t NumMacroRegisters = 8;
-
     /// General purpose macro registers.
-    std::array<u32, NumMacroRegisters> registers = {};
+    std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
 
     /// Method address to use for the next Send instruction.
-    MethodAddress method_address = {};
+    Macro::MethodAddress method_address = {};
 
     /// Input parameters of the current macro.
     std::unique_ptr<u32[]> parameters;
@@ -105,5 +96,7 @@ private:
     u32 next_parameter_index = 0;
 
     bool carry_flag = false;
+    const std::vector<u32>& code;
 };
+
 } // namespace Tegra
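
The split declared here is the whole backend contract: a MacroEngine turns an uploaded code blob into a CachedMacro once, and the CachedMacro owns execution. A skeletal, purely hypothetical backend shows how little a third implementation would need:

    #include <memory>
    #include <vector>
    #include "video_core/macro/macro.h"

    namespace Tegra {

    class NullCachedMacro final : public CachedMacro {
    public:
        // A real backend would run the compiled form of the code here.
        void Execute(const std::vector<u32>& parameters, u32 method) override {}
    };

    class NullMacroEngine final : public MacroEngine {
    protected:
        std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override {
            return std::make_unique<NullCachedMacro>();
        }
    };

    } // namespace Tegra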
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
new file mode 100644
index 000000000..11c1cc3be
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -0,0 +1,640 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "common/x64/xbyak_util.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/macro/macro_interpreter.h"
11#include "video_core/macro/macro_jit_x64.h"
12
13MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16namespace Tegra {
17static const Xbyak::Reg64 PARAMETERS = Xbyak::util::r9;
18static const Xbyak::Reg64 REGISTERS = Xbyak::util::r10;
19static const Xbyak::Reg64 STATE = Xbyak::util::r11;
20static const Xbyak::Reg64 NEXT_PARAMETER = Xbyak::util::r12;
21static const Xbyak::Reg32 RESULT = Xbyak::util::r13d;
22static const Xbyak::Reg64 RESULT_64 = Xbyak::util::r13;
23static const Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d;
24static const Xbyak::Reg64 METHOD_ADDRESS_64 = Xbyak::util::r14;
25static const Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;
26
27static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
28 PARAMETERS,
29 REGISTERS,
30 STATE,
31 NEXT_PARAMETER,
32 RESULT,
33 METHOD_ADDRESS,
34 BRANCH_HOLDER,
35});
36
37MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
38
39std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
40 return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
41}
42
43MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
44 : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
45 Compile();
46}
47
48MacroJITx64Impl::~MacroJITx64Impl() = default;
49
50void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
51 MICROPROFILE_SCOPE(MacroJitExecute);
52 ASSERT_OR_EXECUTE(program != nullptr, { return; });
53 JITState state{};
54 state.maxwell3d = &maxwell3d;
55 state.registers = {};
56 state.parameters = parameters.data();
57 program(&state);
58}
59
60void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
61 const bool is_a_zero = opcode.src_a == 0;
62 const bool is_b_zero = opcode.src_b == 0;
63 const bool valid_operation = !is_a_zero && !is_b_zero;
64 const bool is_move_operation = !is_a_zero && is_b_zero;
65 const bool has_zero_register = is_a_zero || is_b_zero;
66
67 Xbyak::Reg64 src_a;
68 Xbyak::Reg32 src_b;
69
70 if (!optimizer.zero_reg_skip) {
71 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
72 src_b = Compile_GetRegister(opcode.src_b, ebx);
73 } else {
74 if (!is_a_zero) {
75 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
76 }
77 if (!is_b_zero) {
78 src_b = Compile_GetRegister(opcode.src_b, ebx);
79 }
80 }
81 Xbyak::Label skip_carry{};
82
83 bool has_emitted = false;
84
85 switch (opcode.alu_operation) {
86 case Macro::ALUOperation::Add:
87 if (optimizer.zero_reg_skip) {
88 if (valid_operation) {
89 add(src_a, src_b);
90 }
91 } else {
92 add(src_a, src_b);
93 }
94
95 if (!optimizer.can_skip_carry) {
96 setc(byte[STATE + offsetof(JITState, carry_flag)]);
97 }
98 break;
99 case Macro::ALUOperation::AddWithCarry:
100 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
101 adc(src_a, src_b);
102 setc(byte[STATE + offsetof(JITState, carry_flag)]);
103 break;
104 case Macro::ALUOperation::Subtract:
105 if (optimizer.zero_reg_skip) {
106 if (valid_operation) {
107 sub(src_a, src_b);
108 has_emitted = true;
109 }
110 } else {
111 sub(src_a, src_b);
112 has_emitted = true;
113 }
114 if (!optimizer.can_skip_carry && has_emitted) {
115 setc(byte[STATE + offsetof(JITState, carry_flag)]);
116 }
117 break;
118 case Macro::ALUOperation::SubtractWithBorrow:
119 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
120 sbb(src_a, src_b);
121 setc(byte[STATE + offsetof(JITState, carry_flag)]);
122 break;
123 case Macro::ALUOperation::Xor:
124 if (optimizer.zero_reg_skip) {
125 if (valid_operation) {
126 xor_(src_a, src_b);
127 }
128 } else {
129 xor_(src_a, src_b);
130 }
131 break;
132 case Macro::ALUOperation::Or:
133 if (optimizer.zero_reg_skip) {
134 if (valid_operation) {
135 or_(src_a, src_b);
136 }
137 } else {
138 or_(src_a, src_b);
139 }
140 break;
141 case Macro::ALUOperation::And:
142 if (optimizer.zero_reg_skip) {
143 if (!has_zero_register) {
144 and_(src_a, src_b);
145 }
146 } else {
147 and_(src_a, src_b);
148 }
149 break;
150 case Macro::ALUOperation::AndNot:
151 if (optimizer.zero_reg_skip) {
152 if (!is_a_zero) {
153 not_(src_b);
154 and_(src_a, src_b);
155 }
156 } else {
157 not_(src_b);
158 and_(src_a, src_b);
159 }
160 break;
161 case Macro::ALUOperation::Nand:
162 if (optimizer.zero_reg_skip) {
163 if (!is_a_zero) {
164 and_(src_a, src_b);
165 not_(src_a);
166 }
167 } else {
168 and_(src_a, src_b);
169 not_(src_a);
170 }
171 break;
172 default:
173 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}",
174 static_cast<std::size_t>(opcode.alu_operation.Value()));
175 break;
176 }
177 Compile_ProcessResult(opcode.result_operation, opcode.dst);
178}
179
180void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
181 if (optimizer.skip_dummy_addimmediate) {
182 // Games tend to use this as an exit instruction placeholder. It's to encode an instruction
183 // without doing anything. In our case we can just not emit anything.
184 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
185 return;
186 }
187 }
188 // Check for redundant moves
189 if (optimizer.optimize_for_method_move &&
190 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
191 if (next_opcode.has_value()) {
192 const auto next = *next_opcode;
193 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
194 return;
195 }
196 }
197 }
198 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
199 if (opcode.immediate == 0) {
200 xor_(RESULT, RESULT);
201 } else {
202 mov(RESULT, opcode.immediate);
203 }
204 } else {
205 auto result = Compile_GetRegister(opcode.src_a, RESULT);
206 if (opcode.immediate > 2) {
207 add(result, opcode.immediate);
208 } else if (opcode.immediate == 1) {
209 inc(result);
210 } else if (opcode.immediate < 0) {
211 sub(result, opcode.immediate * -1);
212 }
213 }
214 Compile_ProcessResult(opcode.result_operation, opcode.dst);
215}
216
217void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
218 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
219 auto src = Compile_GetRegister(opcode.src_b, eax);
220
221 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
222 shr(src, opcode.bf_src_bit);
223 } else if (opcode.bf_src_bit == 31) {
224 xor_(src, src);
225 }
226 // Don't bother masking the whole register since we're using a 32 bit register
227 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
228 and_(src, opcode.GetBitfieldMask());
229 } else if (opcode.bf_size == 0) {
230 xor_(src, src);
231 }
232 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
233 shl(src, opcode.bf_dst_bit);
234 } else if (opcode.bf_dst_bit == 31) {
235 xor_(src, src);
236 }
237
238 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
239 if (mask != 0xffffffff) {
240 and_(dst, mask);
241 }
242 or_(dst, src);
243 Compile_ProcessResult(opcode.result_operation, opcode.dst);
244}
245
246void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
247 auto dst = Compile_GetRegister(opcode.src_a, eax);
248 auto src = Compile_GetRegister(opcode.src_b, RESULT);
249
250 shr(src, al);
251 if (opcode.bf_size != 0 && opcode.bf_size != 31) {
252 and_(src, opcode.GetBitfieldMask());
253 } else if (opcode.bf_size == 0) {
254 xor_(src, src);
255 }
256
257 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
258 shl(src, opcode.bf_dst_bit);
259 } else if (opcode.bf_dst_bit == 31) {
260 xor_(src, src);
261 }
262 Compile_ProcessResult(opcode.result_operation, opcode.dst);
263}
264
265void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
266 auto dst = Compile_GetRegister(opcode.src_a, eax);
267 auto src = Compile_GetRegister(opcode.src_b, RESULT);
268
269 if (opcode.bf_src_bit != 0) {
270 shr(src, opcode.bf_src_bit);
271 }
272
273 if (opcode.bf_size != 31) {
274 and_(src, opcode.GetBitfieldMask());
275 }
276 shl(src, al);
277 Compile_ProcessResult(opcode.result_operation, opcode.dst);
278}
279
280static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) {
281 return maxwell3d->GetRegisterValue(method);
282}
283
284static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
285 maxwell3d->CallMethodFromMME(method_address.address, value);
286}
287
288void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
289 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
290 if (opcode.immediate == 0) {
291 xor_(RESULT, RESULT);
292 } else {
293 mov(RESULT, opcode.immediate);
294 }
295 } else {
296 auto result = Compile_GetRegister(opcode.src_a, RESULT);
297 if (opcode.immediate > 2) {
298 add(result, opcode.immediate);
299 } else if (opcode.immediate == 1) {
300 inc(result);
301 } else if (opcode.immediate < 0) {
302 sub(result, opcode.immediate * -1);
303 }
304 }
305 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
306 mov(Common::X64::ABI_PARAM1, qword[STATE]);
307 mov(Common::X64::ABI_PARAM2, RESULT);
308 Common::X64::CallFarFunction(*this, &Read);
309 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
310 mov(RESULT, Common::X64::ABI_RETURN.cvt32());
311 Compile_ProcessResult(opcode.result_operation, opcode.dst);
312}
313
314void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
315 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
316 mov(Common::X64::ABI_PARAM1, qword[STATE]);
317 mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
318 mov(Common::X64::ABI_PARAM3, value);
319 Common::X64::CallFarFunction(*this, &Send);
320 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
321
322 Xbyak::Label dont_process{};
323 // Get increment
324 test(METHOD_ADDRESS, 0x3f000);
325 // If zero, method address doesn't update
326 je(dont_process);
327
328 mov(ecx, METHOD_ADDRESS);
329 and_(METHOD_ADDRESS, 0xfff);
330 shr(ecx, 12);
331 and_(ecx, 0x3f);
332 lea(eax, ptr[rcx + METHOD_ADDRESS_64]);
333 sal(ecx, 12);
334 or_(eax, ecx);
335
336 mov(METHOD_ADDRESS, eax);
337
338 L(dont_process);
339}
340
341void Tegra::MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
342 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
343 const s32 jump_address =
344 static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));
345
346 Xbyak::Label end;
347 auto value = Compile_GetRegister(opcode.src_a, eax);
348 test(value, value);
349 if (optimizer.has_delayed_pc) {
350 switch (opcode.branch_condition) {
351 case Macro::BranchCondition::Zero:
352 jne(end, T_NEAR);
353 break;
354 case Macro::BranchCondition::NotZero:
355 je(end, T_NEAR);
356 break;
357 }
358
359 if (opcode.branch_annul) {
360 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
361 jmp(labels[jump_address], T_NEAR);
362 } else {
363 Xbyak::Label handle_post_exit{};
364 Xbyak::Label skip{};
365 jmp(skip, T_NEAR);
366 if (opcode.is_exit) {
367 L(handle_post_exit);
368 // Execute 1 instruction
369 mov(BRANCH_HOLDER, end_of_code);
370 // Jump to next instruction to skip delay slot check
371 jmp(labels[jump_address], T_NEAR);
372 } else {
373 L(handle_post_exit);
374 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
375 jmp(labels[jump_address], T_NEAR);
376 }
377 L(skip);
378 mov(BRANCH_HOLDER, handle_post_exit);
379 jmp(delay_skip[pc], T_NEAR);
380 }
381 } else {
382 switch (opcode.branch_condition) {
383 case Macro::BranchCondition::Zero:
384 je(labels[jump_address], T_NEAR);
385 break;
386 case Macro::BranchCondition::NotZero:
387 jne(labels[jump_address], T_NEAR);
388 break;
389 }
390 }
391
392 L(end);
393}
394
395void Tegra::MacroJITx64Impl::Optimizer_ScanFlags() {
396 optimizer.can_skip_carry = true;
397 optimizer.has_delayed_pc = false;
398 for (auto raw_op : code) {
399 Macro::Opcode op{};
400 op.raw = raw_op;
401
402 if (op.operation == Macro::Operation::ALU) {
403 // Scan for any ALU operations which actually use the carry flag, if they don't exist in
404 // our current code we can skip emitting the carry flag handling operations
405 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
406 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
407 optimizer.can_skip_carry = false;
408 }
409 }
410
411 if (op.operation == Macro::Operation::Branch) {
412 if (!op.branch_annul) {
413 optimizer.has_delayed_pc = true;
414 }
415 }
416 }
417}
418
419void MacroJITx64Impl::Compile() {
420 MICROPROFILE_SCOPE(MacroJitCompile);
421 bool keep_executing = true;
422 labels.fill(Xbyak::Label());
423
424 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
425 // JIT state
426 mov(STATE, Common::X64::ABI_PARAM1);
427 mov(PARAMETERS, qword[Common::X64::ABI_PARAM1 +
428 static_cast<Xbyak::uint32>(offsetof(JITState, parameters))]);
429 mov(REGISTERS, Common::X64::ABI_PARAM1);
430 add(REGISTERS, static_cast<Xbyak::uint32>(offsetof(JITState, registers)));
431 xor_(RESULT, RESULT);
432 xor_(METHOD_ADDRESS, METHOD_ADDRESS);
433 xor_(NEXT_PARAMETER, NEXT_PARAMETER);
434 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
435
436 mov(dword[REGISTERS + 4], Compile_FetchParameter());
437
438 // Track get register for zero registers and mark it as no-op
439 optimizer.zero_reg_skip = true;
440
441 // AddImmediate tends to be used as a NOP instruction, if we detect this we can
442 // completely skip the entire code path and no emit anything
443 optimizer.skip_dummy_addimmediate = true;
444
445 // SMO tends to emit a lot of unnecessary method moves, we can mitigate this by only emitting
446 // one if our register isn't "dirty"
447 optimizer.optimize_for_method_move = true;
448
449 // Check to see if we can skip emitting certain instructions
450 Optimizer_ScanFlags();
451
452 const u32 op_count = static_cast<u32>(code.size());
453 for (u32 i = 0; i < op_count; i++) {
454 if (i < op_count - 1) {
455 pc = i + 1;
456 next_opcode = GetOpCode();
457 } else {
458 next_opcode = {};
459 }
460 pc = i;
461 Compile_NextInstruction();
462 }
463
464 L(end_of_code);
465
466 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
467 ret();
468 ready();
469 program = getCode<ProgramType>();
470}
471
472bool MacroJITx64Impl::Compile_NextInstruction() {
473 const auto opcode = GetOpCode();
474 if (labels[pc].getAddress()) {
475 return false;
476 }
477
478 L(labels[pc]);
479
480 switch (opcode.operation) {
481 case Macro::Operation::ALU:
482 Compile_ALU(opcode);
483 break;
484 case Macro::Operation::AddImmediate:
485 Compile_AddImmediate(opcode);
486 break;
487 case Macro::Operation::ExtractInsert:
488 Compile_ExtractInsert(opcode);
489 break;
490 case Macro::Operation::ExtractShiftLeftImmediate:
491 Compile_ExtractShiftLeftImmediate(opcode);
492 break;
493 case Macro::Operation::ExtractShiftLeftRegister:
494 Compile_ExtractShiftLeftRegister(opcode);
495 break;
496 case Macro::Operation::Read:
497 Compile_Read(opcode);
498 break;
499 case Macro::Operation::Branch:
500 Compile_Branch(opcode);
501 break;
502 default:
503 UNIMPLEMENTED_MSG("Unimplemented opcode {}", opcode.operation.Value());
504 break;
505 }
506
507 if (optimizer.has_delayed_pc) {
508 if (opcode.is_exit) {
509 mov(rax, end_of_code);
510 test(BRANCH_HOLDER, BRANCH_HOLDER);
511 cmove(BRANCH_HOLDER, rax);
512 // Jump to next instruction to skip delay slot check
513 je(labels[pc + 1], T_NEAR);
514 } else {
515 // TODO(ogniK): Optimize delay slot branching
516 Xbyak::Label no_delay_slot{};
517 test(BRANCH_HOLDER, BRANCH_HOLDER);
518 je(no_delay_slot, T_NEAR);
519 mov(rax, BRANCH_HOLDER);
520 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
521 jmp(rax);
522 L(no_delay_slot);
523 }
524 L(delay_skip[pc]);
525 if (opcode.is_exit) {
526 return false;
527 }
528 } else {
529 test(BRANCH_HOLDER, BRANCH_HOLDER);
530 jne(end_of_code, T_NEAR);
531 if (opcode.is_exit) {
532 inc(BRANCH_HOLDER);
533 return false;
534 }
535 }
536 return true;
537}
538
539Xbyak::Reg32 Tegra::MacroJITx64Impl::Compile_FetchParameter() {
540 mov(eax, dword[PARAMETERS + NEXT_PARAMETER * sizeof(u32)]);
541 inc(NEXT_PARAMETER);
542 return eax;
543}
544
545Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
546 if (index == 0) {
547 // Register 0 is always zero
548 xor_(dst, dst);
549 } else {
550 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
551 }
552
553 return dst;
554}
555
556Xbyak::Reg64 Tegra::MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg64 dst) {
557 if (index == 0) {
558 // Register 0 is always zero
559 xor_(dst, dst);
560 } else {
561 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
562 }
563
564 return dst;
565}
566
567void Tegra::MacroJITx64Impl::Compile_WriteCarry(Xbyak::Reg64 dst) {
568     // The carry flag is set when the upper 32 bits of the 64-bit result are non-zero
569 xor_(ecx, ecx);
570 shr(dst, 32);
571 setne(cl);
572 mov(dword[STATE + offsetof(JITState, carry_flag)], ecx);
573}
574
575void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
576 auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
577 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
578 // register.
579 if (reg == 0) {
580 return;
581 }
582 mov(dword[REGISTERS + reg * sizeof(u32)], result);
583 };
584 auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
585
586 switch (operation) {
587 case Macro::ResultOperation::IgnoreAndFetch:
588 SetRegister(reg, Compile_FetchParameter());
589 break;
590 case Macro::ResultOperation::Move:
591 SetRegister(reg, RESULT);
592 break;
593 case Macro::ResultOperation::MoveAndSetMethod:
594 SetRegister(reg, RESULT);
595 SetMethodAddress(RESULT);
596 break;
597 case Macro::ResultOperation::FetchAndSend:
598 // Fetch parameter and send result.
599 SetRegister(reg, Compile_FetchParameter());
600 Compile_Send(RESULT);
601 break;
602 case Macro::ResultOperation::MoveAndSend:
603 // Move and send result.
604 SetRegister(reg, RESULT);
605 Compile_Send(RESULT);
606 break;
607 case Macro::ResultOperation::FetchAndSetMethod:
608 // Fetch parameter and use result as Method Address.
609 SetRegister(reg, Compile_FetchParameter());
610 SetMethodAddress(RESULT);
611 break;
612 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
613 // Move result and use as Method Address, then fetch and send parameter.
614 SetRegister(reg, RESULT);
615 SetMethodAddress(RESULT);
616 Compile_Send(Compile_FetchParameter());
617 break;
618 case Macro::ResultOperation::MoveAndSetMethodSend:
619 // Move result and use as Method Address, then send bits 12:17 of result.
620 SetRegister(reg, RESULT);
621 SetMethodAddress(RESULT);
622 shr(RESULT, 12);
623 and_(RESULT, 0b111111);
624 Compile_Send(RESULT);
625 break;
626 default:
627 UNIMPLEMENTED_MSG("Unimplemented macro operation {}", static_cast<std::size_t>(operation));
628 }
629}
630
631Macro::Opcode MacroJITx64Impl::GetOpCode() const {
632 ASSERT(pc < code.size());
633 return {code[pc]};
634}
635
636std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
637 return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
638}
639
640} // namespace Tegra
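
Editor's note: the compiled macro is exposed as a bare function pointer (ProgramType, declared in macro_jit_x64.h below) that receives a pointer to JITState. A minimal sketch of how Execute could drive it, assuming only the JITState fields shown in the header; this is illustrative, not necessarily the exact committed body:

    void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
        ASSERT(program != nullptr);
        JITState state{};
        state.maxwell3d = &maxwell3d;         // must stay at offset 0, see the static_assert
        state.parameters = parameters.data(); // read back via the PARAMETERS register
        program(&state);                      // runs the code emitted by Compile()
    }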
diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h
new file mode 100644
index 000000000..21ee157cf
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.h
@@ -0,0 +1,100 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <bitset>
9#include <xbyak.h>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/x64/xbyak_abi.h"
13#include "video_core/macro/macro.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19}
20
21/// MAX_CODE_SIZE is chosen arbitrarily; it is large enough for every game tested so far
22constexpr size_t MAX_CODE_SIZE = 0x10000;
23
24class MacroJITx64 final : public MacroEngine {
25public:
26 explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
27
28protected:
29 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
30
31private:
32 Engines::Maxwell3D& maxwell3d;
33};
34
35class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
36public:
37 MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
38 ~MacroJITx64Impl();
39
40 void Execute(const std::vector<u32>& parameters, u32 method) override;
41
42 void Compile_ALU(Macro::Opcode opcode);
43 void Compile_AddImmediate(Macro::Opcode opcode);
44 void Compile_ExtractInsert(Macro::Opcode opcode);
45 void Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode);
46 void Compile_ExtractShiftLeftRegister(Macro::Opcode opcode);
47 void Compile_Read(Macro::Opcode opcode);
48 void Compile_Branch(Macro::Opcode opcode);
49
50private:
51 void Optimizer_ScanFlags();
52
53 void Compile();
54 bool Compile_NextInstruction();
55
56 Xbyak::Reg32 Compile_FetchParameter();
57 Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
58 Xbyak::Reg64 Compile_GetRegister(u32 index, Xbyak::Reg64 dst);
59 void Compile_WriteCarry(Xbyak::Reg64 dst);
60
61 void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
62 void Compile_Send(Xbyak::Reg32 value);
63
64 Macro::Opcode GetOpCode() const;
65 std::bitset<32> PersistentCallerSavedRegs() const;
66
67 struct JITState {
68 Engines::Maxwell3D* maxwell3d{};
69 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers{};
70 const u32* parameters{};
71 u32 carry_flag{};
72 };
73 static_assert(offsetof(JITState, maxwell3d) == 0, "Maxwell3D is not at 0x0");
74 using ProgramType = void (*)(JITState*);
75
76 struct OptimizerState {
77 bool can_skip_carry{};
78 bool has_delayed_pc{};
79 bool zero_reg_skip{};
80 bool skip_dummy_addimmediate{};
81 bool optimize_for_method_move{};
82 };
83 OptimizerState optimizer{};
84
85 std::optional<Macro::Opcode> next_opcode{};
86 ProgramType program{nullptr};
87
88 std::array<Xbyak::Label, MAX_CODE_SIZE> labels{};
89 std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip{};
90 Xbyak::Label end_of_code{};
91
92 bool is_delay_slot{};
93 u32 pc{};
94 std::optional<u32> delayed_pc;
95
96 const std::vector<u32>& code;
97 Engines::Maxwell3D& maxwell3d;
98};
99
100} // namespace Tegra
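
Editor's note: the dispatcher that picks between this JIT and the interpreter is expected to live in macro/macro.cpp and key off the new disable_macro_jit setting added in this change. A hedged sketch (function name and exact shape assumed, MacroInterpreter taken from the sibling macro_interpreter.h):

    std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
        if (Settings::values.disable_macro_jit) {
            return std::make_unique<MacroInterpreter>(maxwell3d);
        }
        return std::make_unique<MacroJITx64>(maxwell3d);
    }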
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 8b424e2cb..890fc6c63 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -185,12 +185,20 @@ bool IsASTCSupported() {
185Device::Device() 185Device::Device()
186 : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} { 186 : max_uniform_buffers{BuildMaxUniformBuffers()}, base_bindings{BuildBaseBindings()} {
187 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR)); 187 const std::string_view vendor = reinterpret_cast<const char*>(glGetString(GL_VENDOR));
188 const auto renderer = reinterpret_cast<const char*>(glGetString(GL_RENDERER)); 188 const std::string_view version = reinterpret_cast<const char*>(glGetString(GL_VERSION));
189 const std::vector extensions = GetExtensions(); 189 const std::vector extensions = GetExtensions();
190 190
191 const bool is_nvidia = vendor == "NVIDIA Corporation"; 191 const bool is_nvidia = vendor == "NVIDIA Corporation";
192 const bool is_amd = vendor == "ATI Technologies Inc."; 192 const bool is_amd = vendor == "ATI Technologies Inc.";
193 193
194 bool disable_fast_buffer_sub_data = false;
195 if (is_nvidia && version == "4.6.0 NVIDIA 443.24") {
196 LOG_WARNING(
197 Render_OpenGL,
198 "Beta driver 443.24 is known to have issues. There might be performance issues.");
199 disable_fast_buffer_sub_data = true;
200 }
201
194 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT); 202 uniform_buffer_alignment = GetInteger<std::size_t>(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
195 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT); 203 shader_storage_alignment = GetInteger<std::size_t>(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT);
196 max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS); 204 max_vertex_attributes = GetInteger<u32>(GL_MAX_VERTEX_ATTRIBS);
@@ -204,7 +212,7 @@ Device::Device()
204 has_variable_aoffi = TestVariableAoffi(); 212 has_variable_aoffi = TestVariableAoffi();
205 has_component_indexing_bug = is_amd; 213 has_component_indexing_bug = is_amd;
206 has_precise_bug = TestPreciseBug(); 214 has_precise_bug = TestPreciseBug();
207 has_fast_buffer_sub_data = is_nvidia; 215 has_fast_buffer_sub_data = is_nvidia && !disable_fast_buffer_sub_data;
208 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 && 216 use_assembly_shaders = Settings::values.use_assembly_shaders && GLAD_GL_NV_gpu_program5 &&
209 GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback && 217 GLAD_GL_NV_compute_program5 && GLAD_GL_NV_transform_feedback &&
210 GLAD_GL_NV_transform_feedback2; 218 GLAD_GL_NV_transform_feedback2;
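
Editor's note: the workaround above keys on an exact GL_VERSION string, so it only covers that one beta driver. If it ever needs to cover point releases, a substring match is one option (hypothetical variant, not part of this change):

    const bool is_bad_driver = version.find("443.24") != std::string_view::npos;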
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 502b95973..d6e30b321 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -626,7 +626,9 @@ private:
626 break; 626 break;
627 } 627 }
628 } 628 }
629 if (stage != ShaderType::Vertex || device.HasVertexViewportLayer()) { 629
630 if (stage != ShaderType::Geometry &&
631 (stage != ShaderType::Vertex || device.HasVertexViewportLayer())) {
630 if (ir.UsesLayer()) { 632 if (ir.UsesLayer()) {
631 code.AddLine("int gl_Layer;"); 633 code.AddLine("int gl_Layer;");
632 } 634 }
@@ -655,6 +657,16 @@ private:
655 --code.scope; 657 --code.scope;
656 code.AddLine("}};"); 658 code.AddLine("}};");
657 code.AddNewLine(); 659 code.AddNewLine();
660
661 if (stage == ShaderType::Geometry) {
662 if (ir.UsesLayer()) {
663 code.AddLine("out int gl_Layer;");
664 }
665 if (ir.UsesViewportIndex()) {
666 code.AddLine("out int gl_ViewportIndex;");
667 }
668 }
669 code.AddNewLine();
658 } 670 }
659 671
660 void DeclareRegisters() { 672 void DeclareRegisters() {
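
Editor's note: with this change, a geometry shader that writes the layer or viewport index gets plain output redeclarations emitted after the per-vertex output block instead of inside it. Roughly the emitted GLSL looks like the following (assumed shape, shown as a C++ raw string for reference):

    constexpr const char* geometry_decls = R"(
    out gl_PerVertex {
        vec4 gl_Position;
    };
    out int gl_Layer;
    out int gl_ViewportIndex;
    )";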
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index e7952924a..6214fcbc3 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -751,11 +751,9 @@ void RendererOpenGL::RenderScreenshot() {
751} 751}
752 752
753bool RendererOpenGL::Init() { 753bool RendererOpenGL::Init() {
754 if (GLAD_GL_KHR_debug) { 754 if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
755 glEnable(GL_DEBUG_OUTPUT); 755 glEnable(GL_DEBUG_OUTPUT);
756 if (Settings::values.renderer_debug) { 756 glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
757 glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
758 }
759 glDebugMessageCallback(DebugHandler, nullptr); 757 glDebugMessageCallback(DebugHandler, nullptr);
760 } 758 }
761 759
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 8e1b46277..281bf9ac3 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -53,8 +53,9 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
53 }; 53 };
54 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size()); 54 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
55 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size()); 55 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
56 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size()); 56 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
57 add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size()); 57 add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
58 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
58 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size()); 59 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
59 60
60 VkDescriptorSetLayoutCreateInfo ci; 61 VkDescriptorSetLayoutCreateInfo ci;
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index 890fd52cf..9259b618d 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -42,6 +42,7 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
42 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60}, 42 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
43 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64}, 43 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
44 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64}, 44 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
45 {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
45 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}}; 46 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
46 47
47 VkDescriptorPoolCreateInfo ci; 48 VkDescriptorPoolCreateInfo ci;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 65a1c6245..b8ccf164f 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -45,6 +45,7 @@ constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
45constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; 45constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
46constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; 46constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
47constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; 47constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
48constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
48constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; 49constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
49 50
50constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ 51constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
@@ -104,8 +105,9 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
104 u32 binding = base_binding; 105 u32 binding = base_binding;
105 AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers); 106 AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
106 AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers); 107 AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
107 AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers); 108 AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
108 AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers); 109 AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
110 AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
109 AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images); 111 AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
110 return binding; 112 return binding;
111} 113}
@@ -377,16 +379,17 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
377 return; 379 return;
378 } 380 }
379 381
380 if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) { 382 if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
381 // Nvidia has a bug where updating multiple uniform texels at once causes the driver to 383 descriptor_type == STORAGE_TEXEL_BUFFER) {
382 // crash. 384 // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
385 // Note: fixed in driver versions 443.24 (Windows) and 440.66.15 (Linux)
383 for (u32 i = 0; i < count; ++i) { 386 for (u32 i = 0; i < count; ++i) {
384 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 387 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
385 entry.dstBinding = binding + i; 388 entry.dstBinding = binding + i;
386 entry.dstArrayElement = 0; 389 entry.dstArrayElement = 0;
387 entry.descriptorCount = 1; 390 entry.descriptorCount = 1;
388 entry.descriptorType = descriptor_type; 391 entry.descriptorType = descriptor_type;
389 entry.offset = offset + i * entry_size; 392 entry.offset = static_cast<std::size_t>(offset + i * entry_size);
390 entry.stride = entry_size; 393 entry.stride = entry_size;
391 } 394 }
392 } else if (count > 0) { 395 } else if (count > 0) {
@@ -407,8 +410,9 @@ void FillDescriptorUpdateTemplateEntries(
407 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) { 410 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
408 AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers); 411 AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
409 AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers); 412 AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
410 AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers); 413 AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
411 AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers); 414 AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
415 AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
412 AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images); 416 AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
413} 417}
414 418
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index a3d992ed3..d86c46412 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -468,8 +468,9 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
468 const auto& entries = pipeline.GetEntries(); 468 const auto& entries = pipeline.GetEntries();
469 SetupComputeConstBuffers(entries); 469 SetupComputeConstBuffers(entries);
470 SetupComputeGlobalBuffers(entries); 470 SetupComputeGlobalBuffers(entries);
471 SetupComputeTexelBuffers(entries); 471 SetupComputeUniformTexels(entries);
472 SetupComputeTextures(entries); 472 SetupComputeTextures(entries);
473 SetupComputeStorageTexels(entries);
473 SetupComputeImages(entries); 474 SetupComputeImages(entries);
474 475
475 buffer_cache.Unmap(); 476 buffer_cache.Unmap();
@@ -787,8 +788,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
787 const auto& entries = shader->GetEntries(); 788 const auto& entries = shader->GetEntries();
788 SetupGraphicsConstBuffers(entries, stage); 789 SetupGraphicsConstBuffers(entries, stage);
789 SetupGraphicsGlobalBuffers(entries, stage); 790 SetupGraphicsGlobalBuffers(entries, stage);
790 SetupGraphicsTexelBuffers(entries, stage); 791 SetupGraphicsUniformTexels(entries, stage);
791 SetupGraphicsTextures(entries, stage); 792 SetupGraphicsTextures(entries, stage);
793 SetupGraphicsStorageTexels(entries, stage);
792 SetupGraphicsImages(entries, stage); 794 SetupGraphicsImages(entries, stage);
793 } 795 }
794 texture_cache.GuardSamplers(false); 796 texture_cache.GuardSamplers(false);
@@ -838,6 +840,10 @@ void RasterizerVulkan::BeginTransformFeedback() {
838 if (regs.tfb_enabled == 0) { 840 if (regs.tfb_enabled == 0) {
839 return; 841 return;
840 } 842 }
843 if (!device.IsExtTransformFeedbackSupported()) {
844 LOG_ERROR(Render_Vulkan, "Transform feedback is used but not supported");
845 return;
846 }
841 847
842 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) || 848 UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationControl) ||
843 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) || 849 regs.IsShaderConfigEnabled(Maxwell::ShaderProgram::TesselationEval) ||
@@ -866,6 +872,9 @@ void RasterizerVulkan::EndTransformFeedback() {
866 if (regs.tfb_enabled == 0) { 872 if (regs.tfb_enabled == 0) {
867 return; 873 return;
868 } 874 }
875 if (!device.IsExtTransformFeedbackSupported()) {
876 return;
877 }
869 878
870 scheduler.Record( 879 scheduler.Record(
871 [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); }); 880 [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
@@ -976,12 +985,12 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries,
976 } 985 }
977} 986}
978 987
979void RasterizerVulkan::SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage) { 988void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
980 MICROPROFILE_SCOPE(Vulkan_Textures); 989 MICROPROFILE_SCOPE(Vulkan_Textures);
981 const auto& gpu = system.GPU().Maxwell3D(); 990 const auto& gpu = system.GPU().Maxwell3D();
982 for (const auto& entry : entries.texel_buffers) { 991 for (const auto& entry : entries.uniform_texels) {
983 const auto image = GetTextureInfo(gpu, entry, stage).tic; 992 const auto image = GetTextureInfo(gpu, entry, stage).tic;
984 SetupTexelBuffer(image, entry); 993 SetupUniformTexels(image, entry);
985 } 994 }
986} 995}
987 996
@@ -996,6 +1005,15 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
996 } 1005 }
997} 1006}
998 1007
1008void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
1009 MICROPROFILE_SCOPE(Vulkan_Textures);
1010 const auto& gpu = system.GPU().Maxwell3D();
1011 for (const auto& entry : entries.storage_texels) {
1012 const auto image = GetTextureInfo(gpu, entry, stage).tic;
1013 SetupStorageTexel(image, entry);
1014 }
1015}
1016
999void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) { 1017void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
1000 MICROPROFILE_SCOPE(Vulkan_Images); 1018 MICROPROFILE_SCOPE(Vulkan_Images);
1001 const auto& gpu = system.GPU().Maxwell3D(); 1019 const auto& gpu = system.GPU().Maxwell3D();
@@ -1028,12 +1046,12 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
1028 } 1046 }
1029} 1047}
1030 1048
1031void RasterizerVulkan::SetupComputeTexelBuffers(const ShaderEntries& entries) { 1049void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
1032 MICROPROFILE_SCOPE(Vulkan_Textures); 1050 MICROPROFILE_SCOPE(Vulkan_Textures);
1033 const auto& gpu = system.GPU().KeplerCompute(); 1051 const auto& gpu = system.GPU().KeplerCompute();
1034 for (const auto& entry : entries.texel_buffers) { 1052 for (const auto& entry : entries.uniform_texels) {
1035 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic; 1053 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
1036 SetupTexelBuffer(image, entry); 1054 SetupUniformTexels(image, entry);
1037 } 1055 }
1038} 1056}
1039 1057
@@ -1048,6 +1066,15 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
1048 } 1066 }
1049} 1067}
1050 1068
1069void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
1070 MICROPROFILE_SCOPE(Vulkan_Textures);
1071 const auto& gpu = system.GPU().KeplerCompute();
1072 for (const auto& entry : entries.storage_texels) {
1073 const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
1074 SetupStorageTexel(image, entry);
1075 }
1076}
1077
1051void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) { 1078void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
1052 MICROPROFILE_SCOPE(Vulkan_Images); 1079 MICROPROFILE_SCOPE(Vulkan_Images);
1053 const auto& gpu = system.GPU().KeplerCompute(); 1080 const auto& gpu = system.GPU().KeplerCompute();
@@ -1097,8 +1124,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
1097 update_descriptor_queue.AddBuffer(buffer, offset, size); 1124 update_descriptor_queue.AddBuffer(buffer, offset, size);
1098} 1125}
1099 1126
1100void RasterizerVulkan::SetupTexelBuffer(const Tegra::Texture::TICEntry& tic, 1127void RasterizerVulkan::SetupUniformTexels(const Tegra::Texture::TICEntry& tic,
1101 const TexelBufferEntry& entry) { 1128 const UniformTexelEntry& entry) {
1102 const auto view = texture_cache.GetTextureSurface(tic, entry); 1129 const auto view = texture_cache.GetTextureSurface(tic, entry);
1103 ASSERT(view->IsBufferView()); 1130 ASSERT(view->IsBufferView());
1104 1131
@@ -1120,6 +1147,14 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
1120 sampled_views.push_back(ImageView{std::move(view), image_layout}); 1147 sampled_views.push_back(ImageView{std::move(view), image_layout});
1121} 1148}
1122 1149
1150void RasterizerVulkan::SetupStorageTexel(const Tegra::Texture::TICEntry& tic,
1151 const StorageTexelEntry& entry) {
1152 const auto view = texture_cache.GetImageSurface(tic, entry);
1153 ASSERT(view->IsBufferView());
1154
1155 update_descriptor_queue.AddTexelBuffer(view->GetBufferView());
1156}
1157
1123void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) { 1158void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
1124 auto view = texture_cache.GetImageSurface(tic, entry); 1159 auto view = texture_cache.GetImageSurface(tic, entry);
1125 1160
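
Editor's note: the Setup* call order above (const buffers, global buffers, uniform texels, textures, storage texels, images) must stay in lockstep with the binding order declared in the descriptor set layouts and update templates. Summarized as a sketch (an assumed invariant across the renderer, not enforced by any API):

    enum class BindingOrder : u32 {
        ConstBuffers,   // VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
        GlobalBuffers,  // VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
        UniformTexels,  // VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
        Samplers,       // VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
        StorageTexels,  // VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
        Images,         // VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
    };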
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0ed0e48c6..04be37a5e 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -193,12 +193,15 @@ private:
193 /// Setup global buffers in the graphics pipeline. 193 /// Setup global buffers in the graphics pipeline.
194 void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage); 194 void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage);
195 195
196 /// Setup texel buffers in the graphics pipeline. 196 /// Setup uniform texels in the graphics pipeline.
197 void SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage); 197 void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);
198 198
199 /// Setup textures in the graphics pipeline. 199 /// Setup textures in the graphics pipeline.
200 void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage); 200 void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);
201 201
202 /// Setup storage texels in the graphics pipeline.
203 void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
204
202 /// Setup images in the graphics pipeline. 205 /// Setup images in the graphics pipeline.
203 void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage); 206 void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);
204 207
@@ -209,11 +212,14 @@ private:
209 void SetupComputeGlobalBuffers(const ShaderEntries& entries); 212 void SetupComputeGlobalBuffers(const ShaderEntries& entries);
210 213
211 /// Setup texel buffers in the compute pipeline. 214 /// Setup texel buffers in the compute pipeline.
212 void SetupComputeTexelBuffers(const ShaderEntries& entries); 215 void SetupComputeUniformTexels(const ShaderEntries& entries);
213 216
214 /// Setup textures in the compute pipeline. 217 /// Setup textures in the compute pipeline.
215 void SetupComputeTextures(const ShaderEntries& entries); 218 void SetupComputeTextures(const ShaderEntries& entries);
216 219
220 /// Setup storage texels in the compute pipeline.
221 void SetupComputeStorageTexels(const ShaderEntries& entries);
222
217 /// Setup images in the compute pipeline. 223 /// Setup images in the compute pipeline.
218 void SetupComputeImages(const ShaderEntries& entries); 224 void SetupComputeImages(const ShaderEntries& entries);
219 225
@@ -222,10 +228,12 @@ private:
222 228
223 void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address); 229 void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address);
224 230
225 void SetupTexelBuffer(const Tegra::Texture::TICEntry& image, const TexelBufferEntry& entry); 231 void SetupUniformTexels(const Tegra::Texture::TICEntry& image, const UniformTexelEntry& entry);
226 232
227 void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry); 233 void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry);
228 234
235 void SetupStorageTexel(const Tegra::Texture::TICEntry& tic, const StorageTexelEntry& entry);
236
229 void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry); 237 void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);
230 238
231 void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs); 239 void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index a13e8baa7..97429cc59 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -400,8 +400,9 @@ private:
400 u32 binding = specialization.base_binding; 400 u32 binding = specialization.base_binding;
401 binding = DeclareConstantBuffers(binding); 401 binding = DeclareConstantBuffers(binding);
402 binding = DeclareGlobalBuffers(binding); 402 binding = DeclareGlobalBuffers(binding);
403 binding = DeclareTexelBuffers(binding); 403 binding = DeclareUniformTexels(binding);
404 binding = DeclareSamplers(binding); 404 binding = DeclareSamplers(binding);
405 binding = DeclareStorageTexels(binding);
405 binding = DeclareImages(binding); 406 binding = DeclareImages(binding);
406 407
407 const Id main = OpFunction(t_void, {}, TypeFunction(t_void)); 408 const Id main = OpFunction(t_void, {}, TypeFunction(t_void));
@@ -889,7 +890,7 @@ private:
889 return binding; 890 return binding;
890 } 891 }
891 892
892 u32 DeclareTexelBuffers(u32 binding) { 893 u32 DeclareUniformTexels(u32 binding) {
893 for (const auto& sampler : ir.GetSamplers()) { 894 for (const auto& sampler : ir.GetSamplers()) {
894 if (!sampler.is_buffer) { 895 if (!sampler.is_buffer) {
895 continue; 896 continue;
@@ -910,7 +911,7 @@ private:
910 Decorate(id, spv::Decoration::Binding, binding++); 911 Decorate(id, spv::Decoration::Binding, binding++);
911 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 912 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
912 913
913 texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id}); 914 uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
914 } 915 }
915 return binding; 916 return binding;
916 } 917 }
@@ -945,31 +946,48 @@ private:
945 return binding; 946 return binding;
946 } 947 }
947 948
948 u32 DeclareImages(u32 binding) { 949 u32 DeclareStorageTexels(u32 binding) {
949 for (const auto& image : ir.GetImages()) { 950 for (const auto& image : ir.GetImages()) {
950 const auto [dim, arrayed] = GetImageDim(image); 951 if (image.type != Tegra::Shader::ImageType::TextureBuffer) {
951 constexpr int depth = 0; 952 continue;
952 constexpr bool ms = false;
953 constexpr int sampled = 2; // This won't be accessed with a sampler
954 constexpr auto format = spv::ImageFormat::Unknown;
955 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
956 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
957 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
958 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
959
960 Decorate(id, spv::Decoration::Binding, binding++);
961 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
962 if (image.is_read && !image.is_written) {
963 Decorate(id, spv::Decoration::NonWritable);
964 } else if (image.is_written && !image.is_read) {
965 Decorate(id, spv::Decoration::NonReadable);
966 } 953 }
954 DeclareImage(image, binding);
955 }
956 return binding;
957 }
967 958
968 images.emplace(image.index, StorageImage{image_type, id}); 959 u32 DeclareImages(u32 binding) {
960 for (const auto& image : ir.GetImages()) {
961 if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
962 continue;
963 }
964 DeclareImage(image, binding);
969 } 965 }
970 return binding; 966 return binding;
971 } 967 }
972 968
969 void DeclareImage(const Image& image, u32& binding) {
970 const auto [dim, arrayed] = GetImageDim(image);
971 constexpr int depth = 0;
972 constexpr bool ms = false;
973 constexpr int sampled = 2; // This won't be accessed with a sampler
974 const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
975 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
976 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
977 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
978 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
979
980 Decorate(id, spv::Decoration::Binding, binding++);
981 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
982 if (image.is_read && !image.is_written) {
983 Decorate(id, spv::Decoration::NonWritable);
984 } else if (image.is_written && !image.is_read) {
985 Decorate(id, spv::Decoration::NonReadable);
986 }
987
988 images.emplace(image.index, StorageImage{image_type, id});
989 }
990
973 bool IsRenderTargetEnabled(u32 rt) const { 991 bool IsRenderTargetEnabled(u32 rt) const {
974 for (u32 component = 0; component < 4; ++component) { 992 for (u32 component = 0; component < 4; ++component) {
975 if (header.ps.IsColorComponentOutputEnabled(rt, component)) { 993 if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
@@ -1256,7 +1274,7 @@ private:
1256 } else { 1274 } else {
1257 UNREACHABLE_MSG("Unmanaged offset node type"); 1275 UNREACHABLE_MSG("Unmanaged offset node type");
1258 } 1276 }
1259 pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index, 1277 pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index,
1260 buffer_element); 1278 buffer_element);
1261 } 1279 }
1262 return {OpLoad(t_float, pointer), Type::Float}; 1280 return {OpLoad(t_float, pointer), Type::Float};
@@ -1611,7 +1629,7 @@ private:
1611 1629
1612 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b); 1630 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
1613 const Id carry = OpCompositeExtract(t_uint, result, 1); 1631 const Id carry = OpCompositeExtract(t_uint, result, 1);
1614 return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool}; 1632 return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool};
1615 } 1633 }
1616 1634
1617 Expression LogicalAssign(Operation operation) { 1635 Expression LogicalAssign(Operation operation) {
@@ -1674,7 +1692,7 @@ private:
1674 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1692 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1675 const u32 index = meta.sampler.index; 1693 const u32 index = meta.sampler.index;
1676 if (meta.sampler.is_buffer) { 1694 if (meta.sampler.is_buffer) {
1677 const auto& entry = texel_buffers.at(index); 1695 const auto& entry = uniform_texels.at(index);
1678 return OpLoad(entry.image_type, entry.image); 1696 return OpLoad(entry.image_type, entry.image);
1679 } else { 1697 } else {
1680 const auto& entry = sampled_images.at(index); 1698 const auto& entry = sampled_images.at(index);
@@ -1951,39 +1969,20 @@ private:
1951 return {}; 1969 return {};
1952 } 1970 }
1953 1971
1954 Expression AtomicImageAdd(Operation operation) { 1972 template <Id (Module::*func)(Id, Id, Id, Id, Id)>
1955 UNIMPLEMENTED(); 1973 Expression AtomicImage(Operation operation) {
1956 return {}; 1974 const auto& meta{std::get<MetaImage>(operation.GetMeta())};
1957 } 1975 ASSERT(meta.values.size() == 1);
1958
1959 Expression AtomicImageMin(Operation operation) {
1960 UNIMPLEMENTED();
1961 return {};
1962 }
1963
1964 Expression AtomicImageMax(Operation operation) {
1965 UNIMPLEMENTED();
1966 return {};
1967 }
1968
1969 Expression AtomicImageAnd(Operation operation) {
1970 UNIMPLEMENTED();
1971 return {};
1972 }
1973
1974 Expression AtomicImageOr(Operation operation) {
1975 UNIMPLEMENTED();
1976 return {};
1977 }
1978 1976
1979 Expression AtomicImageXor(Operation operation) { 1977 const Id coordinate = GetCoordinates(operation, Type::Int);
1980 UNIMPLEMENTED(); 1978 const Id image = images.at(meta.image.index).image;
1981 return {}; 1979 const Id sample = v_uint_zero;
1982 } 1980 const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);
1983 1981
1984 Expression AtomicImageExchange(Operation operation) { 1982 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
1985 UNIMPLEMENTED(); 1983 const Id semantics = v_uint_zero;
1986 return {}; 1984 const Id value = AsUint(Visit(meta.values[0]));
1985 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
1987 } 1986 }
1988 1987
1989 template <Id (Module::*func)(Id, Id, Id, Id, Id)> 1988 template <Id (Module::*func)(Id, Id, Id, Id, Id)>
@@ -1998,7 +1997,7 @@ private:
1998 return {v_float_zero, Type::Float}; 1997 return {v_float_zero, Type::Float};
1999 } 1998 }
2000 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device)); 1999 const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
2001 const Id semantics = Constant(t_uint, 0); 2000 const Id semantics = v_uint_zero;
2002 const Id value = AsUint(Visit(operation[1])); 2001 const Id value = AsUint(Visit(operation[1]));
2003 2002
2004 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint}; 2003 return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
@@ -2622,11 +2621,11 @@ private:
2622 2621
2623 &SPIRVDecompiler::ImageLoad, 2622 &SPIRVDecompiler::ImageLoad,
2624 &SPIRVDecompiler::ImageStore, 2623 &SPIRVDecompiler::ImageStore,
2625 &SPIRVDecompiler::AtomicImageAdd, 2624 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>,
2626 &SPIRVDecompiler::AtomicImageAnd, 2625 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>,
2627 &SPIRVDecompiler::AtomicImageOr, 2626 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>,
2628 &SPIRVDecompiler::AtomicImageXor, 2627 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>,
2629 &SPIRVDecompiler::AtomicImageExchange, 2628 &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>,
2630 2629
2631 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>, 2630 &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
2632 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>, 2631 &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
@@ -2768,8 +2767,11 @@ private:
2768 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); 2767 Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
2769 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); 2768 const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct);
2770 2769
2770 const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint);
2771
2771 const Id v_float_zero = Constant(t_float, 0.0f); 2772 const Id v_float_zero = Constant(t_float, 0.0f);
2772 const Id v_float_one = Constant(t_float, 1.0f); 2773 const Id v_float_one = Constant(t_float, 1.0f);
2774 const Id v_uint_zero = Constant(t_uint, 0);
2773 2775
2774 // Nvidia uses these defaults for varyings (e.g. position and generic attributes) 2776 // Nvidia uses these defaults for varyings (e.g. position and generic attributes)
2775 const Id v_varying_default = 2777 const Id v_varying_default =
@@ -2794,15 +2796,16 @@ private:
2794 std::unordered_map<u8, GenericVaryingDescription> output_attributes; 2796 std::unordered_map<u8, GenericVaryingDescription> output_attributes;
2795 std::map<u32, Id> constant_buffers; 2797 std::map<u32, Id> constant_buffers;
2796 std::map<GlobalMemoryBase, Id> global_buffers; 2798 std::map<GlobalMemoryBase, Id> global_buffers;
2797 std::map<u32, TexelBuffer> texel_buffers; 2799 std::map<u32, TexelBuffer> uniform_texels;
2798 std::map<u32, SampledImage> sampled_images; 2800 std::map<u32, SampledImage> sampled_images;
2801 std::map<u32, TexelBuffer> storage_texels;
2799 std::map<u32, StorageImage> images; 2802 std::map<u32, StorageImage> images;
2800 2803
2804 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2801 Id instance_index{}; 2805 Id instance_index{};
2802 Id vertex_index{}; 2806 Id vertex_index{};
2803 Id base_instance{}; 2807 Id base_instance{};
2804 Id base_vertex{}; 2808 Id base_vertex{};
2805 std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
2806 Id frag_depth{}; 2809 Id frag_depth{};
2807 Id frag_coord{}; 2810 Id frag_coord{};
2808 Id front_facing{}; 2811 Id front_facing{};
@@ -3058,13 +3061,17 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
3058 } 3061 }
3059 for (const auto& sampler : ir.GetSamplers()) { 3062 for (const auto& sampler : ir.GetSamplers()) {
3060 if (sampler.is_buffer) { 3063 if (sampler.is_buffer) {
3061 entries.texel_buffers.emplace_back(sampler); 3064 entries.uniform_texels.emplace_back(sampler);
3062 } else { 3065 } else {
3063 entries.samplers.emplace_back(sampler); 3066 entries.samplers.emplace_back(sampler);
3064 } 3067 }
3065 } 3068 }
3066 for (const auto& image : ir.GetImages()) { 3069 for (const auto& image : ir.GetImages()) {
3067 entries.images.emplace_back(image); 3070 if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
3071 entries.storage_texels.emplace_back(image);
3072 } else {
3073 entries.images.emplace_back(image);
3074 }
3068 } 3075 }
3069 for (const auto& attribute : ir.GetInputAttributes()) { 3076 for (const auto& attribute : ir.GetInputAttributes()) {
3070 if (IsGenericAttribute(attribute)) { 3077 if (IsGenericAttribute(attribute)) {
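
Editor's note: the five AtomicImage* stubs collapse into a single member template taking a pointer to the Sirit Module emitter as a non-type template parameter. The pattern in isolation, as a self-contained toy (nothing below is from the diff except the shape):

    struct Emitter {
        int OpAtomicIAdd(int a, int b) { return a + b; }
        int OpAtomicAnd(int a, int b) { return a & b; }

        template <int (Emitter::*func)(int, int)>
        int Atomic(int a, int b) {
            return (this->*func)(a, b); // dispatch chosen at compile time
        }
    };

    using Fn = int (Emitter::*)(int, int);
    constexpr Fn table[] = {
        &Emitter::Atomic<&Emitter::OpAtomicIAdd>,
        &Emitter::Atomic<&Emitter::OpAtomicAnd>,
    };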
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index b7af26388..2b0e90396 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -21,8 +21,9 @@ class VKDevice;
21namespace Vulkan { 21namespace Vulkan {
22 22
23using Maxwell = Tegra::Engines::Maxwell3D::Regs; 23using Maxwell = Tegra::Engines::Maxwell3D::Regs;
24using TexelBufferEntry = VideoCommon::Shader::Sampler; 24using UniformTexelEntry = VideoCommon::Shader::Sampler;
25using SamplerEntry = VideoCommon::Shader::Sampler; 25using SamplerEntry = VideoCommon::Shader::Sampler;
26using StorageTexelEntry = VideoCommon::Shader::Image;
26using ImageEntry = VideoCommon::Shader::Image; 27using ImageEntry = VideoCommon::Shader::Image;
27 28
28constexpr u32 DESCRIPTOR_SET = 0; 29constexpr u32 DESCRIPTOR_SET = 0;
@@ -66,13 +67,15 @@ private:
66struct ShaderEntries { 67struct ShaderEntries {
67 u32 NumBindings() const { 68 u32 NumBindings() const {
68 return static_cast<u32>(const_buffers.size() + global_buffers.size() + 69 return static_cast<u32>(const_buffers.size() + global_buffers.size() +
69 texel_buffers.size() + samplers.size() + images.size()); 70 uniform_texels.size() + samplers.size() + storage_texels.size() +
71 images.size());
70 } 72 }
71 73
72 std::vector<ConstBufferEntry> const_buffers; 74 std::vector<ConstBufferEntry> const_buffers;
73 std::vector<GlobalBufferEntry> global_buffers; 75 std::vector<GlobalBufferEntry> global_buffers;
74 std::vector<TexelBufferEntry> texel_buffers; 76 std::vector<UniformTexelEntry> uniform_texels;
75 std::vector<SamplerEntry> samplers; 77 std::vector<SamplerEntry> samplers;
78 std::vector<StorageTexelEntry> storage_texels;
76 std::vector<ImageEntry> images; 79 std::vector<ImageEntry> images;
77 std::set<u32> attributes; 80 std::set<u32> attributes;
78 std::array<bool, Maxwell::NumClipDistances> clip_distances{}; 81 std::array<bool, Maxwell::NumClipDistances> clip_distances{};
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 2f1d5021d..ea487b770 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -100,8 +100,8 @@ vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
100 ci.pNext = nullptr; 100 ci.pNext = nullptr;
101 ci.flags = 0; 101 ci.flags = 0;
102 ci.size = static_cast<VkDeviceSize>(host_memory_size); 102 ci.size = static_cast<VkDeviceSize>(host_memory_size);
103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | 103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
104 VK_BUFFER_USAGE_TRANSFER_DST_BIT; 104 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
106 ci.queueFamilyIndexCount = 0; 106 ci.queueFamilyIndexCount = 0;
107 ci.pQueueFamilyIndices = nullptr; 107 ci.pQueueFamilyIndices = nullptr;
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 45e3ddd2c..6f63217a2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -655,45 +655,63 @@ private:
655 **/ 655 **/
656 std::optional<std::pair<TSurface, TView>> TryReconstructSurface(VectorSurface& overlaps, 656 std::optional<std::pair<TSurface, TView>> TryReconstructSurface(VectorSurface& overlaps,
657 const SurfaceParams& params, 657 const SurfaceParams& params,
658 const GPUVAddr gpu_addr) { 658 GPUVAddr gpu_addr) {
659 if (params.target == SurfaceTarget::Texture3D) { 659 if (params.target == SurfaceTarget::Texture3D) {
660 return {}; 660 return std::nullopt;
661 } 661 }
662 bool modified = false; 662 const auto test_modified = [](TSurface& surface) { return surface->IsModified(); };
663 TSurface new_surface = GetUncachedSurface(gpu_addr, params); 663 TSurface new_surface = GetUncachedSurface(gpu_addr, params);
664 u32 passed_tests = 0; 664
665 if (std::none_of(overlaps.begin(), overlaps.end(), test_modified)) {
666 LoadSurface(new_surface);
667 for (const auto& surface : overlaps) {
668 Unregister(surface);
669 }
670 Register(new_surface);
671 return {{new_surface, new_surface->GetMainView()}};
672 }
673
674 std::size_t passed_tests = 0;
665 for (auto& surface : overlaps) { 675 for (auto& surface : overlaps) {
666 const SurfaceParams& src_params = surface->GetSurfaceParams(); 676 const SurfaceParams& src_params = surface->GetSurfaceParams();
667 if (src_params.is_layered || src_params.num_levels > 1) { 677 const auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
668 // We send this cases to recycle as they are more complex to handle
669 return {};
670 }
671 const std::size_t candidate_size = surface->GetSizeInBytes();
672 auto mipmap_layer{new_surface->GetLayerMipmap(surface->GetGpuAddr())};
673 if (!mipmap_layer) { 678 if (!mipmap_layer) {
674 continue; 679 continue;
675 } 680 }
676 const auto [layer, mipmap] = *mipmap_layer; 681 const auto [base_layer, base_mipmap] = *mipmap_layer;
677 if (new_surface->GetMipmapSize(mipmap) != candidate_size) { 682 if (new_surface->GetMipmapSize(base_mipmap) != surface->GetMipmapSize(0)) {
678 continue; 683 continue;
679 } 684 }
680 modified |= surface->IsModified(); 685 ++passed_tests;
681 // Now we got all the data set up 686
682 const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap); 687 // Copy all mipmaps and layers
683 const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap); 688 const u32 block_width = params.GetDefaultBlockWidth();
684 const CopyParams copy_params(0, 0, 0, 0, 0, layer, 0, mipmap, width, height, 1); 689 const u32 block_height = params.GetDefaultBlockHeight();
685 passed_tests++; 690 for (u32 mipmap = base_mipmap; mipmap < base_mipmap + src_params.num_levels; ++mipmap) {
686 ImageCopy(surface, new_surface, copy_params); 691 const u32 width = SurfaceParams::IntersectWidth(src_params, params, 0, mipmap);
692 const u32 height = SurfaceParams::IntersectHeight(src_params, params, 0, mipmap);
693 if (width < block_width || height < block_height) {
694 // Current APIs forbid copying small compressed textures; break to avoid errors
695 break;
696 }
697 const CopyParams copy_params(0, 0, 0, 0, 0, base_layer, 0, mipmap, width, height,
698 src_params.depth);
699 ImageCopy(surface, new_surface, copy_params);
700 }
687 } 701 }
688 if (passed_tests == 0) { 702 if (passed_tests == 0) {
689 return {}; 703 return std::nullopt;
704 }
705 if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) {
690 // In Accurate GPU all tests should pass, else we recycle 706 // In Accurate GPU all tests should pass, else we recycle
691 } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) { 707 return std::nullopt;
692 return {};
693 } 708 }
709
710 const bool modified = std::any_of(overlaps.begin(), overlaps.end(), test_modified);
694 for (const auto& surface : overlaps) { 711 for (const auto& surface : overlaps) {
695 Unregister(surface); 712 Unregister(surface);
696 } 713 }
714
697 new_surface->MarkAsModified(modified, Tick()); 715 new_surface->MarkAsModified(modified, Tick());
698 Register(new_surface); 716 Register(new_surface);
699 return {{new_surface, new_surface->GetMainView()}}; 717 return {{new_surface, new_surface->GetMainView()}};
@@ -871,12 +889,9 @@ private:
871 // two things either the candidate surface is a supertexture of the overlap 889 // two things either the candidate surface is a supertexture of the overlap
872 // or they don't match in any known way. 890 // or they don't match in any known way.
873 if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) { 891 if (!current_surface->IsInside(gpu_addr, gpu_addr + candidate_size)) {
874 if (current_surface->GetGpuAddr() == gpu_addr) { 892 const std::optional view = TryReconstructSurface(overlaps, params, gpu_addr);
875 std::optional<std::pair<TSurface, TView>> view = 893 if (view) {
876 TryReconstructSurface(overlaps, params, gpu_addr); 894 return *view;
877 if (view) {
878 return *view;
879 }
880 } 895 }
881 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents, 896 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
882 MatchTopologyResult::FullMatch); 897 MatchTopologyResult::FullMatch);
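
Editor's note: the new per-mip copy loop in TryReconstructSurface stops as soon as an intersected mip level is smaller than the compressed block, since current APIs reject such copies. A worked example under assumed numbers: for a 64x64 BC1-style surface (4x4 blocks), the 64, 32, 16, 8 and 4 pixel mips are copied, while the 2x2 and 1x1 tails are skipped:

    const u32 block_width = 4;                // assumed BC1-style block size
    for (u32 mipmap = 0; mipmap < 7; ++mipmap) {
        const u32 width = 64u >> mipmap;      // 64, 32, ..., 1
        if (width < block_width) {
            break; // mirrors the width/height guard in TryReconstructSurface
        }
        // ImageCopy(...) would run for this level
    }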
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index b08b87426..7e9073cc3 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -533,6 +533,8 @@ void Config::ReadDebuggingValues() {
533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool(); 533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
534 Settings::values.disable_cpu_opt = 534 Settings::values.disable_cpu_opt =
535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool(); 535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool();
536 Settings::values.disable_macro_jit =
537 ReadSetting(QStringLiteral("disable_macro_jit"), false).toBool();
536 538
537 qt_config->endGroup(); 539 qt_config->endGroup();
538} 540}
@@ -1011,6 +1013,7 @@ void Config::SaveDebuggingValues() {
1011 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false); 1013 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
1012 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false); 1014 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
1013 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false); 1015 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false);
1016 WriteSetting(QStringLiteral("disable_macro_jit"), Settings::values.disable_macro_jit, false);
1014 1017
1015 qt_config->endGroup(); 1018 qt_config->endGroup();
1016} 1019}
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index c2026763e..2c77441fd 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -39,6 +39,8 @@ void ConfigureDebug::SetConfiguration() {
39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt); 39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt);
40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn()); 40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn());
41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug); 41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug);
42 ui->disable_macro_jit->setEnabled(!Core::System::GetInstance().IsPoweredOn());
43 ui->disable_macro_jit->setChecked(Settings::values.disable_macro_jit);
42} 44}
43 45
44void ConfigureDebug::ApplyConfiguration() { 46void ConfigureDebug::ApplyConfiguration() {
@@ -51,6 +53,7 @@ void ConfigureDebug::ApplyConfiguration() {
51 Settings::values.quest_flag = ui->quest_flag->isChecked(); 53 Settings::values.quest_flag = ui->quest_flag->isChecked();
52 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked(); 54 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked();
53 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked(); 55 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked();
56 Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
54 Debugger::ToggleConsole(); 57 Debugger::ToggleConsole();
55 Log::Filter filter; 58 Log::Filter filter;
56 filter.ParseFilterString(Settings::values.log_filter); 59 filter.ParseFilterString(Settings::values.log_filter);
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index e0d4c4a44..46f0208c6 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -148,6 +148,19 @@
148 </property> 148 </property>
149 </widget> 149 </widget>
150 </item> 150 </item>
151 <item>
152 <widget class="QCheckBox" name="disable_macro_jit">
153 <property name="enabled">
154 <bool>true</bool>
155 </property>
156 <property name="whatsThis">
157 <string>When checked, it disables the macro Just-In-Time compiler. Enabling this makes games run slower.</string>
158 </property>
159 <property name="text">
160 <string>Disable Macro JIT</string>
161 </property>
162 </widget>
163 </item>
151 </layout> 164 </layout>
152 </widget> 165 </widget>
153 </item> 166 </item>
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index e4eb5594b..a05fa64ba 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -480,7 +480,9 @@ void ConfigureInputPlayer::RestoreDefaults() {
480 SetAnalogButton(params, analogs_param[analog_id], analog_sub_buttons[sub_button_id]); 480 SetAnalogButton(params, analogs_param[analog_id], analog_sub_buttons[sub_button_id]);
481 } 481 }
482 } 482 }
483
483 UpdateButtonLabels(); 484 UpdateButtonLabels();
485 ApplyConfiguration();
484} 486}
485 487
486void ConfigureInputPlayer::ClearAll() { 488void ConfigureInputPlayer::ClearAll() {
@@ -505,6 +507,7 @@ void ConfigureInputPlayer::ClearAll() {
505 } 507 }
506 508
507 UpdateButtonLabels(); 509 UpdateButtonLabels();
510 ApplyConfiguration();
508} 511}
509 512
510void ConfigureInputPlayer::UpdateButtonLabels() { 513void ConfigureInputPlayer::UpdateButtonLabels() {
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index c20d48c42..7240270f5 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -432,6 +432,8 @@ void Config::ReadValues() {
432 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false); 432 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
433 Settings::values.disable_cpu_opt = 433 Settings::values.disable_cpu_opt =
434 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false); 434 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false);
435 Settings::values.disable_macro_jit =
436 sdl2_config->GetBoolean("Debugging", "disable_macro_jit", false);
435 437
436 const auto title_list = sdl2_config->Get("AddOns", "title_ids", ""); 438 const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
437 std::stringstream ss(title_list); 439 std::stringstream ss(title_list);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index abc6e6e65..6f53e9659 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -291,6 +291,8 @@ quest_flag =
291# Determines whether or not JIT CPU optimizations are enabled 291# Determines whether or not JIT CPU optimizations are enabled
292# false: Optimizations Enabled, true: Optimizations Disabled 292# false: Optimizations Enabled, true: Optimizations Disabled
293disable_cpu_opt = 293disable_cpu_opt =
294# Determines whether or not the macro JIT compiler is disabled
295disable_macro_jit =
294 296
295[WebService] 297[WebService]
296# Whether or not to enable telemetry 298# Whether or not to enable telemetry