Diffstat (limited to 'src/video_core/macro')
-rw-r--r--  src/video_core/macro/macro.cpp              |  45
-rw-r--r--  src/video_core/macro/macro.h                | 128
-rw-r--r--  src/video_core/macro/macro_interpreter.cpp  | 288
-rw-r--r--  src/video_core/macro/macro_interpreter.h    | 102
-rw-r--r--  src/video_core/macro/macro_jit_x64.cpp      | 633
-rw-r--r--  src/video_core/macro/macro_jit_x64.h        |  98
6 files changed, 1294 insertions, 0 deletions
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
new file mode 100644
index 000000000..85a6e5dd4
--- /dev/null
+++ b/src/video_core/macro/macro.cpp
@@ -0,0 +1,45 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "core/settings.h"
8#include "video_core/macro/macro.h"
9#include "video_core/macro/macro_interpreter.h"
10#include "video_core/macro/macro_jit_x64.h"
11
12namespace Tegra {
13
14void MacroEngine::AddCode(u32 method, u32 data) {
15 uploaded_macro_code[method].push_back(data);
16}
17
18void MacroEngine::Execute(u32 method, std::vector<u32> parameters) {
19 auto compiled_macro = macro_cache.find(method);
20 if (compiled_macro != macro_cache.end()) {
21 compiled_macro->second->Execute(parameters, method);
22 } else {
23 // Macro not compiled, check if it's uploaded and if so, compile it
24 auto macro_code = uploaded_macro_code.find(method);
25 if (macro_code == uploaded_macro_code.end()) {
26 UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
27 return;
28 }
29 macro_cache[method] = Compile(macro_code->second);
30 macro_cache[method]->Execute(parameters, method);
31 }
32}
33
34std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
35 if (Settings::values.disable_macro_jit) {
36 return std::make_unique<MacroInterpreter>(maxwell3d);
37 }
38#ifdef ARCHITECTURE_x86_64
39 return std::make_unique<MacroJITx64>(maxwell3d);
40#else
41 return std::make_unique<MacroInterpreter>(maxwell3d);
42#endif
43}
44
45} // namespace Tegra
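
Note: MacroEngine compiles a macro lazily the first time it is executed and caches the result keyed by the starting method. The following is a minimal standalone sketch of that compile-on-first-use pattern; the types are simplified stand-ins for illustration only, not the actual yuzu classes.

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    // Simplified stand-in for CachedMacro: anything that can be executed later.
    struct CompiledMacro {
        std::vector<uint32_t> words;
        void Execute(const std::vector<uint32_t>& params) const {
            std::printf("executing %zu words with %zu params\n", words.size(), params.size());
        }
    };

    struct Engine {
        std::unordered_map<uint32_t, std::vector<uint32_t>> uploaded; // method -> raw code
        std::unordered_map<uint32_t, std::unique_ptr<CompiledMacro>> cache;

        void AddCode(uint32_t method, uint32_t data) { uploaded[method].push_back(data); }

        void Execute(uint32_t method, const std::vector<uint32_t>& params) {
            auto it = cache.find(method);
            if (it == cache.end()) {
                // Compile on first use, mirroring MacroEngine::Execute above.
                it = cache.emplace(method, std::make_unique<CompiledMacro>(
                                               CompiledMacro{uploaded.at(method)})).first;
            }
            it->second->Execute(params);
        }
    };

    int main() {
        Engine engine;
        engine.AddCode(0x45, 0x00000301); // placeholder macro words
        engine.AddCode(0x45, 0x00000091);
        engine.Execute(0x45, {42});       // compiles, then runs
        engine.Execute(0x45, {7});        // cache hit
    }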
diff --git a/src/video_core/macro/macro.h b/src/video_core/macro/macro.h
new file mode 100644
index 000000000..28ca243d1
--- /dev/null
+++ b/src/video_core/macro/macro.h
@@ -0,0 +1,128 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9#include <vector>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12
13namespace Tegra {
14namespace Engines {
15class Maxwell3D;
16}
17namespace Macro {
18constexpr std::size_t NUM_MACRO_REGISTERS = 8;
19enum class Operation : u32 {
20 ALU = 0,
21 AddImmediate = 1,
22 ExtractInsert = 2,
23 ExtractShiftLeftImmediate = 3,
24 ExtractShiftLeftRegister = 4,
25 Read = 5,
26 Unused = 6, // This operation doesn't seem to be a valid encoding.
27 Branch = 7,
28};
29
30enum class ALUOperation : u32 {
31 Add = 0,
32 AddWithCarry = 1,
33 Subtract = 2,
34 SubtractWithBorrow = 3,
35 // Operations 4-7 don't seem to be valid encodings.
36 Xor = 8,
37 Or = 9,
38 And = 10,
39 AndNot = 11,
40 Nand = 12
41};
42
43enum class ResultOperation : u32 {
44 IgnoreAndFetch = 0,
45 Move = 1,
46 MoveAndSetMethod = 2,
47 FetchAndSend = 3,
48 MoveAndSend = 4,
49 FetchAndSetMethod = 5,
50 MoveAndSetMethodFetchAndSend = 6,
51 MoveAndSetMethodSend = 7
52};
53
54enum class BranchCondition : u32 {
55 Zero = 0,
56 NotZero = 1,
57};
58
59union Opcode {
60 u32 raw;
61 BitField<0, 3, Operation> operation;
62 BitField<4, 3, ResultOperation> result_operation;
63 BitField<4, 1, BranchCondition> branch_condition;
64 // If set on a branch, then the branch doesn't have a delay slot.
65 BitField<5, 1, u32> branch_annul;
66 BitField<7, 1, u32> is_exit;
67 BitField<8, 3, u32> dst;
68 BitField<11, 3, u32> src_a;
69 BitField<14, 3, u32> src_b;
70 // The signed immediate overlaps the second source operand and the alu operation.
71 BitField<14, 18, s32> immediate;
72
73 BitField<17, 5, ALUOperation> alu_operation;
74
75 // Bitfield instruction data
76 BitField<17, 5, u32> bf_src_bit;
77 BitField<22, 5, u32> bf_size;
78 BitField<27, 5, u32> bf_dst_bit;
79
80 u32 GetBitfieldMask() const {
81 return (1 << bf_size) - 1;
82 }
83
84 s32 GetBranchTarget() const {
85 return static_cast<s32>(immediate * sizeof(u32));
86 }
87};
88
89union MethodAddress {
90 u32 raw;
91 BitField<0, 12, u32> address;
92 BitField<12, 6, u32> increment;
93};
94
95} // namespace Macro
96
97class CachedMacro {
98public:
99 virtual ~CachedMacro() = default;
100 /**
101 * Executes the macro code with the specified input parameters.
102 * @param parameters The input parameters of the macro
103 * @param method The method that invoked the macro
104 */
105 virtual void Execute(std::vector<u32>& parameters, u32 method) = 0;
106};
107
108class MacroEngine {
109public:
110 virtual ~MacroEngine() = default;
111
112 // Stores the uploaded macro code so it can be compiled when it is called.
113 void AddCode(u32 method, u32 data);
114
115 // Compiles the macro if it's not in the cache, then executes the compiled macro
116 void Execute(u32 method, std::vector<u32> parameters);
117
118protected:
119 virtual std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) = 0;
120
121private:
122 std::unordered_map<u32, std::unique_ptr<CachedMacro>> macro_cache;
123 std::unordered_map<u32, std::vector<u32>> uploaded_macro_code;
124};
125
126std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
127
128} // namespace Tegra
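
Note: the Macro::Opcode union above packs several overlapping encodings into a single 32-bit word. Below is a standalone decoding sketch using plain shifts and masks that mirror the BitField positions declared in the header; the sample word is arbitrary and purely illustrative.

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t raw = 0x00001811; // arbitrary example word
        const uint32_t operation = raw & 0x7;                // bits 0-2
        const uint32_t result_operation = (raw >> 4) & 0x7;  // bits 4-6
        const uint32_t is_exit = (raw >> 7) & 0x1;           // bit 7
        const uint32_t dst = (raw >> 8) & 0x7;               // bits 8-10
        const uint32_t src_a = (raw >> 11) & 0x7;            // bits 11-13
        const uint32_t src_b = (raw >> 14) & 0x7;            // bits 14-16
        // Signed immediate occupies bits 14-31; an arithmetic shift sign-extends it.
        const int32_t immediate = static_cast<int32_t>(raw) >> 14;
        std::printf("op=%u res=%u exit=%u dst=%u src_a=%u src_b=%u imm=%d\n",
                    operation, result_operation, is_exit, dst, src_a, src_b, immediate);
    }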
diff --git a/src/video_core/macro/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
new file mode 100644
index 000000000..e63296a21
--- /dev/null
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -0,0 +1,288 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "video_core/engines/maxwell_3d.h"
9#include "video_core/macro/macro_interpreter.h"
10
11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
12
13namespace Tegra {
14MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
15
16std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
17 return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
18}
19
20MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
21 const std::vector<u32>& code)
22 : maxwell3d(maxwell3d), code(code) {}
23
24void MacroInterpreterImpl::Execute(std::vector<u32>& parameters, u32 method) {
25 MICROPROFILE_SCOPE(MacroInterp);
26 Reset();
27
28 registers[1] = parameters[0];
29 num_parameters = parameters.size();
30
31 if (num_parameters > parameters_capacity) {
32 parameters_capacity = num_parameters;
33 this->parameters = std::make_unique<u32[]>(num_parameters);
34 }
35 std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
36 this->num_parameters = num_parameters;
37
38 // Execute the code until we hit an exit condition.
39 bool keep_executing = true;
40 while (keep_executing) {
41 keep_executing = Step(false);
42 }
43
44 // Assert that the macro used all the input parameters
45 ASSERT(next_parameter_index == num_parameters);
46}
47
48void MacroInterpreterImpl::Reset() {
49 registers = {};
50 pc = 0;
51 delayed_pc = {};
52 method_address.raw = 0;
53 num_parameters = 0;
54 // The next parameter index starts at 1, because $r1 already has the value of the first
55 // parameter.
56 next_parameter_index = 1;
57 carry_flag = false;
58}
59
60bool MacroInterpreterImpl::Step(bool is_delay_slot) {
61 u32 base_address = pc;
62
63 Macro::Opcode opcode = GetOpcode();
64 pc += 4;
65
66 // Update the program counter if we were delayed
67 if (delayed_pc) {
68 ASSERT(is_delay_slot);
69 pc = *delayed_pc;
70 delayed_pc = {};
71 }
72
73 switch (opcode.operation) {
74 case Macro::Operation::ALU: {
75 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
76 GetRegister(opcode.src_b));
77 ProcessResult(opcode.result_operation, opcode.dst, result);
78 break;
79 }
80 case Macro::Operation::AddImmediate: {
81 ProcessResult(opcode.result_operation, opcode.dst,
82 GetRegister(opcode.src_a) + opcode.immediate);
83 break;
84 }
85 case Macro::Operation::ExtractInsert: {
86 u32 dst = GetRegister(opcode.src_a);
87 u32 src = GetRegister(opcode.src_b);
88
89 src = (src >> opcode.bf_src_bit) & opcode.GetBitfieldMask();
90 dst &= ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
91 dst |= src << opcode.bf_dst_bit;
92 ProcessResult(opcode.result_operation, opcode.dst, dst);
93 break;
94 }
95 case Macro::Operation::ExtractShiftLeftImmediate: {
96 u32 dst = GetRegister(opcode.src_a);
97 u32 src = GetRegister(opcode.src_b);
98
99 u32 result = ((src >> dst) & opcode.GetBitfieldMask()) << opcode.bf_dst_bit;
100
101 ProcessResult(opcode.result_operation, opcode.dst, result);
102 break;
103 }
104 case Macro::Operation::ExtractShiftLeftRegister: {
105 u32 dst = GetRegister(opcode.src_a);
106 u32 src = GetRegister(opcode.src_b);
107
108 u32 result = ((src >> opcode.bf_src_bit) & opcode.GetBitfieldMask()) << dst;
109
110 ProcessResult(opcode.result_operation, opcode.dst, result);
111 break;
112 }
113 case Macro::Operation::Read: {
114 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
115 ProcessResult(opcode.result_operation, opcode.dst, result);
116 break;
117 }
118 case Macro::Operation::Branch: {
119 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
120 u32 value = GetRegister(opcode.src_a);
121 bool taken = EvaluateBranchCondition(opcode.branch_condition, value);
122 if (taken) {
123 // Ignore the delay slot if the branch has the annul bit.
124 if (opcode.branch_annul) {
125 pc = base_address + opcode.GetBranchTarget();
126 return true;
127 }
128
129 delayed_pc = base_address + opcode.GetBranchTarget();
130 // Execute one more instruction due to the delay slot.
131 return Step(true);
132 }
133 break;
134 }
135 default:
136 UNIMPLEMENTED_MSG("Unimplemented macro operation {}",
137 static_cast<u32>(opcode.operation.Value()));
138 }
139
140 // An instruction with the Exit flag will not actually
141 // cause an exit if it's executed inside a delay slot.
142 if (opcode.is_exit && !is_delay_slot) {
143 // Exit has a delay slot, execute the next instruction
144 Step(true);
145 return false;
146 }
147
148 return true;
149}
150
151u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
152 switch (operation) {
153 case Macro::ALUOperation::Add: {
154 const u64 result{static_cast<u64>(src_a) + src_b};
155 carry_flag = result > 0xffffffff;
156 return static_cast<u32>(result);
157 }
158 case Macro::ALUOperation::AddWithCarry: {
159 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)};
160 carry_flag = result > 0xffffffff;
161 return static_cast<u32>(result);
162 }
163 case Macro::ALUOperation::Subtract: {
164 const u64 result{static_cast<u64>(src_a) - src_b};
165 carry_flag = result < 0x100000000;
166 return static_cast<u32>(result);
167 }
168 case Macro::ALUOperation::SubtractWithBorrow: {
169 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)};
170 carry_flag = result < 0x100000000;
171 return static_cast<u32>(result);
172 }
173 case Macro::ALUOperation::Xor:
174 return src_a ^ src_b;
175 case Macro::ALUOperation::Or:
176 return src_a | src_b;
177 case Macro::ALUOperation::And:
178 return src_a & src_b;
179 case Macro::ALUOperation::AndNot:
180 return src_a & ~src_b;
181 case Macro::ALUOperation::Nand:
182 return ~(src_a & src_b);
183
184 default:
185 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}", static_cast<u32>(operation));
186 return 0;
187 }
188}
189
190void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
191 switch (operation) {
192 case Macro::ResultOperation::IgnoreAndFetch:
193 // Fetch parameter and ignore result.
194 SetRegister(reg, FetchParameter());
195 break;
196 case Macro::ResultOperation::Move:
197 // Move result.
198 SetRegister(reg, result);
199 break;
200 case Macro::ResultOperation::MoveAndSetMethod:
201 // Move result and use as Method Address.
202 SetRegister(reg, result);
203 SetMethodAddress(result);
204 break;
205 case Macro::ResultOperation::FetchAndSend:
206 // Fetch parameter and send result.
207 SetRegister(reg, FetchParameter());
208 Send(result);
209 break;
210 case Macro::ResultOperation::MoveAndSend:
211 // Move and send result.
212 SetRegister(reg, result);
213 Send(result);
214 break;
215 case Macro::ResultOperation::FetchAndSetMethod:
216 // Fetch parameter and use result as Method Address.
217 SetRegister(reg, FetchParameter());
218 SetMethodAddress(result);
219 break;
220 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
221 // Move result and use as Method Address, then fetch and send parameter.
222 SetRegister(reg, result);
223 SetMethodAddress(result);
224 Send(FetchParameter());
225 break;
226 case Macro::ResultOperation::MoveAndSetMethodSend:
227 // Move result and use as Method Address, then send bits 12:17 of result.
228 SetRegister(reg, result);
229 SetMethodAddress(result);
230 Send((result >> 12) & 0b111111);
231 break;
232 default:
233 UNIMPLEMENTED_MSG("Unimplemented result operation {}", static_cast<u32>(operation));
234 }
235}
236
237bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
238 switch (cond) {
239 case Macro::BranchCondition::Zero:
240 return value == 0;
241 case Macro::BranchCondition::NotZero:
242 return value != 0;
243 }
244 UNREACHABLE();
245 return true;
246}
247
248Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
249 ASSERT((pc % sizeof(u32)) == 0);
250 ASSERT(pc < code.size() * sizeof(u32));
251 return {code[pc / sizeof(u32)]};
252}
253
254u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
255 return registers.at(register_id);
256}
257
258void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
259 // Register 0 is hardwired as the zero register.
260 // Ensure no writes to it actually occur.
261 if (register_id == 0) {
262 return;
263 }
264
265 registers.at(register_id) = value;
266}
267
268void MacroInterpreterImpl::SetMethodAddress(u32 address) {
269 method_address.raw = address;
270}
271
272void MacroInterpreterImpl::Send(u32 value) {
273 maxwell3d.CallMethodFromMME(method_address.address, value);
274 // Increment the method address by the method increment.
275 method_address.address.Assign(method_address.address.Value() +
276 method_address.increment.Value());
277}
278
279u32 MacroInterpreterImpl::Read(u32 method) const {
280 return maxwell3d.GetRegisterValue(method);
281}
282
283u32 MacroInterpreterImpl::FetchParameter() {
284 ASSERT(next_parameter_index < num_parameters);
285 return parameters[next_parameter_index++];
286}
287
288} // namespace Tegra
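
Note: GetALUResult performs the arithmetic in 64 bits and derives the carry flag from the upper half. A small standalone illustration of that convention, with arbitrary sample values:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t a = 0xfffffffe, b = 0x00000005;

        // Add: carry is set when the 64-bit sum no longer fits in 32 bits.
        const uint64_t sum = static_cast<uint64_t>(a) + b;
        const bool carry = sum > 0xffffffff;
        std::printf("add: result=0x%08x carry=%d\n", static_cast<uint32_t>(sum), carry);

        // Subtract: carry here means "no borrow", i.e. no 64-bit underflow occurred.
        const uint64_t diff = static_cast<uint64_t>(b) - a; // underflows
        const bool no_borrow = diff < 0x100000000ULL;
        std::printf("sub: result=0x%08x carry=%d\n", static_cast<uint32_t>(diff), no_borrow);
    }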
diff --git a/src/video_core/macro/macro_interpreter.h b/src/video_core/macro/macro_interpreter.h
new file mode 100644
index 000000000..fb923f7b9
--- /dev/null
+++ b/src/video_core/macro/macro_interpreter.h
@@ -0,0 +1,102 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6#include <array>
7#include <optional>
8#include <vector>
9#include "common/bit_field.h"
10#include "common/common_types.h"
11#include "video_core/macro/macro.h"
12
13namespace Tegra {
14namespace Engines {
15class Maxwell3D;
16}
17
18class MacroInterpreter final : public MacroEngine {
19public:
20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
21
22protected:
23 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
24
25private:
26 Engines::Maxwell3D& maxwell3d;
27};
28
29class MacroInterpreterImpl : public CachedMacro {
30public:
31 MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
32 void Execute(std::vector<u32>& parameters, u32 method) override;
33
34private:
35 /// Resets the execution engine state, zeroing registers, etc.
36 void Reset();
37
38 /**
39 * Executes a single macro instruction located at the current program counter. Returns whether
40 * the interpreter should keep running.
41 * @returns True if execution should continue, false once an exit instruction has been handled.
42 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
43 * previous instruction.
44 */
45 bool Step(bool is_delay_slot);
46
47 /// Calculates the result of an ALU operation (src_a OP src_b).
48 u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
49
50 /// Performs the result operation on the input result and stores it in the specified register
51 /// (if necessary).
52 void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
53
54 /// Evaluates the branch condition and returns whether the branch should be taken or not.
55 bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
56
57 /// Reads an opcode at the current program counter location.
58 Macro::Opcode GetOpcode() const;
59
60 /// Returns the specified register's value. Register 0 is hardcoded to always return 0.
61 u32 GetRegister(u32 register_id) const;
62
63 /// Sets the register to the input value.
64 void SetRegister(u32 register_id, u32 value);
65
66 /// Sets the method address to use for the next Send instruction.
67 void SetMethodAddress(u32 address);
68
69 /// Calls a GPU Engine method with the input parameter.
70 void Send(u32 value);
71
72 /// Reads a GPU register located at the method address.
73 u32 Read(u32 method) const;
74
75 /// Returns the next parameter in the parameter queue.
76 u32 FetchParameter();
77
78 Engines::Maxwell3D& maxwell3d;
79
80 /// Current program counter
81 u32 pc;
82 /// Program counter to execute at after the delay slot is executed.
83 std::optional<u32> delayed_pc;
84
85 /// General purpose macro registers.
86 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
87
88 /// Method address to use for the next Send instruction.
89 Macro::MethodAddress method_address = {};
90
91 /// Input parameters of the current macro.
92 std::unique_ptr<u32[]> parameters;
93 std::size_t num_parameters = 0;
94 std::size_t parameters_capacity = 0;
95 /// Index of the next parameter that will be fetched by the 'parm' instruction.
96 u32 next_parameter_index = 0;
97
98 bool carry_flag = false;
99 const std::vector<u32>& code;
100};
101
102} // namespace Tegra
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
new file mode 100644
index 000000000..1b657236a
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -0,0 +1,633 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "common/x64/xbyak_util.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/macro/macro_interpreter.h"
11#include "video_core/macro/macro_jit_x64.h"
12
13MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16namespace Tegra {
17using JitFunction = void (MacroJITx64Impl::*)(Macro::Opcode opcode);
18const std::array<JitFunction, 8> InstructionTable{
19 &MacroJITx64Impl::Compile_ALU,
20 &MacroJITx64Impl::Compile_AddImmediate,
21 &MacroJITx64Impl::Compile_ExtractInsert,
22 &MacroJITx64Impl::Compile_ExtractShiftLeftImmediate,
23 &MacroJITx64Impl::Compile_ExtractShiftLeftRegister,
24 &MacroJITx64Impl::Compile_Read,
25 nullptr,
26 &MacroJITx64Impl::Compile_Branch,
27};
28
29static const Xbyak::Reg64 PARAMETERS = Xbyak::util::r9;
30static const Xbyak::Reg64 REGISTERS = Xbyak::util::r10;
31static const Xbyak::Reg64 STATE = Xbyak::util::r11;
32static const Xbyak::Reg64 NEXT_PARAMETER = Xbyak::util::r12;
33static const Xbyak::Reg32 RESULT = Xbyak::util::r13d;
34static const Xbyak::Reg64 RESULT_64 = Xbyak::util::r13;
35static const Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d;
36static const Xbyak::Reg64 METHOD_ADDRESS_64 = Xbyak::util::r14;
37static const Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;
38
39static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
40 PARAMETERS,
41 REGISTERS,
42 STATE,
43 NEXT_PARAMETER,
44 RESULT,
45 METHOD_ADDRESS,
46 BRANCH_HOLDER,
47});
48
49MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
50
51std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
52 return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
53}
54
55MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
56 : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
57 Compile();
58}
59
60MacroJITx64Impl::~MacroJITx64Impl() = default;
61
62void MacroJITx64Impl::Execute(std::vector<u32>& parameters, u32 method) {
63 MICROPROFILE_SCOPE(MacroJitExecute);
64 ASSERT_OR_EXECUTE(program != nullptr, { return; });
65 JITState state{};
66 state.maxwell3d = &maxwell3d;
67 state.registers = {};
68 state.parameters = parameters.data();
69 program(&state);
70}
71
72void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
73 const bool is_a_zero = opcode.src_a == 0;
74 const bool is_b_zero = opcode.src_b == 0;
75 const bool valid_operation = !is_a_zero && !is_b_zero;
76 const bool is_move_operation = !is_a_zero && is_b_zero;
77 const bool has_zero_register = is_a_zero || is_b_zero;
78
79 Xbyak::Reg64 src_a;
80 Xbyak::Reg32 src_b;
81
82 if (!optimizer.zero_reg_skip) {
83 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
84 src_b = Compile_GetRegister(opcode.src_b, ebx);
85 } else {
86 if (!is_a_zero) {
87 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
88 }
89 if (!is_b_zero) {
90 src_b = Compile_GetRegister(opcode.src_b, ebx);
91 }
92 }
93 Xbyak::Label skip_carry{};
94
95 bool has_emitted = false;
96
97 switch (opcode.alu_operation) {
98 case Macro::ALUOperation::Add:
99 if (optimizer.zero_reg_skip) {
100 if (valid_operation) {
101 add(src_a, src_b);
102 }
103 } else {
104 add(src_a, src_b);
105 }
106
107 if (!optimizer.can_skip_carry) {
108 setc(byte[STATE + offsetof(JITState, carry_flag)]);
109 }
110 break;
111 case Macro::ALUOperation::AddWithCarry:
112 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
113 adc(src_a, src_b);
114 setc(byte[STATE + offsetof(JITState, carry_flag)]);
115 break;
116 case Macro::ALUOperation::Subtract:
117 if (optimizer.zero_reg_skip) {
118 if (valid_operation) {
119 sub(src_a, src_b);
120 has_emitted = true;
121 }
122 } else {
123 sub(src_a, src_b);
124 has_emitted = true;
125 }
126 if (!optimizer.can_skip_carry && has_emitted) {
127 setc(byte[STATE + offsetof(JITState, carry_flag)]);
128 }
129 break;
130 case Macro::ALUOperation::SubtractWithBorrow:
131 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
132 sbb(src_a, src_b);
133 setc(byte[STATE + offsetof(JITState, carry_flag)]);
134 break;
135 case Macro::ALUOperation::Xor:
136 if (optimizer.zero_reg_skip) {
137 if (valid_operation) {
138 xor_(src_a, src_b);
139 }
140 } else {
141 xor_(src_a, src_b);
142 }
143 break;
144 case Macro::ALUOperation::Or:
145 if (optimizer.zero_reg_skip) {
146 if (valid_operation) {
147 or_(src_a, src_b);
148 }
149 } else {
150 or_(src_a, src_b);
151 }
152 break;
153 case Macro::ALUOperation::And:
154 if (optimizer.zero_reg_skip) {
155 if (!has_zero_register) {
156 and_(src_a, src_b);
157 }
158 } else {
159 and_(src_a, src_b);
160 }
161 break;
162 case Macro::ALUOperation::AndNot:
163 if (optimizer.zero_reg_skip) {
164 if (!is_a_zero) {
165 not_(src_b);
166 and_(src_a, src_b);
167 }
168 } else {
169 not_(src_b);
170 and_(src_a, src_b);
171 }
172 break;
173 case Macro::ALUOperation::Nand:
174 if (optimizer.zero_reg_skip) {
175 if (!is_a_zero) {
176 and_(src_a, src_b);
177 not_(src_a);
178 }
179 } else {
180 and_(src_a, src_b);
181 not_(src_a);
182 }
183 break;
184 default:
185 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}",
186 static_cast<std::size_t>(opcode.alu_operation.Value()));
187 break;
188 }
189 Compile_ProcessResult(opcode.result_operation, opcode.dst);
190}
191
192void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
193 if (optimizer.skip_dummy_addimmediate) {
194 // Games tend to use this as an exit instruction placeholder: it encodes an instruction
195 // that does nothing. In our case we can simply not emit anything.
196 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
197 return;
198 }
199 }
200 // Check for redundant moves
201 if (optimizer.optimize_for_method_move &&
202 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
203 if (next_opcode.has_value()) {
204 const auto next = *next_opcode;
205 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
206 return;
207 }
208 }
209 }
210 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
211 if (opcode.immediate == 0) {
212 xor_(RESULT, RESULT);
213 } else {
214 mov(RESULT, opcode.immediate);
215 }
216 } else {
217 auto result = Compile_GetRegister(opcode.src_a, RESULT);
218 if (opcode.immediate > 2) {
219 add(result, opcode.immediate);
220 } else if (opcode.immediate == 1) {
221 inc(result);
222 } else if (opcode.immediate < 0) {
223 sub(result, opcode.immediate * -1);
224 }
225 }
226 Compile_ProcessResult(opcode.result_operation, opcode.dst);
227}
228
229void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
230 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
231 auto src = Compile_GetRegister(opcode.src_b, eax);
232
233 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
234 shr(src, opcode.bf_src_bit);
235 } else if (opcode.bf_src_bit == 31) {
236 xor_(src, src);
237 }
238 // Don't bother masking the whole register since we're using a 32 bit register
239 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
240 and_(src, opcode.GetBitfieldMask());
241 } else if (opcode.bf_size == 0) {
242 xor_(src, src);
243 }
244 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
245 shl(src, opcode.bf_dst_bit);
246 } else if (opcode.bf_dst_bit == 31) {
247 xor_(src, src);
248 }
249
250 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
251 if (mask != 0xffffffff) {
252 and_(dst, mask);
253 }
254 or_(dst, src);
255 Compile_ProcessResult(opcode.result_operation, opcode.dst);
256}
257
258void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
259 auto dst = Compile_GetRegister(opcode.src_a, eax);
260 auto src = Compile_GetRegister(opcode.src_b, RESULT);
261
262 shr(src, al);
263 if (opcode.bf_size != 0 && opcode.bf_size != 31) {
264 and_(src, opcode.GetBitfieldMask());
265 } else if (opcode.bf_size == 0) {
266 xor_(src, src);
267 }
268
269 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
270 shl(src, opcode.bf_dst_bit);
271 } else if (opcode.bf_dst_bit == 31) {
272 xor_(src, src);
273 }
274 Compile_ProcessResult(opcode.result_operation, opcode.dst);
275}
276
277void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
278 auto dst = Compile_GetRegister(opcode.src_a, eax);
279 auto src = Compile_GetRegister(opcode.src_b, RESULT);
280
281 if (opcode.bf_src_bit != 0) {
282 shr(src, opcode.bf_src_bit);
283 }
284
285 if (opcode.bf_size != 31) {
286 and_(src, opcode.GetBitfieldMask());
287 }
288 shl(src, al);
289 Compile_ProcessResult(opcode.result_operation, opcode.dst);
290}
291
292static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) {
293 return maxwell3d->GetRegisterValue(method);
294}
295
296static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
297 maxwell3d->CallMethodFromMME(method_address.address, value);
298}
299
300void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
301 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
302 if (opcode.immediate == 0) {
303 xor_(RESULT, RESULT);
304 } else {
305 mov(RESULT, opcode.immediate);
306 }
307 } else {
308 auto result = Compile_GetRegister(opcode.src_a, RESULT);
309 if (opcode.immediate > 2) {
310 add(result, opcode.immediate);
311 } else if (opcode.immediate == 1) {
312 inc(result);
313 } else if (opcode.immediate < 0) {
314 sub(result, opcode.immediate * -1);
315 }
316 }
317 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
318 mov(Common::X64::ABI_PARAM1, qword[STATE]);
319 mov(Common::X64::ABI_PARAM2, RESULT);
320 Common::X64::CallFarFunction(*this, &Read);
321 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
322 mov(RESULT, Common::X64::ABI_RETURN.cvt32());
323 Compile_ProcessResult(opcode.result_operation, opcode.dst);
324}
325
326void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
327 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
328 mov(Common::X64::ABI_PARAM1, qword[STATE]);
329 mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
330 mov(Common::X64::ABI_PARAM3, value);
331 Common::X64::CallFarFunction(*this, &Send);
332 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
333
334 Xbyak::Label dont_process{};
335 // Get increment
336 test(METHOD_ADDRESS, 0x3f000);
337 // If zero, method address doesn't update
338 je(dont_process);
339
340 mov(ecx, METHOD_ADDRESS);
341 and_(METHOD_ADDRESS, 0xfff);
342 shr(ecx, 12);
343 and_(ecx, 0x3f);
344 lea(eax, ptr[rcx + METHOD_ADDRESS_64]);
345 sal(ecx, 12);
346 or_(eax, ecx);
347
348 mov(METHOD_ADDRESS, eax);
349
350 L(dont_process);
351}
352
353void Tegra::MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
354 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
355 const s32 jump_address =
356 static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));
357
358 Xbyak::Label end;
359 auto value = Compile_GetRegister(opcode.src_a, eax);
360 test(value, value);
361 if (optimizer.has_delayed_pc) {
362 switch (opcode.branch_condition) {
363 case Macro::BranchCondition::Zero:
364 jne(end, T_NEAR);
365 break;
366 case Macro::BranchCondition::NotZero:
367 je(end, T_NEAR);
368 break;
369 }
370
371 if (opcode.branch_annul) {
372 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
373 jmp(labels[jump_address], T_NEAR);
374 } else {
375 Xbyak::Label handle_post_exit{};
376 Xbyak::Label skip{};
377 jmp(skip, T_NEAR);
378 if (opcode.is_exit) {
379 L(handle_post_exit);
380 // Execute 1 instruction
381 mov(BRANCH_HOLDER, end_of_code);
382 // Jump to next instruction to skip delay slot check
383 jmp(labels[jump_address], T_NEAR);
384 } else {
385 L(handle_post_exit);
386 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
387 jmp(labels[jump_address], T_NEAR);
388 }
389 L(skip);
390 mov(BRANCH_HOLDER, handle_post_exit);
391 jmp(delay_skip[pc], T_NEAR);
392 }
393 } else {
394 switch (opcode.branch_condition) {
395 case Macro::BranchCondition::Zero:
396 je(labels[jump_address], T_NEAR);
397 break;
398 case Macro::BranchCondition::NotZero:
399 jne(labels[jump_address], T_NEAR);
400 break;
401 }
402 }
403
404 L(end);
405}
406
407void Tegra::MacroJITx64Impl::Optimizer_ScanFlags() {
408 optimizer.can_skip_carry = true;
409 optimizer.has_delayed_pc = false;
410 for (auto raw_op : code) {
411 Macro::Opcode op{};
412 op.raw = raw_op;
413
414 if (op.operation == Macro::Operation::ALU) {
415 // Scan for any ALU operations which actually use the carry flag. If none exist in
416 // the current code, we can skip emitting the carry flag handling operations.
417 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
418 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
419 optimizer.can_skip_carry = false;
420 }
421 }
422
423 if (op.operation == Macro::Operation::Branch) {
424 if (!op.branch_annul) {
425 optimizer.has_delayed_pc = true;
426 }
427 }
428 }
429}
430
431void MacroJITx64Impl::Compile() {
432 MICROPROFILE_SCOPE(MacroJitCompile);
433 bool keep_executing = true;
434 labels.fill(Xbyak::Label());
435
436 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
437 // JIT state
438 mov(STATE, Common::X64::ABI_PARAM1);
439 mov(PARAMETERS, qword[Common::X64::ABI_PARAM1 +
440 static_cast<Xbyak::uint32>(offsetof(JITState, parameters))]);
441 mov(REGISTERS, Common::X64::ABI_PARAM1);
442 add(REGISTERS, static_cast<Xbyak::uint32>(offsetof(JITState, registers)));
443 xor_(RESULT, RESULT);
444 xor_(METHOD_ADDRESS, METHOD_ADDRESS);
445 xor_(NEXT_PARAMETER, NEXT_PARAMETER);
446 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
447
448 mov(dword[REGISTERS + 4], Compile_FetchParameter());
449
450 // Track reads of the zero register and treat them as no-ops
451 optimizer.zero_reg_skip = true;
452
453 // AddImmediate tends to be used as a NOP instruction. If we detect this, we can
454 // skip the entire code path and not emit anything.
455 optimizer.skip_dummy_addimmediate = true;
456
457 // SMO tends to emit a lot of unnecessary method moves. We can mitigate this by only emitting
458 // one if our register isn't "dirty".
459 optimizer.optimize_for_method_move = true;
460
461 // Check to see if we can skip emitting certain instructions
462 Optimizer_ScanFlags();
463
464 const u32 op_count = static_cast<u32>(code.size());
465 for (u32 i = 0; i < op_count; i++) {
466 if (i < op_count - 1) {
467 pc = i + 1;
468 next_opcode = GetOpCode();
469 } else {
470 next_opcode = {};
471 }
472 pc = i;
473 Compile_NextInstruction();
474 }
475
476 L(end_of_code);
477
478 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
479 ret();
480 ready();
481 program = getCode<ProgramType>();
482}
483
484bool MacroJITx64Impl::Compile_NextInstruction() {
485 const auto opcode = GetOpCode();
486 if (labels[pc].getAddress()) {
487 return false;
488 }
489
490 L(labels[pc]);
491
492 const std::size_t op = static_cast<std::size_t>(opcode.operation.Value());
493
494 if (InstructionTable[op] == nullptr) {
495 UNIMPLEMENTED_MSG("Unimplemented opcode {}", op);
496 } else {
497 ((*this).*InstructionTable[op])(opcode);
498 }
499
500 if (optimizer.has_delayed_pc) {
501 if (opcode.is_exit) {
502 mov(rax, end_of_code);
503 test(BRANCH_HOLDER, BRANCH_HOLDER);
504 cmove(BRANCH_HOLDER, rax);
505 // Jump to next instruction to skip delay slot check
506 je(labels[pc + 1], T_NEAR);
507 } else {
508 // TODO(ogniK): Optimize delay slot branching
509 Xbyak::Label no_delay_slot{};
510 test(BRANCH_HOLDER, BRANCH_HOLDER);
511 je(no_delay_slot, T_NEAR);
512 mov(rax, BRANCH_HOLDER);
513 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
514 jmp(rax);
515 L(no_delay_slot);
516 }
517 L(delay_skip[pc]);
518 if (opcode.is_exit) {
519 return false;
520 }
521 } else {
522 test(BRANCH_HOLDER, BRANCH_HOLDER);
523 jne(end_of_code, T_NEAR);
524 if (opcode.is_exit) {
525 inc(BRANCH_HOLDER);
526 return false;
527 }
528 }
529 return true;
530}
531
532Xbyak::Reg32 Tegra::MacroJITx64Impl::Compile_FetchParameter() {
533 mov(eax, dword[PARAMETERS + NEXT_PARAMETER * sizeof(u32)]);
534 inc(NEXT_PARAMETER);
535 return eax;
536}
537
538Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
539 if (index == 0) {
540 // Register 0 is always zero
541 xor_(dst, dst);
542 } else {
543 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
544 }
545
546 return dst;
547}
548
549Xbyak::Reg64 Tegra::MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg64 dst) {
550 if (index == 0) {
551 // Register 0 is always zero
552 xor_(dst, dst);
553 } else {
554 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
555 }
556
557 return dst;
558}
559
560void Tegra::MacroJITx64Impl::Compile_WriteCarry(Xbyak::Reg64 dst) {
561 Xbyak::Label zero{}, end{};
562 xor_(ecx, ecx);
563 shr(dst, 32);
564 setne(cl);
565 mov(dword[STATE + offsetof(JITState, carry_flag)], ecx);
566}
567
568void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
569 auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
570 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
571 // register.
572 if (reg == 0) {
573 return;
574 }
575 mov(dword[REGISTERS + reg * sizeof(u32)], result);
576 };
577 auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
578
579 switch (operation) {
580 case Macro::ResultOperation::IgnoreAndFetch:
581 SetRegister(reg, Compile_FetchParameter());
582 break;
583 case Macro::ResultOperation::Move:
584 SetRegister(reg, RESULT);
585 break;
586 case Macro::ResultOperation::MoveAndSetMethod:
587 SetRegister(reg, RESULT);
588 SetMethodAddress(RESULT);
589 break;
590 case Macro::ResultOperation::FetchAndSend:
591 // Fetch parameter and send result.
592 SetRegister(reg, Compile_FetchParameter());
593 Compile_Send(RESULT);
594 break;
595 case Macro::ResultOperation::MoveAndSend:
596 // Move and send result.
597 SetRegister(reg, RESULT);
598 Compile_Send(RESULT);
599 break;
600 case Macro::ResultOperation::FetchAndSetMethod:
601 // Fetch parameter and use result as Method Address.
602 SetRegister(reg, Compile_FetchParameter());
603 SetMethodAddress(RESULT);
604 break;
605 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
606 // Move result and use as Method Address, then fetch and send parameter.
607 SetRegister(reg, RESULT);
608 SetMethodAddress(RESULT);
609 Compile_Send(Compile_FetchParameter());
610 break;
611 case Macro::ResultOperation::MoveAndSetMethodSend:
612 // Move result and use as Method Address, then send bits 12:17 of result.
613 SetRegister(reg, RESULT);
614 SetMethodAddress(RESULT);
615 shr(RESULT, 12);
616 and_(RESULT, 0b111111);
617 Compile_Send(RESULT);
618 break;
619 default:
620 UNIMPLEMENTED_MSG("Unimplemented macro operation {}", static_cast<std::size_t>(operation));
621 }
622}
623
624Macro::Opcode MacroJITx64Impl::GetOpCode() const {
625 ASSERT(pc < code.size());
626 return {code[pc]};
627}
628
629std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
630 return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
631}
632
633} // namespace Tegra
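
Note: the bit twiddling emitted by Compile_Send reduces to the plain C++ below, assuming the Macro::MethodAddress layout (address in bits 0-11, increment in bits 12-17). After each send the address advances by the increment while the increment field itself is preserved. The sample value is arbitrary.

    #include <cstdint>
    #include <cstdio>

    // Plain C++ equivalent of the generated x64 in Compile_Send's post-call path.
    uint32_t AdvanceMethodAddress(uint32_t method_address) {
        const uint32_t increment = (method_address >> 12) & 0x3f;
        if (increment == 0) {
            return method_address; // je dont_process: the method address doesn't update
        }
        const uint32_t address = method_address & 0xfff;
        return (address + increment) | (increment << 12);
    }

    int main() {
        uint32_t ma = (4u << 12) | 0x100; // address 0x100, increment 4
        for (int i = 0; i < 3; ++i) {
            std::printf("address=0x%03x\n", ma & 0xfff);
            ma = AdvanceMethodAddress(ma);
        }
    }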
diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h
new file mode 100644
index 000000000..71cd6a3b0
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.h
@@ -0,0 +1,98 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <bitset>
9#include <xbyak.h>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/x64/xbyak_abi.h"
13#include "video_core/macro/macro.h"
14
15namespace Tegra {
16namespace Engines {
17class Maxwell3D;
18}
19
20/// MAX_CODE_SIZE is arbitrarily chosen based on current booting games
21constexpr size_t MAX_CODE_SIZE = 0x10000;
22
23class MacroJITx64 final : public MacroEngine {
24public:
25 explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
26
27protected:
28 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
29
30private:
31 Engines::Maxwell3D& maxwell3d;
32};
33
34class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
35public:
36 MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
37 ~MacroJITx64Impl();
38 void Execute(std::vector<u32>& parameters, u32 method) override;
39
40 void Compile_ALU(Macro::Opcode opcode);
41 void Compile_AddImmediate(Macro::Opcode opcode);
42 void Compile_ExtractInsert(Macro::Opcode opcode);
43 void Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode);
44 void Compile_ExtractShiftLeftRegister(Macro::Opcode opcode);
45 void Compile_Read(Macro::Opcode opcode);
46 void Compile_Branch(Macro::Opcode opcode);
47
48private:
49 void Optimizer_ScanFlags();
50
51 void Compile();
52 bool Compile_NextInstruction();
53
54 Xbyak::Reg32 Compile_FetchParameter();
55 Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
56 Xbyak::Reg64 Compile_GetRegister(u32 index, Xbyak::Reg64 dst);
57 void Compile_WriteCarry(Xbyak::Reg64 dst);
58
59 void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
60 void Compile_Send(Xbyak::Reg32 value);
61
62 Macro::Opcode GetOpCode() const;
63 std::bitset<32> PersistentCallerSavedRegs() const;
64
65 struct JITState {
66 Engines::Maxwell3D* maxwell3d{};
67 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers{};
68 u32* parameters{};
69 u32 carry_flag{};
70 };
71 static_assert(offsetof(JITState, maxwell3d) == 0, "Maxwell3D is not at 0x0");
72 using ProgramType = void (*)(JITState*);
73
74 struct OptimizerState {
75 bool can_skip_carry{};
76 bool has_delayed_pc{};
77 bool zero_reg_skip{};
78 bool skip_dummy_addimmediate{};
79 bool optimize_for_method_move{};
80 };
81 OptimizerState optimizer{};
82
83 std::optional<Macro::Opcode> next_opcode{};
84 ProgramType program{nullptr};
85
86 std::array<Xbyak::Label, MAX_CODE_SIZE> labels;
87 std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip{};
88 Xbyak::Label end_of_code{};
89
90 bool is_delay_slot{};
91 u32 pc{};
92 std::optional<u32> delayed_pc;
93
94 const std::vector<u32>& code;
95 Engines::Maxwell3D& maxwell3d;
96};
97
98} // namespace Tegra