author     bunnei     2020-06-04 11:40:52 -0400
committer  GitHub     2020-06-04 11:40:52 -0400
commit     34d4abc4f9c264fb4dfc32b9b44aaeecaa4695a0 (patch)
tree       95b864039ba58c858b0bb89c2eb6b56a25475d22 /src/video_core/macro
parent     Merge pull request #4039 from FearlessTobi/port-5376 (diff)
parent     Default init labels and use initializer list for macro engine (diff)
Merge pull request #4009 from ogniK5377/macro-jit-prod
video_core: Implement Macro JIT
Diffstat (limited to 'src/video_core/macro')
-rw-r--r--  src/video_core/macro/macro.cpp               45
-rw-r--r--  src/video_core/macro/macro.h                128
-rw-r--r--  src/video_core/macro/macro_interpreter.cpp  288
-rw-r--r--  src/video_core/macro/macro_interpreter.h    102
-rw-r--r--  src/video_core/macro/macro_jit_x64.cpp      640
-rw-r--r--  src/video_core/macro/macro_jit_x64.h        100
6 files changed, 1303 insertions, 0 deletions
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
new file mode 100644
index 000000000..89077a2d8
--- /dev/null
+++ b/src/video_core/macro/macro.cpp
@@ -0,0 +1,45 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "core/settings.h"
8#include "video_core/macro/macro.h"
9#include "video_core/macro/macro_interpreter.h"
10#include "video_core/macro/macro_jit_x64.h"
11
12namespace Tegra {
13
14void MacroEngine::AddCode(u32 method, u32 data) {
15 uploaded_macro_code[method].push_back(data);
16}
17
18void MacroEngine::Execute(u32 method, const std::vector<u32>& parameters) {
19 auto compiled_macro = macro_cache.find(method);
20 if (compiled_macro != macro_cache.end()) {
21 compiled_macro->second->Execute(parameters, method);
22 } else {
23 // Macro not compiled, check if it's uploaded and if so, compile it
24 auto macro_code = uploaded_macro_code.find(method);
25 if (macro_code == uploaded_macro_code.end()) {
26 UNREACHABLE_MSG("Macro 0x{0:x} was not uploaded", method);
27 return;
28 }
29 macro_cache[method] = Compile(macro_code->second);
30 macro_cache[method]->Execute(parameters, method);
31 }
32}
33
34std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d) {
35 if (Settings::values.disable_macro_jit) {
36 return std::make_unique<MacroInterpreter>(maxwell3d);
37 }
38#ifdef ARCHITECTURE_x86_64
39 return std::make_unique<MacroJITx64>(maxwell3d);
40#else
41 return std::make_unique<MacroInterpreter>(maxwell3d);
42#endif
43}
44
45} // namespace Tegra
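The file above is the whole public surface of the new macro engine: GetMacroEngine() picks the JIT on x86_64 (unless it is disabled in settings), AddCode() stores raw macro words per method slot, and Execute() compiles on first use and caches the result. A minimal sketch of that call pattern follows; the wrapper function and the way the words/parameters are obtained are assumptions for illustration, only the three entry points come from this change.

// Hypothetical driver, for illustration only.
#include <memory>
#include <vector>
#include "video_core/engines/maxwell_3d.h"
#include "video_core/macro/macro.h"

namespace Tegra {
void UploadAndRun(Engines::Maxwell3D& maxwell3d, u32 method,
                  const std::vector<u32>& macro_words, const std::vector<u32>& parameters) {
    // Pick the JIT on x86_64 (unless disabled in settings), otherwise the interpreter.
    std::unique_ptr<MacroEngine> engine = GetMacroEngine(maxwell3d);

    // Upload the raw macro code words for this method slot.
    for (const u32 word : macro_words) {
        engine->AddCode(method, word);
    }

    // The first call compiles and caches the macro; later calls reuse the cached program.
    engine->Execute(method, parameters);
}
} // namespace Tegra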
diff --git a/src/video_core/macro/macro.h b/src/video_core/macro/macro.h
new file mode 100644
index 000000000..b76ed891f
--- /dev/null
+++ b/src/video_core/macro/macro.h
@@ -0,0 +1,128 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <unordered_map>
9#include <vector>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12
13namespace Tegra {
14namespace Engines {
15class Maxwell3D;
16}
17namespace Macro {
18constexpr std::size_t NUM_MACRO_REGISTERS = 8;
19enum class Operation : u32 {
20 ALU = 0,
21 AddImmediate = 1,
22 ExtractInsert = 2,
23 ExtractShiftLeftImmediate = 3,
24 ExtractShiftLeftRegister = 4,
25 Read = 5,
26 Unused = 6, // This operation doesn't seem to be a valid encoding.
27 Branch = 7,
28};
29
30enum class ALUOperation : u32 {
31 Add = 0,
32 AddWithCarry = 1,
33 Subtract = 2,
34 SubtractWithBorrow = 3,
35 // Operations 4-7 don't seem to be valid encodings.
36 Xor = 8,
37 Or = 9,
38 And = 10,
39 AndNot = 11,
40 Nand = 12
41};
42
43enum class ResultOperation : u32 {
44 IgnoreAndFetch = 0,
45 Move = 1,
46 MoveAndSetMethod = 2,
47 FetchAndSend = 3,
48 MoveAndSend = 4,
49 FetchAndSetMethod = 5,
50 MoveAndSetMethodFetchAndSend = 6,
51 MoveAndSetMethodSend = 7
52};
53
54enum class BranchCondition : u32 {
55 Zero = 0,
56 NotZero = 1,
57};
58
59union Opcode {
60 u32 raw;
61 BitField<0, 3, Operation> operation;
62 BitField<4, 3, ResultOperation> result_operation;
63 BitField<4, 1, BranchCondition> branch_condition;
64 // If set on a branch, then the branch doesn't have a delay slot.
65 BitField<5, 1, u32> branch_annul;
66 BitField<7, 1, u32> is_exit;
67 BitField<8, 3, u32> dst;
68 BitField<11, 3, u32> src_a;
69 BitField<14, 3, u32> src_b;
70 // The signed immediate overlaps the second source operand and the alu operation.
71 BitField<14, 18, s32> immediate;
72
73 BitField<17, 5, ALUOperation> alu_operation;
74
75 // Bitfield instruction data
76 BitField<17, 5, u32> bf_src_bit;
77 BitField<22, 5, u32> bf_size;
78 BitField<27, 5, u32> bf_dst_bit;
79
80 u32 GetBitfieldMask() const {
81 return (1 << bf_size) - 1;
82 }
83
84 s32 GetBranchTarget() const {
85 return static_cast<s32>(immediate * sizeof(u32));
86 }
87};
88
89union MethodAddress {
90 u32 raw;
91 BitField<0, 12, u32> address;
92 BitField<12, 6, u32> increment;
93};
94
95} // namespace Macro
96
97class CachedMacro {
98public:
99 virtual ~CachedMacro() = default;
100 /**
101 * Executes the macro code with the specified input parameters.
102 * @param parameters The input parameters of the macro
103 * @param method The method that invoked the macro
104 */
105 virtual void Execute(const std::vector<u32>& parameters, u32 method) = 0;
106};
107
108class MacroEngine {
109public:
110 virtual ~MacroEngine() = default;
111
112 // Stores the uploaded macro code so it can be compiled when it is called.
113 void AddCode(u32 method, u32 data);
114
115 // Compiles the macro if it's not in the cache, and executes the compiled macro
116 void Execute(u32 method, const std::vector<u32>& parameters);
117
118protected:
119 virtual std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) = 0;
120
121private:
122 std::unordered_map<u32, std::unique_ptr<CachedMacro>> macro_cache;
123 std::unordered_map<u32, std::vector<u32>> uploaded_macro_code;
124};
125
126std::unique_ptr<MacroEngine> GetMacroEngine(Engines::Maxwell3D& maxwell3d);
127
128} // namespace Tegra
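The Macro::Opcode union above packs several overlapping views of the same 32-bit macro word. A self-contained sketch of that layout using plain shifts and masks is shown below; the sample word is made up, and only the field positions are taken from the BitField declarations above.

// Standalone illustration of the bit layout declared in Macro::Opcode.
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t raw = 0x00000311; // hypothetical macro word
    const uint32_t operation        = raw & 0x7;          // bits 0-2:  Operation
    const uint32_t result_operation = (raw >> 4) & 0x7;   // bits 4-6:  ResultOperation
    const uint32_t dst              = (raw >> 8) & 0x7;   // bits 8-10: destination register
    const uint32_t src_a            = (raw >> 11) & 0x7;  // bits 11-13
    const uint32_t src_b            = (raw >> 14) & 0x7;  // bits 14-16
    // The 18-bit signed immediate overlaps src_b and the ALU operation (bits 14-31),
    // so an arithmetic right shift of the signed word sign-extends it.
    const int32_t immediate = static_cast<int32_t>(raw) >> 14;
    const uint32_t bf_size = (raw >> 22) & 0x1f;           // bitfield size
    const uint32_t mask    = (1u << bf_size) - 1u;         // same result as GetBitfieldMask()
    std::printf("op=%u res=%u dst=%u src_a=%u src_b=%u imm=%d mask=0x%x\n",
                operation, result_operation, dst, src_a, src_b, immediate, mask);
    return 0;
}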
diff --git a/src/video_core/macro/macro_interpreter.cpp b/src/video_core/macro/macro_interpreter.cpp
new file mode 100644
index 000000000..5edff27aa
--- /dev/null
+++ b/src/video_core/macro/macro_interpreter.cpp
@@ -0,0 +1,288 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "video_core/engines/maxwell_3d.h"
9#include "video_core/macro/macro_interpreter.h"
10
11MICROPROFILE_DEFINE(MacroInterp, "GPU", "Execute macro interpreter", MP_RGB(128, 128, 192));
12
13namespace Tegra {
14MacroInterpreter::MacroInterpreter(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
15
16std::unique_ptr<CachedMacro> MacroInterpreter::Compile(const std::vector<u32>& code) {
17 return std::make_unique<MacroInterpreterImpl>(maxwell3d, code);
18}
19
20MacroInterpreterImpl::MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d,
21 const std::vector<u32>& code)
22 : maxwell3d(maxwell3d), code(code) {}
23
24void MacroInterpreterImpl::Execute(const std::vector<u32>& parameters, u32 method) {
25 MICROPROFILE_SCOPE(MacroInterp);
26 Reset();
27
28 registers[1] = parameters[0];
29 num_parameters = parameters.size();
30
31 if (num_parameters > parameters_capacity) {
32 parameters_capacity = num_parameters;
33 this->parameters = std::make_unique<u32[]>(num_parameters);
34 }
35 std::memcpy(this->parameters.get(), parameters.data(), num_parameters * sizeof(u32));
36 this->num_parameters = num_parameters;
37
38 // Execute the code until we hit an exit condition.
39 bool keep_executing = true;
40 while (keep_executing) {
41 keep_executing = Step(false);
42 }
43
44 // Assert that the macro used all the input parameters
45 ASSERT(next_parameter_index == num_parameters);
46}
47
48void MacroInterpreterImpl::Reset() {
49 registers = {};
50 pc = 0;
51 delayed_pc = {};
52 method_address.raw = 0;
53 num_parameters = 0;
54 // The next parameter index starts at 1, because $r1 already has the value of the first
55 // parameter.
56 next_parameter_index = 1;
57 carry_flag = false;
58}
59
60bool MacroInterpreterImpl::Step(bool is_delay_slot) {
61 u32 base_address = pc;
62
63 Macro::Opcode opcode = GetOpcode();
64 pc += 4;
65
66 // Update the program counter if we were delayed
67 if (delayed_pc) {
68 ASSERT(is_delay_slot);
69 pc = *delayed_pc;
70 delayed_pc = {};
71 }
72
73 switch (opcode.operation) {
74 case Macro::Operation::ALU: {
75 u32 result = GetALUResult(opcode.alu_operation, GetRegister(opcode.src_a),
76 GetRegister(opcode.src_b));
77 ProcessResult(opcode.result_operation, opcode.dst, result);
78 break;
79 }
80 case Macro::Operation::AddImmediate: {
81 ProcessResult(opcode.result_operation, opcode.dst,
82 GetRegister(opcode.src_a) + opcode.immediate);
83 break;
84 }
85 case Macro::Operation::ExtractInsert: {
86 u32 dst = GetRegister(opcode.src_a);
87 u32 src = GetRegister(opcode.src_b);
88
89 src = (src >> opcode.bf_src_bit) & opcode.GetBitfieldMask();
90 dst &= ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
91 dst |= src << opcode.bf_dst_bit;
92 ProcessResult(opcode.result_operation, opcode.dst, dst);
93 break;
94 }
95 case Macro::Operation::ExtractShiftLeftImmediate: {
96 u32 dst = GetRegister(opcode.src_a);
97 u32 src = GetRegister(opcode.src_b);
98
99 u32 result = ((src >> dst) & opcode.GetBitfieldMask()) << opcode.bf_dst_bit;
100
101 ProcessResult(opcode.result_operation, opcode.dst, result);
102 break;
103 }
104 case Macro::Operation::ExtractShiftLeftRegister: {
105 u32 dst = GetRegister(opcode.src_a);
106 u32 src = GetRegister(opcode.src_b);
107
108 u32 result = ((src >> opcode.bf_src_bit) & opcode.GetBitfieldMask()) << dst;
109
110 ProcessResult(opcode.result_operation, opcode.dst, result);
111 break;
112 }
113 case Macro::Operation::Read: {
114 u32 result = Read(GetRegister(opcode.src_a) + opcode.immediate);
115 ProcessResult(opcode.result_operation, opcode.dst, result);
116 break;
117 }
118 case Macro::Operation::Branch: {
119 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
120 u32 value = GetRegister(opcode.src_a);
121 bool taken = EvaluateBranchCondition(opcode.branch_condition, value);
122 if (taken) {
123 // Ignore the delay slot if the branch has the annul bit.
124 if (opcode.branch_annul) {
125 pc = base_address + opcode.GetBranchTarget();
126 return true;
127 }
128
129 delayed_pc = base_address + opcode.GetBranchTarget();
130 // Execute one more instruction due to the delay slot.
131 return Step(true);
132 }
133 break;
134 }
135 default:
136 UNIMPLEMENTED_MSG("Unimplemented macro operation {}",
137 static_cast<u32>(opcode.operation.Value()));
138 }
139
140 // An instruction with the Exit flag will not actually
141 // cause an exit if it's executed inside a delay slot.
142 if (opcode.is_exit && !is_delay_slot) {
143 // Exit has a delay slot, execute the next instruction
144 Step(true);
145 return false;
146 }
147
148 return true;
149}
150
151u32 MacroInterpreterImpl::GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b) {
152 switch (operation) {
153 case Macro::ALUOperation::Add: {
154 const u64 result{static_cast<u64>(src_a) + src_b};
155 carry_flag = result > 0xffffffff;
156 return static_cast<u32>(result);
157 }
158 case Macro::ALUOperation::AddWithCarry: {
159 const u64 result{static_cast<u64>(src_a) + src_b + (carry_flag ? 1ULL : 0ULL)};
160 carry_flag = result > 0xffffffff;
161 return static_cast<u32>(result);
162 }
163 case Macro::ALUOperation::Subtract: {
164 const u64 result{static_cast<u64>(src_a) - src_b};
165 carry_flag = result < 0x100000000;
166 return static_cast<u32>(result);
167 }
168 case Macro::ALUOperation::SubtractWithBorrow: {
169 const u64 result{static_cast<u64>(src_a) - src_b - (carry_flag ? 0ULL : 1ULL)};
170 carry_flag = result < 0x100000000;
171 return static_cast<u32>(result);
172 }
173 case Macro::ALUOperation::Xor:
174 return src_a ^ src_b;
175 case Macro::ALUOperation::Or:
176 return src_a | src_b;
177 case Macro::ALUOperation::And:
178 return src_a & src_b;
179 case Macro::ALUOperation::AndNot:
180 return src_a & ~src_b;
181 case Macro::ALUOperation::Nand:
182 return ~(src_a & src_b);
183
184 default:
185 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}", static_cast<u32>(operation));
186 return 0;
187 }
188}
189
190void MacroInterpreterImpl::ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result) {
191 switch (operation) {
192 case Macro::ResultOperation::IgnoreAndFetch:
193 // Fetch parameter and ignore result.
194 SetRegister(reg, FetchParameter());
195 break;
196 case Macro::ResultOperation::Move:
197 // Move result.
198 SetRegister(reg, result);
199 break;
200 case Macro::ResultOperation::MoveAndSetMethod:
201 // Move result and use as Method Address.
202 SetRegister(reg, result);
203 SetMethodAddress(result);
204 break;
205 case Macro::ResultOperation::FetchAndSend:
206 // Fetch parameter and send result.
207 SetRegister(reg, FetchParameter());
208 Send(result);
209 break;
210 case Macro::ResultOperation::MoveAndSend:
211 // Move and send result.
212 SetRegister(reg, result);
213 Send(result);
214 break;
215 case Macro::ResultOperation::FetchAndSetMethod:
216 // Fetch parameter and use result as Method Address.
217 SetRegister(reg, FetchParameter());
218 SetMethodAddress(result);
219 break;
220 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
221 // Move result and use as Method Address, then fetch and send parameter.
222 SetRegister(reg, result);
223 SetMethodAddress(result);
224 Send(FetchParameter());
225 break;
226 case Macro::ResultOperation::MoveAndSetMethodSend:
227 // Move result and use as Method Address, then send bits 12:17 of result.
228 SetRegister(reg, result);
229 SetMethodAddress(result);
230 Send((result >> 12) & 0b111111);
231 break;
232 default:
233 UNIMPLEMENTED_MSG("Unimplemented result operation {}", static_cast<u32>(operation));
234 }
235}
236
237bool MacroInterpreterImpl::EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const {
238 switch (cond) {
239 case Macro::BranchCondition::Zero:
240 return value == 0;
241 case Macro::BranchCondition::NotZero:
242 return value != 0;
243 }
244 UNREACHABLE();
245 return true;
246}
247
248Macro::Opcode MacroInterpreterImpl::GetOpcode() const {
249 ASSERT((pc % sizeof(u32)) == 0);
250 ASSERT(pc < code.size() * sizeof(u32));
251 return {code[pc / sizeof(u32)]};
252}
253
254u32 MacroInterpreterImpl::GetRegister(u32 register_id) const {
255 return registers.at(register_id);
256}
257
258void MacroInterpreterImpl::SetRegister(u32 register_id, u32 value) {
259 // Register 0 is hardwired as the zero register.
260 // Ensure no writes to it actually occur.
261 if (register_id == 0) {
262 return;
263 }
264
265 registers.at(register_id) = value;
266}
267
268void MacroInterpreterImpl::SetMethodAddress(u32 address) {
269 method_address.raw = address;
270}
271
272void MacroInterpreterImpl::Send(u32 value) {
273 maxwell3d.CallMethodFromMME(method_address.address, value);
274 // Increment the method address by the method increment.
275 method_address.address.Assign(method_address.address.Value() +
276 method_address.increment.Value());
277}
278
279u32 MacroInterpreterImpl::Read(u32 method) const {
280 return maxwell3d.GetRegisterValue(method);
281}
282
283u32 MacroInterpreterImpl::FetchParameter() {
284 ASSERT(next_parameter_index < num_parameters);
285 return parameters[next_parameter_index++];
286}
287
288} // namespace Tegra
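GetALUResult above performs the additions and subtractions in 64 bits and keeps the flag in the hardware's convention: additions set carry when the 32-bit result wraps, subtractions set it when no borrow occurred. A small self-contained check of that convention follows; the sample operands are made up.

// Mirrors the carry-flag convention used by MacroInterpreterImpl::GetALUResult.
#include <cassert>
#include <cstdint>

int main() {
    // Add: carry is set when the 32-bit result wraps around.
    const uint64_t added = static_cast<uint64_t>(0xffffffffu) + 2u;
    const bool add_carry = added > 0xffffffff;
    assert(static_cast<uint32_t>(added) == 1u && add_carry);

    // Subtract: carry is set when no borrow happened (the 64-bit result stayed below 2^32).
    const uint64_t subtracted = static_cast<uint64_t>(5u) - 3u;
    assert(static_cast<uint32_t>(subtracted) == 2u && subtracted < 0x100000000);

    const uint64_t borrowed = static_cast<uint64_t>(3u) - 5u; // wraps in 64 bits
    assert(!(borrowed < 0x100000000)); // a borrow occurred, so carry would be cleared
    return 0;
}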
diff --git a/src/video_core/macro/macro_interpreter.h b/src/video_core/macro/macro_interpreter.h
new file mode 100644
index 000000000..90217fc89
--- /dev/null
+++ b/src/video_core/macro/macro_interpreter.h
@@ -0,0 +1,102 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6#include <array>
7#include <optional>
8#include <vector>
9#include "common/bit_field.h"
10#include "common/common_types.h"
11#include "video_core/macro/macro.h"
12
13namespace Tegra {
14namespace Engines {
15class Maxwell3D;
16}
17
18class MacroInterpreter final : public MacroEngine {
19public:
20 explicit MacroInterpreter(Engines::Maxwell3D& maxwell3d);
21
22protected:
23 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
24
25private:
26 Engines::Maxwell3D& maxwell3d;
27};
28
29class MacroInterpreterImpl : public CachedMacro {
30public:
31 MacroInterpreterImpl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
32 void Execute(const std::vector<u32>& parameters, u32 method) override;
33
34private:
35 /// Resets the execution engine state, zeroing registers, etc.
36 void Reset();
37
38 /**
39 * Executes a single macro instruction located at the current program counter. Returns whether
40 * the interpreter should keep running.
41 * @param is_delay_slot Whether the current step is being executed due to a delay slot in a
42 * previous instruction.
43 * @return Whether the interpreter should keep running.
44 */
45 bool Step(bool is_delay_slot);
46
47 /// Calculates the result of an ALU operation (src_a OP src_b).
48 u32 GetALUResult(Macro::ALUOperation operation, u32 src_a, u32 src_b);
49
50 /// Performs the result operation on the input result and stores it in the specified register
51 /// (if necessary).
52 void ProcessResult(Macro::ResultOperation operation, u32 reg, u32 result);
53
54 /// Evaluates the branch condition and returns whether the branch should be taken or not.
55 bool EvaluateBranchCondition(Macro::BranchCondition cond, u32 value) const;
56
57 /// Reads an opcode at the current program counter location.
58 Macro::Opcode GetOpcode() const;
59
60 /// Returns the specified register's value. Register 0 is hardcoded to always return 0.
61 u32 GetRegister(u32 register_id) const;
62
63 /// Sets the register to the input value.
64 void SetRegister(u32 register_id, u32 value);
65
66 /// Sets the method address to use for the next Send instruction.
67 void SetMethodAddress(u32 address);
68
69 /// Calls a GPU Engine method with the input parameter.
70 void Send(u32 value);
71
72 /// Reads a GPU register located at the method address.
73 u32 Read(u32 method) const;
74
75 /// Returns the next parameter in the parameter queue.
76 u32 FetchParameter();
77
78 Engines::Maxwell3D& maxwell3d;
79
80 /// Current program counter
81 u32 pc;
82 /// Program counter to execute at after the delay slot is executed.
83 std::optional<u32> delayed_pc;
84
85 /// General purpose macro registers.
86 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers = {};
87
88 /// Method address to use for the next Send instruction.
89 Macro::MethodAddress method_address = {};
90
91 /// Input parameters of the current macro.
92 std::unique_ptr<u32[]> parameters;
93 std::size_t num_parameters = 0;
94 std::size_t parameters_capacity = 0;
95 /// Index of the next parameter that will be fetched by the 'parm' instruction.
96 u32 next_parameter_index = 0;
97
98 bool carry_flag = false;
99 const std::vector<u32>& code;
100};
101
102} // namespace Tegra
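The bitfield operations declared above (ExtractInsert and the ExtractShiftLeft variants) reduce to a mask/shift/merge, as implemented in MacroInterpreterImpl::Step. A plain C++ restatement of ExtractInsert is given below as a sketch; the function name and parameters are illustrative, the logic matches the interpreter code above.

// Plain restatement of Macro::Operation::ExtractInsert: copy `size` bits starting at
// src_bit of `src` into `dst` starting at dst_bit, leaving the other bits of `dst` intact.
#include <cstdint>

uint32_t ExtractInsert(uint32_t dst, uint32_t src, uint32_t src_bit, uint32_t dst_bit,
                       uint32_t size) {
    const uint32_t mask = (1u << size) - 1u;         // Opcode::GetBitfieldMask()
    const uint32_t field = (src >> src_bit) & mask;  // extract the field from src
    dst &= ~(mask << dst_bit);                       // clear the destination field
    dst |= field << dst_bit;                         // insert it
    return dst;
}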
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
new file mode 100644
index 000000000..11c1cc3be
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -0,0 +1,640 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6#include "common/logging/log.h"
7#include "common/microprofile.h"
8#include "common/x64/xbyak_util.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/macro/macro_interpreter.h"
11#include "video_core/macro/macro_jit_x64.h"
12
13MICROPROFILE_DEFINE(MacroJitCompile, "GPU", "Compile macro JIT", MP_RGB(173, 255, 47));
14MICROPROFILE_DEFINE(MacroJitExecute, "GPU", "Execute macro JIT", MP_RGB(255, 255, 0));
15
16namespace Tegra {
17static const Xbyak::Reg64 PARAMETERS = Xbyak::util::r9;
18static const Xbyak::Reg64 REGISTERS = Xbyak::util::r10;
19static const Xbyak::Reg64 STATE = Xbyak::util::r11;
20static const Xbyak::Reg64 NEXT_PARAMETER = Xbyak::util::r12;
21static const Xbyak::Reg32 RESULT = Xbyak::util::r13d;
22static const Xbyak::Reg64 RESULT_64 = Xbyak::util::r13;
23static const Xbyak::Reg32 METHOD_ADDRESS = Xbyak::util::r14d;
24static const Xbyak::Reg64 METHOD_ADDRESS_64 = Xbyak::util::r14;
25static const Xbyak::Reg64 BRANCH_HOLDER = Xbyak::util::r15;
26
27static const std::bitset<32> PERSISTENT_REGISTERS = Common::X64::BuildRegSet({
28 PARAMETERS,
29 REGISTERS,
30 STATE,
31 NEXT_PARAMETER,
32 RESULT,
33 METHOD_ADDRESS,
34 BRANCH_HOLDER,
35});
36
37MacroJITx64::MacroJITx64(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
38
39std::unique_ptr<CachedMacro> MacroJITx64::Compile(const std::vector<u32>& code) {
40 return std::make_unique<MacroJITx64Impl>(maxwell3d, code);
41}
42
43MacroJITx64Impl::MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code)
44 : Xbyak::CodeGenerator(MAX_CODE_SIZE), code(code), maxwell3d(maxwell3d) {
45 Compile();
46}
47
48MacroJITx64Impl::~MacroJITx64Impl() = default;
49
50void MacroJITx64Impl::Execute(const std::vector<u32>& parameters, u32 method) {
51 MICROPROFILE_SCOPE(MacroJitExecute);
52 ASSERT_OR_EXECUTE(program != nullptr, { return; });
53 JITState state{};
54 state.maxwell3d = &maxwell3d;
55 state.registers = {};
56 state.parameters = parameters.data();
57 program(&state);
58}
59
60void MacroJITx64Impl::Compile_ALU(Macro::Opcode opcode) {
61 const bool is_a_zero = opcode.src_a == 0;
62 const bool is_b_zero = opcode.src_b == 0;
63 const bool valid_operation = !is_a_zero && !is_b_zero;
64 const bool is_move_operation = !is_a_zero && is_b_zero;
65 const bool has_zero_register = is_a_zero || is_b_zero;
66
67 Xbyak::Reg64 src_a;
68 Xbyak::Reg32 src_b;
69
70 if (!optimizer.zero_reg_skip) {
71 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
72 src_b = Compile_GetRegister(opcode.src_b, ebx);
73 } else {
74 if (!is_a_zero) {
75 src_a = Compile_GetRegister(opcode.src_a, RESULT_64);
76 }
77 if (!is_b_zero) {
78 src_b = Compile_GetRegister(opcode.src_b, ebx);
79 }
80 }
81 Xbyak::Label skip_carry{};
82
83 bool has_emitted = false;
84
85 switch (opcode.alu_operation) {
86 case Macro::ALUOperation::Add:
87 if (optimizer.zero_reg_skip) {
88 if (valid_operation) {
89 add(src_a, src_b);
90 }
91 } else {
92 add(src_a, src_b);
93 }
94
95 if (!optimizer.can_skip_carry) {
96 setc(byte[STATE + offsetof(JITState, carry_flag)]);
97 }
98 break;
99 case Macro::ALUOperation::AddWithCarry:
100 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
101 adc(src_a, src_b);
102 setc(byte[STATE + offsetof(JITState, carry_flag)]);
103 break;
104 case Macro::ALUOperation::Subtract:
105 if (optimizer.zero_reg_skip) {
106 if (valid_operation) {
107 sub(src_a, src_b);
108 has_emitted = true;
109 }
110 } else {
111 sub(src_a, src_b);
112 has_emitted = true;
113 }
114 if (!optimizer.can_skip_carry && has_emitted) {
115 setc(byte[STATE + offsetof(JITState, carry_flag)]);
116 }
117 break;
118 case Macro::ALUOperation::SubtractWithBorrow:
119 bt(dword[STATE + offsetof(JITState, carry_flag)], 0);
120 sbb(src_a, src_b);
121 setc(byte[STATE + offsetof(JITState, carry_flag)]);
122 break;
123 case Macro::ALUOperation::Xor:
124 if (optimizer.zero_reg_skip) {
125 if (valid_operation) {
126 xor_(src_a, src_b);
127 }
128 } else {
129 xor_(src_a, src_b);
130 }
131 break;
132 case Macro::ALUOperation::Or:
133 if (optimizer.zero_reg_skip) {
134 if (valid_operation) {
135 or_(src_a, src_b);
136 }
137 } else {
138 or_(src_a, src_b);
139 }
140 break;
141 case Macro::ALUOperation::And:
142 if (optimizer.zero_reg_skip) {
143 if (!has_zero_register) {
144 and_(src_a, src_b);
145 }
146 } else {
147 and_(src_a, src_b);
148 }
149 break;
150 case Macro::ALUOperation::AndNot:
151 if (optimizer.zero_reg_skip) {
152 if (!is_a_zero) {
153 not_(src_b);
154 and_(src_a, src_b);
155 }
156 } else {
157 not_(src_b);
158 and_(src_a, src_b);
159 }
160 break;
161 case Macro::ALUOperation::Nand:
162 if (optimizer.zero_reg_skip) {
163 if (!is_a_zero) {
164 and_(src_a, src_b);
165 not_(src_a);
166 }
167 } else {
168 and_(src_a, src_b);
169 not_(src_a);
170 }
171 break;
172 default:
173 UNIMPLEMENTED_MSG("Unimplemented ALU operation {}",
174 static_cast<std::size_t>(opcode.alu_operation.Value()));
175 break;
176 }
177 Compile_ProcessResult(opcode.result_operation, opcode.dst);
178}
179
180void MacroJITx64Impl::Compile_AddImmediate(Macro::Opcode opcode) {
181 if (optimizer.skip_dummy_addimmediate) {
182 // Games tend to use this as an exit-instruction placeholder: a way to encode an instruction
183 // that does nothing. In our case we can simply not emit anything.
184 if (opcode.result_operation == Macro::ResultOperation::Move && opcode.dst == 0) {
185 return;
186 }
187 }
188 // Check for redundant moves
189 if (optimizer.optimize_for_method_move &&
190 opcode.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
191 if (next_opcode.has_value()) {
192 const auto next = *next_opcode;
193 if (next.result_operation == Macro::ResultOperation::MoveAndSetMethod) {
194 return;
195 }
196 }
197 }
198 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
199 if (opcode.immediate == 0) {
200 xor_(RESULT, RESULT);
201 } else {
202 mov(RESULT, opcode.immediate);
203 }
204 } else {
205 auto result = Compile_GetRegister(opcode.src_a, RESULT);
206 if (opcode.immediate > 1) {
207 add(result, opcode.immediate);
208 } else if (opcode.immediate == 1) {
209 inc(result);
210 } else if (opcode.immediate < 0) {
211 sub(result, opcode.immediate * -1);
212 }
213 }
214 Compile_ProcessResult(opcode.result_operation, opcode.dst);
215}
216
217void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
218 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
219 auto src = Compile_GetRegister(opcode.src_b, eax);
220
221 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
222 shr(src, opcode.bf_src_bit);
223 } else if (opcode.bf_src_bit == 31) {
224 xor_(src, src);
225 }
226 // Don't bother masking the whole register since we're using a 32 bit register
227 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
228 and_(src, opcode.GetBitfieldMask());
229 } else if (opcode.bf_size == 0) {
230 xor_(src, src);
231 }
232 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
233 shl(src, opcode.bf_dst_bit);
234 } else if (opcode.bf_dst_bit == 31) {
235 xor_(src, src);
236 }
237
238 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
239 if (mask != 0xffffffff) {
240 and_(dst, mask);
241 }
242 or_(dst, src);
243 Compile_ProcessResult(opcode.result_operation, opcode.dst);
244}
245
246void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
247 auto dst = Compile_GetRegister(opcode.src_a, eax);
248 auto src = Compile_GetRegister(opcode.src_b, RESULT);
249
250 shr(src, al);
251 if (opcode.bf_size != 0 && opcode.bf_size != 31) {
252 and_(src, opcode.GetBitfieldMask());
253 } else if (opcode.bf_size == 0) {
254 xor_(src, src);
255 }
256
257 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
258 shl(src, opcode.bf_dst_bit);
259 } else if (opcode.bf_dst_bit == 31) {
260 xor_(src, src);
261 }
262 Compile_ProcessResult(opcode.result_operation, opcode.dst);
263}
264
265void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
266 auto dst = Compile_GetRegister(opcode.src_a, eax);
267 auto src = Compile_GetRegister(opcode.src_b, RESULT);
268
269 if (opcode.bf_src_bit != 0) {
270 shr(src, opcode.bf_src_bit);
271 }
272
273 if (opcode.bf_size != 31) {
274 and_(src, opcode.GetBitfieldMask());
275 }
276 shl(src, al);
277 Compile_ProcessResult(opcode.result_operation, opcode.dst);
278}
279
280static u32 Read(Engines::Maxwell3D* maxwell3d, u32 method) {
281 return maxwell3d->GetRegisterValue(method);
282}
283
284static void Send(Engines::Maxwell3D* maxwell3d, Macro::MethodAddress method_address, u32 value) {
285 maxwell3d->CallMethodFromMME(method_address.address, value);
286}
287
288void MacroJITx64Impl::Compile_Read(Macro::Opcode opcode) {
289 if (optimizer.zero_reg_skip && opcode.src_a == 0) {
290 if (opcode.immediate == 0) {
291 xor_(RESULT, RESULT);
292 } else {
293 mov(RESULT, opcode.immediate);
294 }
295 } else {
296 auto result = Compile_GetRegister(opcode.src_a, RESULT);
297 if (opcode.immediate > 1) {
298 add(result, opcode.immediate);
299 } else if (opcode.immediate == 1) {
300 inc(result);
301 } else if (opcode.immediate < 0) {
302 sub(result, opcode.immediate * -1);
303 }
304 }
305 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
306 mov(Common::X64::ABI_PARAM1, qword[STATE]);
307 mov(Common::X64::ABI_PARAM2, RESULT);
308 Common::X64::CallFarFunction(*this, &Read);
309 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
310 mov(RESULT, Common::X64::ABI_RETURN.cvt32());
311 Compile_ProcessResult(opcode.result_operation, opcode.dst);
312}
313
314void Tegra::MacroJITx64Impl::Compile_Send(Xbyak::Reg32 value) {
315 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
316 mov(Common::X64::ABI_PARAM1, qword[STATE]);
317 mov(Common::X64::ABI_PARAM2, METHOD_ADDRESS);
318 mov(Common::X64::ABI_PARAM3, value);
319 Common::X64::CallFarFunction(*this, &Send);
320 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, PersistentCallerSavedRegs(), 0);
321
322 Xbyak::Label dont_process{};
323 // Get increment
324 test(METHOD_ADDRESS, 0x3f000);
325 // If zero, method address doesn't update
326 je(dont_process);
327
328 mov(ecx, METHOD_ADDRESS);
329 and_(METHOD_ADDRESS, 0xfff);
330 shr(ecx, 12);
331 and_(ecx, 0x3f);
332 lea(eax, ptr[rcx + METHOD_ADDRESS_64]);
333 sal(ecx, 12);
334 or_(eax, ecx);
335
336 mov(METHOD_ADDRESS, eax);
337
338 L(dont_process);
339}
340
341void Tegra::MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
342 ASSERT_MSG(!is_delay_slot, "Executing a branch in a delay slot is not valid");
343 const s32 jump_address =
344 static_cast<s32>(pc) + static_cast<s32>(opcode.GetBranchTarget() / sizeof(s32));
345
346 Xbyak::Label end;
347 auto value = Compile_GetRegister(opcode.src_a, eax);
348 test(value, value);
349 if (optimizer.has_delayed_pc) {
350 switch (opcode.branch_condition) {
351 case Macro::BranchCondition::Zero:
352 jne(end, T_NEAR);
353 break;
354 case Macro::BranchCondition::NotZero:
355 je(end, T_NEAR);
356 break;
357 }
358
359 if (opcode.branch_annul) {
360 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
361 jmp(labels[jump_address], T_NEAR);
362 } else {
363 Xbyak::Label handle_post_exit{};
364 Xbyak::Label skip{};
365 jmp(skip, T_NEAR);
366 if (opcode.is_exit) {
367 L(handle_post_exit);
368 // Execute 1 instruction
369 mov(BRANCH_HOLDER, end_of_code);
370 // Jump to next instruction to skip delay slot check
371 jmp(labels[jump_address], T_NEAR);
372 } else {
373 L(handle_post_exit);
374 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
375 jmp(labels[jump_address], T_NEAR);
376 }
377 L(skip);
378 mov(BRANCH_HOLDER, handle_post_exit);
379 jmp(delay_skip[pc], T_NEAR);
380 }
381 } else {
382 switch (opcode.branch_condition) {
383 case Macro::BranchCondition::Zero:
384 je(labels[jump_address], T_NEAR);
385 break;
386 case Macro::BranchCondition::NotZero:
387 jne(labels[jump_address], T_NEAR);
388 break;
389 }
390 }
391
392 L(end);
393}
394
395void Tegra::MacroJITx64Impl::Optimizer_ScanFlags() {
396 optimizer.can_skip_carry = true;
397 optimizer.has_delayed_pc = false;
398 for (auto raw_op : code) {
399 Macro::Opcode op{};
400 op.raw = raw_op;
401
402 if (op.operation == Macro::Operation::ALU) {
403 // Scan for any ALU operations which actually use the carry flag; if none exist in
404 // the current code we can skip emitting the carry-flag handling operations
405 if (op.alu_operation == Macro::ALUOperation::AddWithCarry ||
406 op.alu_operation == Macro::ALUOperation::SubtractWithBorrow) {
407 optimizer.can_skip_carry = false;
408 }
409 }
410
411 if (op.operation == Macro::Operation::Branch) {
412 if (!op.branch_annul) {
413 optimizer.has_delayed_pc = true;
414 }
415 }
416 }
417}
418
419void MacroJITx64Impl::Compile() {
420 MICROPROFILE_SCOPE(MacroJitCompile);
421 bool keep_executing = true;
422 labels.fill(Xbyak::Label());
423
424 Common::X64::ABI_PushRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
425 // JIT state
426 mov(STATE, Common::X64::ABI_PARAM1);
427 mov(PARAMETERS, qword[Common::X64::ABI_PARAM1 +
428 static_cast<Xbyak::uint32>(offsetof(JITState, parameters))]);
429 mov(REGISTERS, Common::X64::ABI_PARAM1);
430 add(REGISTERS, static_cast<Xbyak::uint32>(offsetof(JITState, registers)));
431 xor_(RESULT, RESULT);
432 xor_(METHOD_ADDRESS, METHOD_ADDRESS);
433 xor_(NEXT_PARAMETER, NEXT_PARAMETER);
434 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
435
436 mov(dword[REGISTERS + 4], Compile_FetchParameter());
437
438 // Track reads of the zero register so they can be treated as no-ops
439 optimizer.zero_reg_skip = true;
440
441 // AddImmediate tends to be used as a NOP instruction; if we detect this we can
442 // completely skip the entire code path and not emit anything
443 optimizer.skip_dummy_addimmediate = true;
444
445 // SMO tends to emit a lot of unnecessary method moves; we can mitigate this by only emitting
446 // one if our register isn't "dirty"
447 optimizer.optimize_for_method_move = true;
448
449 // Check to see if we can skip emitting certain instructions
450 Optimizer_ScanFlags();
451
452 const u32 op_count = static_cast<u32>(code.size());
453 for (u32 i = 0; i < op_count; i++) {
454 if (i < op_count - 1) {
455 pc = i + 1;
456 next_opcode = GetOpCode();
457 } else {
458 next_opcode = {};
459 }
460 pc = i;
461 Compile_NextInstruction();
462 }
463
464 L(end_of_code);
465
466 Common::X64::ABI_PopRegistersAndAdjustStackGPS(*this, Common::X64::ABI_ALL_CALLEE_SAVED, 8);
467 ret();
468 ready();
469 program = getCode<ProgramType>();
470}
471
472bool MacroJITx64Impl::Compile_NextInstruction() {
473 const auto opcode = GetOpCode();
474 if (labels[pc].getAddress()) {
475 return false;
476 }
477
478 L(labels[pc]);
479
480 switch (opcode.operation) {
481 case Macro::Operation::ALU:
482 Compile_ALU(opcode);
483 break;
484 case Macro::Operation::AddImmediate:
485 Compile_AddImmediate(opcode);
486 break;
487 case Macro::Operation::ExtractInsert:
488 Compile_ExtractInsert(opcode);
489 break;
490 case Macro::Operation::ExtractShiftLeftImmediate:
491 Compile_ExtractShiftLeftImmediate(opcode);
492 break;
493 case Macro::Operation::ExtractShiftLeftRegister:
494 Compile_ExtractShiftLeftRegister(opcode);
495 break;
496 case Macro::Operation::Read:
497 Compile_Read(opcode);
498 break;
499 case Macro::Operation::Branch:
500 Compile_Branch(opcode);
501 break;
502 default:
503 UNIMPLEMENTED_MSG("Unimplemented opcode {}", opcode.operation.Value());
504 break;
505 }
506
507 if (optimizer.has_delayed_pc) {
508 if (opcode.is_exit) {
509 mov(rax, end_of_code);
510 test(BRANCH_HOLDER, BRANCH_HOLDER);
511 cmove(BRANCH_HOLDER, rax);
512 // Jump to next instruction to skip delay slot check
513 je(labels[pc + 1], T_NEAR);
514 } else {
515 // TODO(ogniK): Optimize delay slot branching
516 Xbyak::Label no_delay_slot{};
517 test(BRANCH_HOLDER, BRANCH_HOLDER);
518 je(no_delay_slot, T_NEAR);
519 mov(rax, BRANCH_HOLDER);
520 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
521 jmp(rax);
522 L(no_delay_slot);
523 }
524 L(delay_skip[pc]);
525 if (opcode.is_exit) {
526 return false;
527 }
528 } else {
529 test(BRANCH_HOLDER, BRANCH_HOLDER);
530 jne(end_of_code, T_NEAR);
531 if (opcode.is_exit) {
532 inc(BRANCH_HOLDER);
533 return false;
534 }
535 }
536 return true;
537}
538
539Xbyak::Reg32 Tegra::MacroJITx64Impl::Compile_FetchParameter() {
540 mov(eax, dword[PARAMETERS + NEXT_PARAMETER * sizeof(u32)]);
541 inc(NEXT_PARAMETER);
542 return eax;
543}
544
545Xbyak::Reg32 MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg32 dst) {
546 if (index == 0) {
547 // Register 0 is always zero
548 xor_(dst, dst);
549 } else {
550 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
551 }
552
553 return dst;
554}
555
556Xbyak::Reg64 Tegra::MacroJITx64Impl::Compile_GetRegister(u32 index, Xbyak::Reg64 dst) {
557 if (index == 0) {
558 // Register 0 is always zero
559 xor_(dst, dst);
560 } else {
561 mov(dst, dword[REGISTERS + index * sizeof(u32)]);
562 }
563
564 return dst;
565}
566
567void Tegra::MacroJITx64Impl::Compile_WriteCarry(Xbyak::Reg64 dst) {
568 Xbyak::Label zero{}, end{};
569 xor_(ecx, ecx);
570 shr(dst, 32);
571 setne(cl);
572 mov(dword[STATE + offsetof(JITState, carry_flag)], ecx);
573}
574
575void MacroJITx64Impl::Compile_ProcessResult(Macro::ResultOperation operation, u32 reg) {
576 auto SetRegister = [=](u32 reg, Xbyak::Reg32 result) {
577 // Register 0 is supposed to always return 0. NOP is implemented as a store to the zero
578 // register.
579 if (reg == 0) {
580 return;
581 }
582 mov(dword[REGISTERS + reg * sizeof(u32)], result);
583 };
584 auto SetMethodAddress = [=](Xbyak::Reg32 reg) { mov(METHOD_ADDRESS, reg); };
585
586 switch (operation) {
587 case Macro::ResultOperation::IgnoreAndFetch:
588 SetRegister(reg, Compile_FetchParameter());
589 break;
590 case Macro::ResultOperation::Move:
591 SetRegister(reg, RESULT);
592 break;
593 case Macro::ResultOperation::MoveAndSetMethod:
594 SetRegister(reg, RESULT);
595 SetMethodAddress(RESULT);
596 break;
597 case Macro::ResultOperation::FetchAndSend:
598 // Fetch parameter and send result.
599 SetRegister(reg, Compile_FetchParameter());
600 Compile_Send(RESULT);
601 break;
602 case Macro::ResultOperation::MoveAndSend:
603 // Move and send result.
604 SetRegister(reg, RESULT);
605 Compile_Send(RESULT);
606 break;
607 case Macro::ResultOperation::FetchAndSetMethod:
608 // Fetch parameter and use result as Method Address.
609 SetRegister(reg, Compile_FetchParameter());
610 SetMethodAddress(RESULT);
611 break;
612 case Macro::ResultOperation::MoveAndSetMethodFetchAndSend:
613 // Move result and use as Method Address, then fetch and send parameter.
614 SetRegister(reg, RESULT);
615 SetMethodAddress(RESULT);
616 Compile_Send(Compile_FetchParameter());
617 break;
618 case Macro::ResultOperation::MoveAndSetMethodSend:
619 // Move result and use as Method Address, then send bits 12:17 of result.
620 SetRegister(reg, RESULT);
621 SetMethodAddress(RESULT);
622 shr(RESULT, 12);
623 and_(RESULT, 0b111111);
624 Compile_Send(RESULT);
625 break;
626 default:
627 UNIMPLEMENTED_MSG("Unimplemented macro operation {}", static_cast<std::size_t>(operation));
628 }
629}
630
631Macro::Opcode MacroJITx64Impl::GetOpCode() const {
632 ASSERT(pc < code.size());
633 return {code[pc]};
634}
635
636std::bitset<32> MacroJITx64Impl::PersistentCallerSavedRegs() const {
637 return PERSISTENT_REGISTERS & Common::X64::ABI_ALL_CALLER_SAVED;
638}
639
640} // namespace Tegra
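The tail of Compile_Send above updates the packed method address the same way the interpreter's Send does: the low 12 bits hold the method, bits 12-17 the per-send increment, and the address advances by the increment after each send. The emitted instruction sequence computes the equivalent of the following C++ sketch (not code from this change; the function name is illustrative).

// C++ equivalent of the generated epilogue in MacroJITx64Impl::Compile_Send.
#include <cstdint>

uint32_t AdvanceMethodAddress(uint32_t method_address) {
    // test METHOD_ADDRESS, 0x3f000 / je dont_process
    if ((method_address & 0x3f000) == 0) {
        return method_address; // increment of zero: the method address does not update
    }
    const uint32_t increment = (method_address >> 12) & 0x3f; // shr ecx, 12 / and ecx, 0x3f
    const uint32_t address   = method_address & 0xfff;        // and METHOD_ADDRESS, 0xfff
    const uint32_t advanced  = address + increment;           // lea eax, [rcx + METHOD_ADDRESS_64]
    return (increment << 12) | advanced;                      // sal ecx, 12 / or eax, ecx
}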
diff --git a/src/video_core/macro/macro_jit_x64.h b/src/video_core/macro/macro_jit_x64.h
new file mode 100644
index 000000000..21ee157cf
--- /dev/null
+++ b/src/video_core/macro/macro_jit_x64.h
@@ -0,0 +1,100 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8#include <bitset>
9#include <xbyak.h>
10#include "common/bit_field.h"
11#include "common/common_types.h"
12#include "common/x64/xbyak_abi.h"
13#include "video_core/macro/macro.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19}
20
21/// MAX_CODE_SIZE is arbitrarily chosen based on the games currently known to boot
22constexpr size_t MAX_CODE_SIZE = 0x10000;
23
24class MacroJITx64 final : public MacroEngine {
25public:
26 explicit MacroJITx64(Engines::Maxwell3D& maxwell3d);
27
28protected:
29 std::unique_ptr<CachedMacro> Compile(const std::vector<u32>& code) override;
30
31private:
32 Engines::Maxwell3D& maxwell3d;
33};
34
35class MacroJITx64Impl : public Xbyak::CodeGenerator, public CachedMacro {
36public:
37 MacroJITx64Impl(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& code);
38 ~MacroJITx64Impl();
39
40 void Execute(const std::vector<u32>& parameters, u32 method) override;
41
42 void Compile_ALU(Macro::Opcode opcode);
43 void Compile_AddImmediate(Macro::Opcode opcode);
44 void Compile_ExtractInsert(Macro::Opcode opcode);
45 void Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode);
46 void Compile_ExtractShiftLeftRegister(Macro::Opcode opcode);
47 void Compile_Read(Macro::Opcode opcode);
48 void Compile_Branch(Macro::Opcode opcode);
49
50private:
51 void Optimizer_ScanFlags();
52
53 void Compile();
54 bool Compile_NextInstruction();
55
56 Xbyak::Reg32 Compile_FetchParameter();
57 Xbyak::Reg32 Compile_GetRegister(u32 index, Xbyak::Reg32 dst);
58 Xbyak::Reg64 Compile_GetRegister(u32 index, Xbyak::Reg64 dst);
59 void Compile_WriteCarry(Xbyak::Reg64 dst);
60
61 void Compile_ProcessResult(Macro::ResultOperation operation, u32 reg);
62 void Compile_Send(Xbyak::Reg32 value);
63
64 Macro::Opcode GetOpCode() const;
65 std::bitset<32> PersistentCallerSavedRegs() const;
66
67 struct JITState {
68 Engines::Maxwell3D* maxwell3d{};
69 std::array<u32, Macro::NUM_MACRO_REGISTERS> registers{};
70 const u32* parameters{};
71 u32 carry_flag{};
72 };
73 static_assert(offsetof(JITState, maxwell3d) == 0, "Maxwell3D is not at 0x0");
74 using ProgramType = void (*)(JITState*);
75
76 struct OptimizerState {
77 bool can_skip_carry{};
78 bool has_delayed_pc{};
79 bool zero_reg_skip{};
80 bool skip_dummy_addimmediate{};
81 bool optimize_for_method_move{};
82 };
83 OptimizerState optimizer{};
84
85 std::optional<Macro::Opcode> next_opcode{};
86 ProgramType program{nullptr};
87
88 std::array<Xbyak::Label, MAX_CODE_SIZE> labels{};
89 std::array<Xbyak::Label, MAX_CODE_SIZE> delay_skip{};
90 Xbyak::Label end_of_code{};
91
92 bool is_delay_slot{};
93 u32 pc{};
94 std::optional<u32> delayed_pc;
95
96 const std::vector<u32>& code;
97 Engines::Maxwell3D& maxwell3d;
98};
99
100} // namespace Tegra
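The generated program receives a single JITState* (declared above) and its prologue pins the fields into host registers before any macro instruction runs. In C++ terms the entry sequence emitted by MacroJITx64Impl::Compile() corresponds roughly to the sketch below; JITStateMirror and PrologueEquivalent are illustrative stand-ins, since the real JITState is a private member and none of this C++ is emitted literally.

// Rough C++ restatement of the prologue: STATE <- the JITState pointer,
// PARAMETERS <- state->parameters, REGISTERS <- &state->registers,
// and $r1 is preloaded with the first macro parameter.
#include <array>
#include <cstdint>

struct JITStateMirror {                    // mirrors MacroJITx64Impl::JITState
    void* maxwell3d{};
    std::array<uint32_t, 8> registers{};   // Macro::NUM_MACRO_REGISTERS == 8
    const uint32_t* parameters{};
    uint32_t carry_flag{};
};

void PrologueEquivalent(JITStateMirror* state) {
    const uint32_t* parameters = state->parameters; // mov PARAMETERS, qword[STATE + offsetof(parameters)]
    uint32_t* registers = state->registers.data();  // mov/add REGISTERS to &state->registers
    uint32_t next_parameter = 0;                    // xor NEXT_PARAMETER, NEXT_PARAMETER
    registers[1] = parameters[next_parameter++];    // mov dword[REGISTERS + 4], Compile_FetchParameter()
}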