diff options
| author | 2021-02-08 02:54:35 -0300 | |
|---|---|---|
| committer | 2021-07-22 21:51:22 -0400 | |
| commit | 2930dccecc933d6748772e9f51a5724fe1e6771b (patch) | |
| tree | ec4aa48062f8a2fcba31b1c64f769ddf25a87832 | |
| parent | shader: Better constant folding (diff) | |
| download | yuzu-2930dccecc933d6748772e9f51a5724fe1e6771b.tar.gz yuzu-2930dccecc933d6748772e9f51a5724fe1e6771b.tar.xz yuzu-2930dccecc933d6748772e9f51a5724fe1e6771b.zip | |
spirv: Initial SPIR-V support
Diffstat (limited to '')
21 files changed, 1400 insertions, 3299 deletions
diff --git a/externals/sirit b/externals/sirit | |||
| Subproject eefca56afd49379bdebc97ded8b480839f93088 | Subproject 1f7b70730d610cfbd5099ab93dd38ec8a78e7e3 | ||
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt index 248e90d4b..12fbcb37c 100644 --- a/src/shader_recompiler/CMakeLists.txt +++ b/src/shader_recompiler/CMakeLists.txt | |||
| @@ -1,5 +1,16 @@ | |||
| 1 | add_executable(shader_recompiler | 1 | add_executable(shader_recompiler |
| 2 | backend/spirv/emit_spirv.cpp | ||
| 2 | backend/spirv/emit_spirv.h | 3 | backend/spirv/emit_spirv.h |
| 4 | backend/spirv/emit_spirv_bitwise_conversion.cpp | ||
| 5 | backend/spirv/emit_spirv_composite.cpp | ||
| 6 | backend/spirv/emit_spirv_context_get_set.cpp | ||
| 7 | backend/spirv/emit_spirv_control_flow.cpp | ||
| 8 | backend/spirv/emit_spirv_floating_point.cpp | ||
| 9 | backend/spirv/emit_spirv_integer.cpp | ||
| 10 | backend/spirv/emit_spirv_logical.cpp | ||
| 11 | backend/spirv/emit_spirv_memory.cpp | ||
| 12 | backend/spirv/emit_spirv_select.cpp | ||
| 13 | backend/spirv/emit_spirv_undefined.cpp | ||
| 3 | environment.h | 14 | environment.h |
| 4 | exception.h | 15 | exception.h |
| 5 | file_environment.cpp | 16 | file_environment.cpp |
| @@ -72,7 +83,9 @@ add_executable(shader_recompiler | |||
| 72 | main.cpp | 83 | main.cpp |
| 73 | object_pool.h | 84 | object_pool.h |
| 74 | ) | 85 | ) |
| 75 | target_link_libraries(shader_recompiler PRIVATE fmt::fmt) | 86 | |
| 87 | target_include_directories(video_core PRIVATE sirit) | ||
| 88 | target_link_libraries(shader_recompiler PRIVATE fmt::fmt sirit) | ||
| 76 | 89 | ||
| 77 | if (MSVC) | 90 | if (MSVC) |
| 78 | target_compile_options(shader_recompiler PRIVATE | 91 | target_compile_options(shader_recompiler PRIVATE |
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp new file mode 100644 index 000000000..7c4269fad --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp | |||
| @@ -0,0 +1,134 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <numeric> | ||
| 6 | #include <type_traits> | ||
| 7 | |||
| 8 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 9 | #include "shader_recompiler/frontend/ir/basic_block.h" | ||
| 10 | #include "shader_recompiler/frontend/ir/function.h" | ||
| 11 | #include "shader_recompiler/frontend/ir/microinstruction.h" | ||
| 12 | #include "shader_recompiler/frontend/ir/program.h" | ||
| 13 | |||
| 14 | namespace Shader::Backend::SPIRV { | ||
| 15 | |||
| 16 | EmitContext::EmitContext(IR::Program& program) { | ||
| 17 | AddCapability(spv::Capability::Shader); | ||
| 18 | AddCapability(spv::Capability::Float16); | ||
| 19 | AddCapability(spv::Capability::Float64); | ||
| 20 | void_id = TypeVoid(); | ||
| 21 | |||
| 22 | u1 = Name(TypeBool(), "u1"); | ||
| 23 | f32.Define(*this, TypeFloat(32), "f32"); | ||
| 24 | u32.Define(*this, TypeInt(32, false), "u32"); | ||
| 25 | f16.Define(*this, TypeFloat(16), "f16"); | ||
| 26 | f64.Define(*this, TypeFloat(64), "f64"); | ||
| 27 | |||
| 28 | for (const IR::Function& function : program.functions) { | ||
| 29 | for (IR::Block* const block : function.blocks) { | ||
| 30 | block_label_map.emplace_back(block, OpLabel()); | ||
| 31 | } | ||
| 32 | } | ||
| 33 | std::ranges::sort(block_label_map, {}, &std::pair<IR::Block*, Id>::first); | ||
| 34 | } | ||
| 35 | |||
| 36 | EmitContext::~EmitContext() = default; | ||
| 37 | |||
| 38 | EmitSPIRV::EmitSPIRV(IR::Program& program) { | ||
| 39 | EmitContext ctx{program}; | ||
| 40 | const Id void_function{ctx.TypeFunction(ctx.void_id)}; | ||
| 41 | // FIXME: Forward declare functions (needs sirit support) | ||
| 42 | Id func{}; | ||
| 43 | for (IR::Function& function : program.functions) { | ||
| 44 | func = ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function); | ||
| 45 | for (IR::Block* const block : function.blocks) { | ||
| 46 | ctx.AddLabel(ctx.BlockLabel(block)); | ||
| 47 | for (IR::Inst& inst : block->Instructions()) { | ||
| 48 | EmitInst(ctx, &inst); | ||
| 49 | } | ||
| 50 | } | ||
| 51 | ctx.OpFunctionEnd(); | ||
| 52 | } | ||
| 53 | ctx.AddEntryPoint(spv::ExecutionModel::GLCompute, func, "main"); | ||
| 54 | |||
| 55 | std::vector<u32> result{ctx.Assemble()}; | ||
| 56 | std::FILE* file{std::fopen("shader.spv", "wb")}; | ||
| 57 | std::fwrite(result.data(), sizeof(u32), result.size(), file); | ||
| 58 | std::fclose(file); | ||
| 59 | std::system("spirv-dis shader.spv"); | ||
| 60 | std::system("spirv-val shader.spv"); | ||
| 61 | } | ||
| 62 | |||
| 63 | template <auto method> | ||
| 64 | static void Invoke(EmitSPIRV& emit, EmitContext& ctx, IR::Inst* inst) { | ||
| 65 | using M = decltype(method); | ||
| 66 | using std::is_invocable_r_v; | ||
| 67 | if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&>) { | ||
| 68 | ctx.Define(inst, (emit.*method)(ctx)); | ||
| 69 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, Id>) { | ||
| 70 | ctx.Define(inst, (emit.*method)(ctx, ctx.Def(inst->Arg(0)))); | ||
| 71 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, Id, Id>) { | ||
| 72 | ctx.Define(inst, (emit.*method)(ctx, ctx.Def(inst->Arg(0)), ctx.Def(inst->Arg(1)))); | ||
| 73 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, Id, Id, Id>) { | ||
| 74 | ctx.Define(inst, (emit.*method)(ctx, ctx.Def(inst->Arg(0)), ctx.Def(inst->Arg(1)), | ||
| 75 | ctx.Def(inst->Arg(2)))); | ||
| 76 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, IR::Inst*, Id, Id>) { | ||
| 77 | ctx.Define(inst, (emit.*method)(ctx, inst, ctx.Def(inst->Arg(0)), ctx.Def(inst->Arg(1)))); | ||
| 78 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, IR::Inst*, Id, Id, Id>) { | ||
| 79 | ctx.Define(inst, (emit.*method)(ctx, inst, ctx.Def(inst->Arg(0)), ctx.Def(inst->Arg(1)), | ||
| 80 | ctx.Def(inst->Arg(2)))); | ||
| 81 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, Id, u32>) { | ||
| 82 | ctx.Define(inst, (emit.*method)(ctx, ctx.Def(inst->Arg(0)), inst->Arg(1).U32())); | ||
| 83 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, const IR::Value&>) { | ||
| 84 | ctx.Define(inst, (emit.*method)(ctx, inst->Arg(0))); | ||
| 85 | } else if constexpr (is_invocable_r_v<Id, M, EmitSPIRV&, EmitContext&, const IR::Value&, | ||
| 86 | const IR::Value&>) { | ||
| 87 | ctx.Define(inst, (emit.*method)(ctx, inst->Arg(0), inst->Arg(1))); | ||
| 88 | } else if constexpr (is_invocable_r_v<void, M, EmitSPIRV&, EmitContext&, IR::Inst*>) { | ||
| 89 | (emit.*method)(ctx, inst); | ||
| 90 | } else if constexpr (is_invocable_r_v<void, M, EmitSPIRV&, EmitContext&>) { | ||
| 91 | (emit.*method)(ctx); | ||
| 92 | } else { | ||
| 93 | static_assert(false, "Bad format"); | ||
| 94 | } | ||
| 95 | } | ||
| 96 | |||
| 97 | void EmitSPIRV::EmitInst(EmitContext& ctx, IR::Inst* inst) { | ||
| 98 | switch (inst->Opcode()) { | ||
| 99 | #define OPCODE(name, result_type, ...) \ | ||
| 100 | case IR::Opcode::name: \ | ||
| 101 | return Invoke<&EmitSPIRV::Emit##name>(*this, ctx, inst); | ||
| 102 | #include "shader_recompiler/frontend/ir/opcodes.inc" | ||
| 103 | #undef OPCODE | ||
| 104 | } | ||
| 105 | throw LogicError("Invalid opcode {}", inst->Opcode()); | ||
| 106 | } | ||
| 107 | |||
| 108 | void EmitSPIRV::EmitPhi(EmitContext&) { | ||
| 109 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 110 | } | ||
| 111 | |||
| 112 | void EmitSPIRV::EmitVoid(EmitContext&) {} | ||
| 113 | |||
| 114 | void EmitSPIRV::EmitIdentity(EmitContext&) { | ||
| 115 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 116 | } | ||
| 117 | |||
| 118 | void EmitSPIRV::EmitGetZeroFromOp(EmitContext&) { | ||
| 119 | throw LogicError("Unreachable instruction"); | ||
| 120 | } | ||
| 121 | |||
| 122 | void EmitSPIRV::EmitGetSignFromOp(EmitContext&) { | ||
| 123 | throw LogicError("Unreachable instruction"); | ||
| 124 | } | ||
| 125 | |||
| 126 | void EmitSPIRV::EmitGetCarryFromOp(EmitContext&) { | ||
| 127 | throw LogicError("Unreachable instruction"); | ||
| 128 | } | ||
| 129 | |||
| 130 | void EmitSPIRV::EmitGetOverflowFromOp(EmitContext&) { | ||
| 131 | throw LogicError("Unreachable instruction"); | ||
| 132 | } | ||
| 133 | |||
| 134 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.h b/src/shader_recompiler/backend/spirv/emit_spirv.h index 99cc8e08a..3f4b68a7d 100644 --- a/src/shader_recompiler/backend/spirv/emit_spirv.h +++ b/src/shader_recompiler/backend/spirv/emit_spirv.h | |||
| @@ -4,18 +4,326 @@ | |||
| 4 | 4 | ||
| 5 | #pragma once | 5 | #pragma once |
| 6 | 6 | ||
| 7 | #include <sirit/sirit.h> | ||
| 8 | |||
| 9 | #include <boost/container/flat_map.hpp> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 7 | #include "shader_recompiler/frontend/ir/microinstruction.h" | 12 | #include "shader_recompiler/frontend/ir/microinstruction.h" |
| 8 | #include "shader_recompiler/frontend/ir/program.h" | 13 | #include "shader_recompiler/frontend/ir/program.h" |
| 9 | 14 | ||
| 10 | namespace Shader::Backend::SPIRV { | 15 | namespace Shader::Backend::SPIRV { |
| 11 | 16 | ||
| 17 | using Sirit::Id; | ||
| 18 | |||
| 19 | class DefMap { | ||
| 20 | public: | ||
| 21 | void Define(IR::Inst* inst, Id def_id) { | ||
| 22 | const InstInfo info{.use_count{inst->UseCount()}, .def_id{def_id}}; | ||
| 23 | const auto it{map.insert(map.end(), std::make_pair(inst, info))}; | ||
| 24 | if (it == map.end()) { | ||
| 25 | throw LogicError("Defining already defined instruction"); | ||
| 26 | } | ||
| 27 | } | ||
| 28 | |||
| 29 | [[nodiscard]] Id Consume(IR::Inst* inst) { | ||
| 30 | const auto it{map.find(inst)}; | ||
| 31 | if (it == map.end()) { | ||
| 32 | throw LogicError("Consuming undefined instruction"); | ||
| 33 | } | ||
| 34 | const Id def_id{it->second.def_id}; | ||
| 35 | if (--it->second.use_count == 0) { | ||
| 36 | map.erase(it); | ||
| 37 | } | ||
| 38 | return def_id; | ||
| 39 | } | ||
| 40 | |||
| 41 | private: | ||
| 42 | struct InstInfo { | ||
| 43 | int use_count; | ||
| 44 | Id def_id; | ||
| 45 | }; | ||
| 46 | |||
| 47 | boost::container::flat_map<IR::Inst*, InstInfo> map; | ||
| 48 | }; | ||
| 49 | |||
| 50 | class VectorTypes { | ||
| 51 | public: | ||
| 52 | void Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) { | ||
| 53 | defs[0] = sirit_ctx.Name(base_type, name); | ||
| 54 | |||
| 55 | std::array<char, 6> def_name; | ||
| 56 | for (int i = 1; i < 4; ++i) { | ||
| 57 | const std::string_view def_name_view( | ||
| 58 | def_name.data(), | ||
| 59 | fmt::format_to_n(def_name.data(), def_name.size(), "{}x{}", name, i + 1).size); | ||
| 60 | defs[i] = sirit_ctx.Name(sirit_ctx.TypeVector(base_type, i + 1), def_name_view); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | |||
| 64 | [[nodiscard]] Id operator[](size_t size) const noexcept { | ||
| 65 | return defs[size - 1]; | ||
| 66 | } | ||
| 67 | |||
| 68 | private: | ||
| 69 | std::array<Id, 4> defs; | ||
| 70 | }; | ||
| 71 | |||
| 72 | class EmitContext final : public Sirit::Module { | ||
| 73 | public: | ||
| 74 | explicit EmitContext(IR::Program& program); | ||
| 75 | ~EmitContext(); | ||
| 76 | |||
| 77 | [[nodiscard]] Id Def(const IR::Value& value) { | ||
| 78 | if (!value.IsImmediate()) { | ||
| 79 | return def_map.Consume(value.Inst()); | ||
| 80 | } | ||
| 81 | switch (value.Type()) { | ||
| 82 | case IR::Type::U32: | ||
| 83 | return Constant(u32[1], value.U32()); | ||
| 84 | case IR::Type::F32: | ||
| 85 | return Constant(f32[1], value.F32()); | ||
| 86 | default: | ||
| 87 | throw NotImplementedException("Immediate type {}", value.Type()); | ||
| 88 | } | ||
| 89 | } | ||
| 90 | |||
| 91 | void Define(IR::Inst* inst, Id def_id) { | ||
| 92 | def_map.Define(inst, def_id); | ||
| 93 | } | ||
| 94 | |||
| 95 | [[nodiscard]] Id BlockLabel(IR::Block* block) const { | ||
| 96 | const auto it{std::ranges::lower_bound(block_label_map, block, {}, | ||
| 97 | &std::pair<IR::Block*, Id>::first)}; | ||
| 98 | if (it == block_label_map.end()) { | ||
| 99 | throw LogicError("Undefined block"); | ||
| 100 | } | ||
| 101 | return it->second; | ||
| 102 | } | ||
| 103 | |||
| 104 | Id void_id{}; | ||
| 105 | Id u1{}; | ||
| 106 | VectorTypes f32; | ||
| 107 | VectorTypes u32; | ||
| 108 | VectorTypes f16; | ||
| 109 | VectorTypes f64; | ||
| 110 | |||
| 111 | Id workgroup_id{}; | ||
| 112 | Id local_invocation_id{}; | ||
| 113 | |||
| 114 | private: | ||
| 115 | DefMap def_map; | ||
| 116 | std::vector<std::pair<IR::Block*, Id>> block_label_map; | ||
| 117 | }; | ||
| 118 | |||
| 12 | class EmitSPIRV { | 119 | class EmitSPIRV { |
| 13 | public: | 120 | public: |
| 121 | explicit EmitSPIRV(IR::Program& program); | ||
| 122 | |||
| 14 | private: | 123 | private: |
| 124 | void EmitInst(EmitContext& ctx, IR::Inst* inst); | ||
| 125 | |||
| 15 | // Microinstruction emitters | 126 | // Microinstruction emitters |
| 16 | #define OPCODE(name, result_type, ...) void Emit##name(EmitContext& ctx, IR::Inst* inst); | 127 | void EmitPhi(EmitContext& ctx); |
| 17 | #include "shader_recompiler/frontend/ir/opcodes.inc" | 128 | void EmitVoid(EmitContext& ctx); |
| 18 | #undef OPCODE | 129 | void EmitIdentity(EmitContext& ctx); |
| 130 | void EmitBranch(EmitContext& ctx, IR::Inst* inst); | ||
| 131 | void EmitBranchConditional(EmitContext& ctx, IR::Inst* inst); | ||
| 132 | void EmitExit(EmitContext& ctx); | ||
| 133 | void EmitReturn(EmitContext& ctx); | ||
| 134 | void EmitUnreachable(EmitContext& ctx); | ||
| 135 | void EmitGetRegister(EmitContext& ctx); | ||
| 136 | void EmitSetRegister(EmitContext& ctx); | ||
| 137 | void EmitGetPred(EmitContext& ctx); | ||
| 138 | void EmitSetPred(EmitContext& ctx); | ||
| 139 | Id EmitGetCbuf(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset); | ||
| 140 | void EmitGetAttribute(EmitContext& ctx); | ||
| 141 | void EmitSetAttribute(EmitContext& ctx); | ||
| 142 | void EmitGetAttributeIndexed(EmitContext& ctx); | ||
| 143 | void EmitSetAttributeIndexed(EmitContext& ctx); | ||
| 144 | void EmitGetZFlag(EmitContext& ctx); | ||
| 145 | void EmitGetSFlag(EmitContext& ctx); | ||
| 146 | void EmitGetCFlag(EmitContext& ctx); | ||
| 147 | void EmitGetOFlag(EmitContext& ctx); | ||
| 148 | void EmitSetZFlag(EmitContext& ctx); | ||
| 149 | void EmitSetSFlag(EmitContext& ctx); | ||
| 150 | void EmitSetCFlag(EmitContext& ctx); | ||
| 151 | void EmitSetOFlag(EmitContext& ctx); | ||
| 152 | Id EmitWorkgroupId(EmitContext& ctx); | ||
| 153 | Id EmitLocalInvocationId(EmitContext& ctx); | ||
| 154 | void EmitUndef1(EmitContext& ctx); | ||
| 155 | void EmitUndef8(EmitContext& ctx); | ||
| 156 | void EmitUndef16(EmitContext& ctx); | ||
| 157 | void EmitUndef32(EmitContext& ctx); | ||
| 158 | void EmitUndef64(EmitContext& ctx); | ||
| 159 | void EmitLoadGlobalU8(EmitContext& ctx); | ||
| 160 | void EmitLoadGlobalS8(EmitContext& ctx); | ||
| 161 | void EmitLoadGlobalU16(EmitContext& ctx); | ||
| 162 | void EmitLoadGlobalS16(EmitContext& ctx); | ||
| 163 | void EmitLoadGlobal32(EmitContext& ctx); | ||
| 164 | void EmitLoadGlobal64(EmitContext& ctx); | ||
| 165 | void EmitLoadGlobal128(EmitContext& ctx); | ||
| 166 | void EmitWriteGlobalU8(EmitContext& ctx); | ||
| 167 | void EmitWriteGlobalS8(EmitContext& ctx); | ||
| 168 | void EmitWriteGlobalU16(EmitContext& ctx); | ||
| 169 | void EmitWriteGlobalS16(EmitContext& ctx); | ||
| 170 | void EmitWriteGlobal32(EmitContext& ctx); | ||
| 171 | void EmitWriteGlobal64(EmitContext& ctx); | ||
| 172 | void EmitWriteGlobal128(EmitContext& ctx); | ||
| 173 | void EmitLoadStorageU8(EmitContext& ctx); | ||
| 174 | void EmitLoadStorageS8(EmitContext& ctx); | ||
| 175 | void EmitLoadStorageU16(EmitContext& ctx); | ||
| 176 | void EmitLoadStorageS16(EmitContext& ctx); | ||
| 177 | Id EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset); | ||
| 178 | void EmitLoadStorage64(EmitContext& ctx); | ||
| 179 | void EmitLoadStorage128(EmitContext& ctx); | ||
| 180 | void EmitWriteStorageU8(EmitContext& ctx); | ||
| 181 | void EmitWriteStorageS8(EmitContext& ctx); | ||
| 182 | void EmitWriteStorageU16(EmitContext& ctx); | ||
| 183 | void EmitWriteStorageS16(EmitContext& ctx); | ||
| 184 | void EmitWriteStorage32(EmitContext& ctx); | ||
| 185 | void EmitWriteStorage64(EmitContext& ctx); | ||
| 186 | void EmitWriteStorage128(EmitContext& ctx); | ||
| 187 | void EmitCompositeConstructU32x2(EmitContext& ctx); | ||
| 188 | void EmitCompositeConstructU32x3(EmitContext& ctx); | ||
| 189 | void EmitCompositeConstructU32x4(EmitContext& ctx); | ||
| 190 | void EmitCompositeExtractU32x2(EmitContext& ctx); | ||
| 191 | Id EmitCompositeExtractU32x3(EmitContext& ctx, Id vector, u32 index); | ||
| 192 | void EmitCompositeExtractU32x4(EmitContext& ctx); | ||
| 193 | void EmitCompositeConstructF16x2(EmitContext& ctx); | ||
| 194 | void EmitCompositeConstructF16x3(EmitContext& ctx); | ||
| 195 | void EmitCompositeConstructF16x4(EmitContext& ctx); | ||
| 196 | void EmitCompositeExtractF16x2(EmitContext& ctx); | ||
| 197 | void EmitCompositeExtractF16x3(EmitContext& ctx); | ||
| 198 | void EmitCompositeExtractF16x4(EmitContext& ctx); | ||
| 199 | void EmitCompositeConstructF32x2(EmitContext& ctx); | ||
| 200 | void EmitCompositeConstructF32x3(EmitContext& ctx); | ||
| 201 | void EmitCompositeConstructF32x4(EmitContext& ctx); | ||
| 202 | void EmitCompositeExtractF32x2(EmitContext& ctx); | ||
| 203 | void EmitCompositeExtractF32x3(EmitContext& ctx); | ||
| 204 | void EmitCompositeExtractF32x4(EmitContext& ctx); | ||
| 205 | void EmitCompositeConstructF64x2(EmitContext& ctx); | ||
| 206 | void EmitCompositeConstructF64x3(EmitContext& ctx); | ||
| 207 | void EmitCompositeConstructF64x4(EmitContext& ctx); | ||
| 208 | void EmitCompositeExtractF64x2(EmitContext& ctx); | ||
| 209 | void EmitCompositeExtractF64x3(EmitContext& ctx); | ||
| 210 | void EmitCompositeExtractF64x4(EmitContext& ctx); | ||
| 211 | void EmitSelect8(EmitContext& ctx); | ||
| 212 | void EmitSelect16(EmitContext& ctx); | ||
| 213 | void EmitSelect32(EmitContext& ctx); | ||
| 214 | void EmitSelect64(EmitContext& ctx); | ||
| 215 | void EmitBitCastU16F16(EmitContext& ctx); | ||
| 216 | Id EmitBitCastU32F32(EmitContext& ctx, Id value); | ||
| 217 | void EmitBitCastU64F64(EmitContext& ctx); | ||
| 218 | void EmitBitCastF16U16(EmitContext& ctx); | ||
| 219 | Id EmitBitCastF32U32(EmitContext& ctx, Id value); | ||
| 220 | void EmitBitCastF64U64(EmitContext& ctx); | ||
| 221 | void EmitPackUint2x32(EmitContext& ctx); | ||
| 222 | void EmitUnpackUint2x32(EmitContext& ctx); | ||
| 223 | void EmitPackFloat2x16(EmitContext& ctx); | ||
| 224 | void EmitUnpackFloat2x16(EmitContext& ctx); | ||
| 225 | void EmitPackDouble2x32(EmitContext& ctx); | ||
| 226 | void EmitUnpackDouble2x32(EmitContext& ctx); | ||
| 227 | void EmitGetZeroFromOp(EmitContext& ctx); | ||
| 228 | void EmitGetSignFromOp(EmitContext& ctx); | ||
| 229 | void EmitGetCarryFromOp(EmitContext& ctx); | ||
| 230 | void EmitGetOverflowFromOp(EmitContext& ctx); | ||
| 231 | void EmitFPAbs16(EmitContext& ctx); | ||
| 232 | void EmitFPAbs32(EmitContext& ctx); | ||
| 233 | void EmitFPAbs64(EmitContext& ctx); | ||
| 234 | Id EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 235 | Id EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 236 | Id EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 237 | Id EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c); | ||
| 238 | Id EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c); | ||
| 239 | Id EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c); | ||
| 240 | void EmitFPMax32(EmitContext& ctx); | ||
| 241 | void EmitFPMax64(EmitContext& ctx); | ||
| 242 | void EmitFPMin32(EmitContext& ctx); | ||
| 243 | void EmitFPMin64(EmitContext& ctx); | ||
| 244 | Id EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 245 | Id EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 246 | Id EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 247 | void EmitFPNeg16(EmitContext& ctx); | ||
| 248 | void EmitFPNeg32(EmitContext& ctx); | ||
| 249 | void EmitFPNeg64(EmitContext& ctx); | ||
| 250 | void EmitFPRecip32(EmitContext& ctx); | ||
| 251 | void EmitFPRecip64(EmitContext& ctx); | ||
| 252 | void EmitFPRecipSqrt32(EmitContext& ctx); | ||
| 253 | void EmitFPRecipSqrt64(EmitContext& ctx); | ||
| 254 | void EmitFPSqrt(EmitContext& ctx); | ||
| 255 | void EmitFPSin(EmitContext& ctx); | ||
| 256 | void EmitFPSinNotReduced(EmitContext& ctx); | ||
| 257 | void EmitFPExp2(EmitContext& ctx); | ||
| 258 | void EmitFPExp2NotReduced(EmitContext& ctx); | ||
| 259 | void EmitFPCos(EmitContext& ctx); | ||
| 260 | void EmitFPCosNotReduced(EmitContext& ctx); | ||
| 261 | void EmitFPLog2(EmitContext& ctx); | ||
| 262 | void EmitFPSaturate16(EmitContext& ctx); | ||
| 263 | void EmitFPSaturate32(EmitContext& ctx); | ||
| 264 | void EmitFPSaturate64(EmitContext& ctx); | ||
| 265 | void EmitFPRoundEven16(EmitContext& ctx); | ||
| 266 | void EmitFPRoundEven32(EmitContext& ctx); | ||
| 267 | void EmitFPRoundEven64(EmitContext& ctx); | ||
| 268 | void EmitFPFloor16(EmitContext& ctx); | ||
| 269 | void EmitFPFloor32(EmitContext& ctx); | ||
| 270 | void EmitFPFloor64(EmitContext& ctx); | ||
| 271 | void EmitFPCeil16(EmitContext& ctx); | ||
| 272 | void EmitFPCeil32(EmitContext& ctx); | ||
| 273 | void EmitFPCeil64(EmitContext& ctx); | ||
| 274 | void EmitFPTrunc16(EmitContext& ctx); | ||
| 275 | void EmitFPTrunc32(EmitContext& ctx); | ||
| 276 | void EmitFPTrunc64(EmitContext& ctx); | ||
| 277 | Id EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b); | ||
| 278 | void EmitIAdd64(EmitContext& ctx); | ||
| 279 | Id EmitISub32(EmitContext& ctx, Id a, Id b); | ||
| 280 | void EmitISub64(EmitContext& ctx); | ||
| 281 | Id EmitIMul32(EmitContext& ctx, Id a, Id b); | ||
| 282 | void EmitINeg32(EmitContext& ctx); | ||
| 283 | void EmitIAbs32(EmitContext& ctx); | ||
| 284 | Id EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift); | ||
| 285 | void EmitShiftRightLogical32(EmitContext& ctx); | ||
| 286 | void EmitShiftRightArithmetic32(EmitContext& ctx); | ||
| 287 | void EmitBitwiseAnd32(EmitContext& ctx); | ||
| 288 | void EmitBitwiseOr32(EmitContext& ctx); | ||
| 289 | void EmitBitwiseXor32(EmitContext& ctx); | ||
| 290 | void EmitBitFieldInsert(EmitContext& ctx); | ||
| 291 | void EmitBitFieldSExtract(EmitContext& ctx); | ||
| 292 | Id EmitBitFieldUExtract(EmitContext& ctx, Id base, Id offset, Id count); | ||
| 293 | void EmitSLessThan(EmitContext& ctx); | ||
| 294 | void EmitULessThan(EmitContext& ctx); | ||
| 295 | void EmitIEqual(EmitContext& ctx); | ||
| 296 | void EmitSLessThanEqual(EmitContext& ctx); | ||
| 297 | void EmitULessThanEqual(EmitContext& ctx); | ||
| 298 | void EmitSGreaterThan(EmitContext& ctx); | ||
| 299 | void EmitUGreaterThan(EmitContext& ctx); | ||
| 300 | void EmitINotEqual(EmitContext& ctx); | ||
| 301 | void EmitSGreaterThanEqual(EmitContext& ctx); | ||
| 302 | Id EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs); | ||
| 303 | void EmitLogicalOr(EmitContext& ctx); | ||
| 304 | void EmitLogicalAnd(EmitContext& ctx); | ||
| 305 | void EmitLogicalXor(EmitContext& ctx); | ||
| 306 | void EmitLogicalNot(EmitContext& ctx); | ||
| 307 | void EmitConvertS16F16(EmitContext& ctx); | ||
| 308 | void EmitConvertS16F32(EmitContext& ctx); | ||
| 309 | void EmitConvertS16F64(EmitContext& ctx); | ||
| 310 | void EmitConvertS32F16(EmitContext& ctx); | ||
| 311 | void EmitConvertS32F32(EmitContext& ctx); | ||
| 312 | void EmitConvertS32F64(EmitContext& ctx); | ||
| 313 | void EmitConvertS64F16(EmitContext& ctx); | ||
| 314 | void EmitConvertS64F32(EmitContext& ctx); | ||
| 315 | void EmitConvertS64F64(EmitContext& ctx); | ||
| 316 | void EmitConvertU16F16(EmitContext& ctx); | ||
| 317 | void EmitConvertU16F32(EmitContext& ctx); | ||
| 318 | void EmitConvertU16F64(EmitContext& ctx); | ||
| 319 | void EmitConvertU32F16(EmitContext& ctx); | ||
| 320 | void EmitConvertU32F32(EmitContext& ctx); | ||
| 321 | void EmitConvertU32F64(EmitContext& ctx); | ||
| 322 | void EmitConvertU64F16(EmitContext& ctx); | ||
| 323 | void EmitConvertU64F32(EmitContext& ctx); | ||
| 324 | void EmitConvertU64F64(EmitContext& ctx); | ||
| 325 | void EmitConvertU64U32(EmitContext& ctx); | ||
| 326 | void EmitConvertU32U64(EmitContext& ctx); | ||
| 19 | }; | 327 | }; |
| 20 | 328 | ||
| 21 | } // namespace Shader::Backend::SPIRV | 329 | } // namespace Shader::Backend::SPIRV |
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp new file mode 100644 index 000000000..447df5b8c --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_bitwise_conversion.cpp | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitBitCastU16F16(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | Id EmitSPIRV::EmitBitCastU32F32(EmitContext& ctx, Id value) { | ||
| 14 | return ctx.OpBitcast(ctx.u32[1], value); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitBitCastU64F64(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitBitCastF16U16(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | Id EmitSPIRV::EmitBitCastF32U32(EmitContext& ctx, Id value) { | ||
| 26 | return ctx.OpBitcast(ctx.f32[1], value); | ||
| 27 | } | ||
| 28 | |||
| 29 | void EmitSPIRV::EmitBitCastF64U64(EmitContext&) { | ||
| 30 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 31 | } | ||
| 32 | |||
| 33 | void EmitSPIRV::EmitPackUint2x32(EmitContext&) { | ||
| 34 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 35 | } | ||
| 36 | |||
| 37 | void EmitSPIRV::EmitUnpackUint2x32(EmitContext&) { | ||
| 38 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 39 | } | ||
| 40 | |||
| 41 | void EmitSPIRV::EmitPackFloat2x16(EmitContext&) { | ||
| 42 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 43 | } | ||
| 44 | |||
| 45 | void EmitSPIRV::EmitUnpackFloat2x16(EmitContext&) { | ||
| 46 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 47 | } | ||
| 48 | |||
| 49 | void EmitSPIRV::EmitPackDouble2x32(EmitContext&) { | ||
| 50 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 51 | } | ||
| 52 | |||
| 53 | void EmitSPIRV::EmitUnpackDouble2x32(EmitContext&) { | ||
| 54 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 55 | } | ||
| 56 | |||
| 57 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp new file mode 100644 index 000000000..b190cf876 --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_composite.cpp | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitCompositeConstructU32x2(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitCompositeConstructU32x3(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitCompositeConstructU32x4(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitCompositeExtractU32x2(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | Id EmitSPIRV::EmitCompositeExtractU32x3(EmitContext& ctx, Id vector, u32 index) { | ||
| 26 | return ctx.OpCompositeExtract(ctx.u32[1], vector, index); | ||
| 27 | } | ||
| 28 | |||
| 29 | void EmitSPIRV::EmitCompositeExtractU32x4(EmitContext&) { | ||
| 30 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 31 | } | ||
| 32 | |||
| 33 | void EmitSPIRV::EmitCompositeConstructF16x2(EmitContext&) { | ||
| 34 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 35 | } | ||
| 36 | |||
| 37 | void EmitSPIRV::EmitCompositeConstructF16x3(EmitContext&) { | ||
| 38 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 39 | } | ||
| 40 | |||
| 41 | void EmitSPIRV::EmitCompositeConstructF16x4(EmitContext&) { | ||
| 42 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 43 | } | ||
| 44 | |||
| 45 | void EmitSPIRV::EmitCompositeExtractF16x2(EmitContext&) { | ||
| 46 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 47 | } | ||
| 48 | |||
| 49 | void EmitSPIRV::EmitCompositeExtractF16x3(EmitContext&) { | ||
| 50 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 51 | } | ||
| 52 | |||
| 53 | void EmitSPIRV::EmitCompositeExtractF16x4(EmitContext&) { | ||
| 54 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 55 | } | ||
| 56 | |||
| 57 | void EmitSPIRV::EmitCompositeConstructF32x2(EmitContext&) { | ||
| 58 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 59 | } | ||
| 60 | |||
| 61 | void EmitSPIRV::EmitCompositeConstructF32x3(EmitContext&) { | ||
| 62 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 63 | } | ||
| 64 | |||
| 65 | void EmitSPIRV::EmitCompositeConstructF32x4(EmitContext&) { | ||
| 66 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 67 | } | ||
| 68 | |||
| 69 | void EmitSPIRV::EmitCompositeExtractF32x2(EmitContext&) { | ||
| 70 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 71 | } | ||
| 72 | |||
| 73 | void EmitSPIRV::EmitCompositeExtractF32x3(EmitContext&) { | ||
| 74 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 75 | } | ||
| 76 | |||
| 77 | void EmitSPIRV::EmitCompositeExtractF32x4(EmitContext&) { | ||
| 78 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 79 | } | ||
| 80 | |||
| 81 | void EmitSPIRV::EmitCompositeConstructF64x2(EmitContext&) { | ||
| 82 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 83 | } | ||
| 84 | |||
| 85 | void EmitSPIRV::EmitCompositeConstructF64x3(EmitContext&) { | ||
| 86 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 87 | } | ||
| 88 | |||
| 89 | void EmitSPIRV::EmitCompositeConstructF64x4(EmitContext&) { | ||
| 90 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 91 | } | ||
| 92 | |||
| 93 | void EmitSPIRV::EmitCompositeExtractF64x2(EmitContext&) { | ||
| 94 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 95 | } | ||
| 96 | |||
| 97 | void EmitSPIRV::EmitCompositeExtractF64x3(EmitContext&) { | ||
| 98 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 99 | } | ||
| 100 | |||
| 101 | void EmitSPIRV::EmitCompositeExtractF64x4(EmitContext&) { | ||
| 102 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 103 | } | ||
| 104 | |||
| 105 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp new file mode 100644 index 000000000..b121305ea --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp | |||
| @@ -0,0 +1,102 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitGetRegister(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitSetRegister(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitGetPred(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitSetPred(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | Id EmitSPIRV::EmitGetCbuf(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) { | ||
| 26 | if (!binding.IsImmediate()) { | ||
| 27 | throw NotImplementedException("Constant buffer indexing"); | ||
| 28 | } | ||
| 29 | if (!offset.IsImmediate()) { | ||
| 30 | throw NotImplementedException("Variable constant buffer offset"); | ||
| 31 | } | ||
| 32 | return ctx.Name(ctx.OpUndef(ctx.u32[1]), "unimplemented_cbuf"); | ||
| 33 | } | ||
| 34 | |||
| 35 | void EmitSPIRV::EmitGetAttribute(EmitContext&) { | ||
| 36 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 37 | } | ||
| 38 | |||
| 39 | void EmitSPIRV::EmitSetAttribute(EmitContext&) { | ||
| 40 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 41 | } | ||
| 42 | |||
| 43 | void EmitSPIRV::EmitGetAttributeIndexed(EmitContext&) { | ||
| 44 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 45 | } | ||
| 46 | |||
| 47 | void EmitSPIRV::EmitSetAttributeIndexed(EmitContext&) { | ||
| 48 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 49 | } | ||
| 50 | |||
| 51 | void EmitSPIRV::EmitGetZFlag(EmitContext&) { | ||
| 52 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 53 | } | ||
| 54 | |||
| 55 | void EmitSPIRV::EmitGetSFlag(EmitContext&) { | ||
| 56 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 57 | } | ||
| 58 | |||
| 59 | void EmitSPIRV::EmitGetCFlag(EmitContext&) { | ||
| 60 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 61 | } | ||
| 62 | |||
| 63 | void EmitSPIRV::EmitGetOFlag(EmitContext&) { | ||
| 64 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 65 | } | ||
| 66 | |||
| 67 | void EmitSPIRV::EmitSetZFlag(EmitContext&) { | ||
| 68 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 69 | } | ||
| 70 | |||
| 71 | void EmitSPIRV::EmitSetSFlag(EmitContext&) { | ||
| 72 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 73 | } | ||
| 74 | |||
| 75 | void EmitSPIRV::EmitSetCFlag(EmitContext&) { | ||
| 76 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 77 | } | ||
| 78 | |||
| 79 | void EmitSPIRV::EmitSetOFlag(EmitContext&) { | ||
| 80 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 81 | } | ||
| 82 | |||
| 83 | Id EmitSPIRV::EmitWorkgroupId(EmitContext& ctx) { | ||
| 84 | if (ctx.workgroup_id.value == 0) { | ||
| 85 | ctx.workgroup_id = ctx.AddGlobalVariable( | ||
| 86 | ctx.TypePointer(spv::StorageClass::Input, ctx.u32[3]), spv::StorageClass::Input); | ||
| 87 | ctx.Decorate(ctx.workgroup_id, spv::Decoration::BuiltIn, spv::BuiltIn::WorkgroupId); | ||
| 88 | } | ||
| 89 | return ctx.OpLoad(ctx.u32[3], ctx.workgroup_id); | ||
| 90 | } | ||
| 91 | |||
| 92 | Id EmitSPIRV::EmitLocalInvocationId(EmitContext& ctx) { | ||
| 93 | if (ctx.local_invocation_id.value == 0) { | ||
| 94 | ctx.local_invocation_id = ctx.AddGlobalVariable( | ||
| 95 | ctx.TypePointer(spv::StorageClass::Input, ctx.u32[3]), spv::StorageClass::Input); | ||
| 96 | ctx.Decorate(ctx.local_invocation_id, spv::Decoration::BuiltIn, | ||
| 97 | spv::BuiltIn::LocalInvocationId); | ||
| 98 | } | ||
| 99 | return ctx.OpLoad(ctx.u32[3], ctx.local_invocation_id); | ||
| 100 | } | ||
| 101 | |||
| 102 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp new file mode 100644 index 000000000..770fe113c --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_control_flow.cpp | |||
| @@ -0,0 +1,30 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitBranch(EmitContext& ctx, IR::Inst* inst) { | ||
| 10 | ctx.OpBranch(ctx.BlockLabel(inst->Arg(0).Label())); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitBranchConditional(EmitContext& ctx, IR::Inst* inst) { | ||
| 14 | ctx.OpBranchConditional(ctx.Def(inst->Arg(0)), ctx.BlockLabel(inst->Arg(1).Label()), | ||
| 15 | ctx.BlockLabel(inst->Arg(2).Label())); | ||
| 16 | } | ||
| 17 | |||
| 18 | void EmitSPIRV::EmitExit(EmitContext& ctx) { | ||
| 19 | ctx.OpReturn(); | ||
| 20 | } | ||
| 21 | |||
| 22 | void EmitSPIRV::EmitReturn(EmitContext&) { | ||
| 23 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 24 | } | ||
| 25 | |||
| 26 | void EmitSPIRV::EmitUnreachable(EmitContext&) { | ||
| 27 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 28 | } | ||
| 29 | |||
| 30 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp new file mode 100644 index 000000000..9c39537e2 --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_floating_point.cpp | |||
| @@ -0,0 +1,220 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | #include "shader_recompiler/frontend/ir/modifiers.h" | ||
| 7 | |||
| 8 | namespace Shader::Backend::SPIRV { | ||
| 9 | namespace { | ||
| 10 | Id Decorate(EmitContext& ctx, IR::Inst* inst, Id op) { | ||
| 11 | const auto flags{inst->Flags<IR::FpControl>()}; | ||
| 12 | if (flags.no_contraction) { | ||
| 13 | ctx.Decorate(op, spv::Decoration::NoContraction); | ||
| 14 | } | ||
| 15 | switch (flags.rounding) { | ||
| 16 | case IR::FpRounding::RN: | ||
| 17 | break; | ||
| 18 | case IR::FpRounding::RM: | ||
| 19 | ctx.Decorate(op, spv::Decoration::FPRoundingMode, spv::FPRoundingMode::RTN); | ||
| 20 | break; | ||
| 21 | case IR::FpRounding::RP: | ||
| 22 | ctx.Decorate(op, spv::Decoration::FPRoundingMode, spv::FPRoundingMode::RTP); | ||
| 23 | break; | ||
| 24 | case IR::FpRounding::RZ: | ||
| 25 | ctx.Decorate(op, spv::Decoration::FPRoundingMode, spv::FPRoundingMode::RTZ); | ||
| 26 | break; | ||
| 27 | } | ||
| 28 | if (flags.fmz_mode != IR::FmzMode::FTZ) { | ||
| 29 | throw NotImplementedException("Denorm management not implemented"); | ||
| 30 | } | ||
| 31 | return op; | ||
| 32 | } | ||
| 33 | |||
| 34 | } // Anonymous namespace | ||
| 35 | |||
| 36 | void EmitSPIRV::EmitFPAbs16(EmitContext&) { | ||
| 37 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 38 | } | ||
| 39 | |||
| 40 | void EmitSPIRV::EmitFPAbs32(EmitContext&) { | ||
| 41 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 42 | } | ||
| 43 | |||
| 44 | void EmitSPIRV::EmitFPAbs64(EmitContext&) { | ||
| 45 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 46 | } | ||
| 47 | |||
| 48 | Id EmitSPIRV::EmitFPAdd16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 49 | return Decorate(ctx, inst, ctx.OpFAdd(ctx.f16[1], a, b)); | ||
| 50 | } | ||
| 51 | |||
| 52 | Id EmitSPIRV::EmitFPAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 53 | return Decorate(ctx, inst, ctx.OpFAdd(ctx.f32[1], a, b)); | ||
| 54 | } | ||
| 55 | |||
| 56 | Id EmitSPIRV::EmitFPAdd64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 57 | return Decorate(ctx, inst, ctx.OpFAdd(ctx.f64[1], a, b)); | ||
| 58 | } | ||
| 59 | |||
| 60 | Id EmitSPIRV::EmitFPFma16(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) { | ||
| 61 | return Decorate(ctx, inst, ctx.OpFma(ctx.f16[1], a, b, c)); | ||
| 62 | } | ||
| 63 | |||
| 64 | Id EmitSPIRV::EmitFPFma32(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) { | ||
| 65 | return Decorate(ctx, inst, ctx.OpFma(ctx.f32[1], a, b, c)); | ||
| 66 | } | ||
| 67 | |||
| 68 | Id EmitSPIRV::EmitFPFma64(EmitContext& ctx, IR::Inst* inst, Id a, Id b, Id c) { | ||
| 69 | return Decorate(ctx, inst, ctx.OpFma(ctx.f64[1], a, b, c)); | ||
| 70 | } | ||
| 71 | |||
| 72 | void EmitSPIRV::EmitFPMax32(EmitContext&) { | ||
| 73 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 74 | } | ||
| 75 | |||
| 76 | void EmitSPIRV::EmitFPMax64(EmitContext&) { | ||
| 77 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 78 | } | ||
| 79 | |||
| 80 | void EmitSPIRV::EmitFPMin32(EmitContext&) { | ||
| 81 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 82 | } | ||
| 83 | |||
| 84 | void EmitSPIRV::EmitFPMin64(EmitContext&) { | ||
| 85 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 86 | } | ||
| 87 | |||
| 88 | Id EmitSPIRV::EmitFPMul16(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 89 | return Decorate(ctx, inst, ctx.OpFMul(ctx.f16[1], a, b)); | ||
| 90 | } | ||
| 91 | |||
| 92 | Id EmitSPIRV::EmitFPMul32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 93 | return Decorate(ctx, inst, ctx.OpFMul(ctx.f32[1], a, b)); | ||
| 94 | } | ||
| 95 | |||
| 96 | Id EmitSPIRV::EmitFPMul64(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 97 | return Decorate(ctx, inst, ctx.OpFMul(ctx.f64[1], a, b)); | ||
| 98 | } | ||
| 99 | |||
| 100 | void EmitSPIRV::EmitFPNeg16(EmitContext&) { | ||
| 101 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 102 | } | ||
| 103 | |||
| 104 | void EmitSPIRV::EmitFPNeg32(EmitContext&) { | ||
| 105 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 106 | } | ||
| 107 | |||
| 108 | void EmitSPIRV::EmitFPNeg64(EmitContext&) { | ||
| 109 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 110 | } | ||
| 111 | |||
| 112 | void EmitSPIRV::EmitFPRecip32(EmitContext&) { | ||
| 113 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 114 | } | ||
| 115 | |||
| 116 | void EmitSPIRV::EmitFPRecip64(EmitContext&) { | ||
| 117 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 118 | } | ||
| 119 | |||
| 120 | void EmitSPIRV::EmitFPRecipSqrt32(EmitContext&) { | ||
| 121 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 122 | } | ||
| 123 | |||
| 124 | void EmitSPIRV::EmitFPRecipSqrt64(EmitContext&) { | ||
| 125 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 126 | } | ||
| 127 | |||
| 128 | void EmitSPIRV::EmitFPSqrt(EmitContext&) { | ||
| 129 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 130 | } | ||
| 131 | |||
| 132 | void EmitSPIRV::EmitFPSin(EmitContext&) { | ||
| 133 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 134 | } | ||
| 135 | |||
| 136 | void EmitSPIRV::EmitFPSinNotReduced(EmitContext&) { | ||
| 137 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 138 | } | ||
| 139 | |||
| 140 | void EmitSPIRV::EmitFPExp2(EmitContext&) { | ||
| 141 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 142 | } | ||
| 143 | |||
| 144 | void EmitSPIRV::EmitFPExp2NotReduced(EmitContext&) { | ||
| 145 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 146 | } | ||
| 147 | |||
| 148 | void EmitSPIRV::EmitFPCos(EmitContext&) { | ||
| 149 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 150 | } | ||
| 151 | |||
| 152 | void EmitSPIRV::EmitFPCosNotReduced(EmitContext&) { | ||
| 153 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 154 | } | ||
| 155 | |||
| 156 | void EmitSPIRV::EmitFPLog2(EmitContext&) { | ||
| 157 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 158 | } | ||
| 159 | |||
| 160 | void EmitSPIRV::EmitFPSaturate16(EmitContext&) { | ||
| 161 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 162 | } | ||
| 163 | |||
| 164 | void EmitSPIRV::EmitFPSaturate32(EmitContext&) { | ||
| 165 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 166 | } | ||
| 167 | |||
| 168 | void EmitSPIRV::EmitFPSaturate64(EmitContext&) { | ||
| 169 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 170 | } | ||
| 171 | |||
| 172 | void EmitSPIRV::EmitFPRoundEven16(EmitContext&) { | ||
| 173 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 174 | } | ||
| 175 | |||
| 176 | void EmitSPIRV::EmitFPRoundEven32(EmitContext&) { | ||
| 177 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 178 | } | ||
| 179 | |||
| 180 | void EmitSPIRV::EmitFPRoundEven64(EmitContext&) { | ||
| 181 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 182 | } | ||
| 183 | |||
| 184 | void EmitSPIRV::EmitFPFloor16(EmitContext&) { | ||
| 185 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 186 | } | ||
| 187 | |||
| 188 | void EmitSPIRV::EmitFPFloor32(EmitContext&) { | ||
| 189 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 190 | } | ||
| 191 | |||
| 192 | void EmitSPIRV::EmitFPFloor64(EmitContext&) { | ||
| 193 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 194 | } | ||
| 195 | |||
| 196 | void EmitSPIRV::EmitFPCeil16(EmitContext&) { | ||
| 197 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 198 | } | ||
| 199 | |||
| 200 | void EmitSPIRV::EmitFPCeil32(EmitContext&) { | ||
| 201 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 202 | } | ||
| 203 | |||
| 204 | void EmitSPIRV::EmitFPCeil64(EmitContext&) { | ||
| 205 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 206 | } | ||
| 207 | |||
| 208 | void EmitSPIRV::EmitFPTrunc16(EmitContext&) { | ||
| 209 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 210 | } | ||
| 211 | |||
| 212 | void EmitSPIRV::EmitFPTrunc32(EmitContext&) { | ||
| 213 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 214 | } | ||
| 215 | |||
| 216 | void EmitSPIRV::EmitFPTrunc64(EmitContext&) { | ||
| 217 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 218 | } | ||
| 219 | |||
| 220 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp new file mode 100644 index 000000000..3ef4f3d78 --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_integer.cpp | |||
| @@ -0,0 +1,132 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | Id EmitSPIRV::EmitIAdd32(EmitContext& ctx, IR::Inst* inst, Id a, Id b) { | ||
| 10 | if (inst->HasAssociatedPseudoOperation()) { | ||
| 11 | throw NotImplementedException("Pseudo-operations on IAdd32"); | ||
| 12 | } | ||
| 13 | return ctx.OpIAdd(ctx.u32[1], a, b); | ||
| 14 | } | ||
| 15 | |||
| 16 | void EmitSPIRV::EmitIAdd64(EmitContext&) { | ||
| 17 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 18 | } | ||
| 19 | |||
| 20 | Id EmitSPIRV::EmitISub32(EmitContext& ctx, Id a, Id b) { | ||
| 21 | return ctx.OpISub(ctx.u32[1], a, b); | ||
| 22 | } | ||
| 23 | |||
| 24 | void EmitSPIRV::EmitISub64(EmitContext&) { | ||
| 25 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 26 | } | ||
| 27 | |||
| 28 | Id EmitSPIRV::EmitIMul32(EmitContext& ctx, Id a, Id b) { | ||
| 29 | return ctx.OpIMul(ctx.u32[1], a, b); | ||
| 30 | } | ||
| 31 | |||
| 32 | void EmitSPIRV::EmitINeg32(EmitContext&) { | ||
| 33 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 34 | } | ||
| 35 | |||
| 36 | void EmitSPIRV::EmitIAbs32(EmitContext&) { | ||
| 37 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 38 | } | ||
| 39 | |||
| 40 | Id EmitSPIRV::EmitShiftLeftLogical32(EmitContext& ctx, Id base, Id shift) { | ||
| 41 | return ctx.OpShiftLeftLogical(ctx.u32[1], base, shift); | ||
| 42 | } | ||
| 43 | |||
| 44 | void EmitSPIRV::EmitShiftRightLogical32(EmitContext&) { | ||
| 45 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 46 | } | ||
| 47 | |||
| 48 | void EmitSPIRV::EmitShiftRightArithmetic32(EmitContext&) { | ||
| 49 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 50 | } | ||
| 51 | |||
| 52 | void EmitSPIRV::EmitBitwiseAnd32(EmitContext&) { | ||
| 53 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 54 | } | ||
| 55 | |||
| 56 | void EmitSPIRV::EmitBitwiseOr32(EmitContext&) { | ||
| 57 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 58 | } | ||
| 59 | |||
| 60 | void EmitSPIRV::EmitBitwiseXor32(EmitContext&) { | ||
| 61 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 62 | } | ||
| 63 | |||
| 64 | void EmitSPIRV::EmitBitFieldInsert(EmitContext&) { | ||
| 65 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 66 | } | ||
| 67 | |||
| 68 | void EmitSPIRV::EmitBitFieldSExtract(EmitContext&) { | ||
| 69 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 70 | } | ||
| 71 | |||
| 72 | Id EmitSPIRV::EmitBitFieldUExtract(EmitContext& ctx, Id base, Id offset, Id count) { | ||
| 73 | return ctx.OpBitFieldUExtract(ctx.u32[1], base, offset, count); | ||
| 74 | } | ||
| 75 | |||
| 76 | void EmitSPIRV::EmitSLessThan(EmitContext&) { | ||
| 77 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 78 | } | ||
| 79 | |||
| 80 | void EmitSPIRV::EmitULessThan(EmitContext&) { | ||
| 81 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 82 | } | ||
| 83 | |||
| 84 | void EmitSPIRV::EmitIEqual(EmitContext&) { | ||
| 85 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 86 | } | ||
| 87 | |||
| 88 | void EmitSPIRV::EmitSLessThanEqual(EmitContext&) { | ||
| 89 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 90 | } | ||
| 91 | |||
| 92 | void EmitSPIRV::EmitULessThanEqual(EmitContext&) { | ||
| 93 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 94 | } | ||
| 95 | |||
| 96 | void EmitSPIRV::EmitSGreaterThan(EmitContext&) { | ||
| 97 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 98 | } | ||
| 99 | |||
| 100 | void EmitSPIRV::EmitUGreaterThan(EmitContext&) { | ||
| 101 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 102 | } | ||
| 103 | |||
| 104 | void EmitSPIRV::EmitINotEqual(EmitContext&) { | ||
| 105 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 106 | } | ||
| 107 | |||
| 108 | void EmitSPIRV::EmitSGreaterThanEqual(EmitContext&) { | ||
| 109 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 110 | } | ||
| 111 | |||
| 112 | Id EmitSPIRV::EmitUGreaterThanEqual(EmitContext& ctx, Id lhs, Id rhs) { | ||
| 113 | return ctx.OpUGreaterThanEqual(ctx.u1, lhs, rhs); | ||
| 114 | } | ||
| 115 | |||
| 116 | void EmitSPIRV::EmitLogicalOr(EmitContext&) { | ||
| 117 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 118 | } | ||
| 119 | |||
| 120 | void EmitSPIRV::EmitLogicalAnd(EmitContext&) { | ||
| 121 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 122 | } | ||
| 123 | |||
| 124 | void EmitSPIRV::EmitLogicalXor(EmitContext&) { | ||
| 125 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 126 | } | ||
| 127 | |||
| 128 | void EmitSPIRV::EmitLogicalNot(EmitContext&) { | ||
| 129 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 130 | } | ||
| 131 | |||
| 132 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp new file mode 100644 index 000000000..7b43c4ed8 --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_logical.cpp | |||
| @@ -0,0 +1,89 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitConvertS16F16(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitConvertS16F32(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitConvertS16F64(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitConvertS32F16(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | void EmitSPIRV::EmitConvertS32F32(EmitContext&) { | ||
| 26 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 27 | } | ||
| 28 | |||
| 29 | void EmitSPIRV::EmitConvertS32F64(EmitContext&) { | ||
| 30 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 31 | } | ||
| 32 | |||
| 33 | void EmitSPIRV::EmitConvertS64F16(EmitContext&) { | ||
| 34 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 35 | } | ||
| 36 | |||
| 37 | void EmitSPIRV::EmitConvertS64F32(EmitContext&) { | ||
| 38 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 39 | } | ||
| 40 | |||
| 41 | void EmitSPIRV::EmitConvertS64F64(EmitContext&) { | ||
| 42 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 43 | } | ||
| 44 | |||
| 45 | void EmitSPIRV::EmitConvertU16F16(EmitContext&) { | ||
| 46 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 47 | } | ||
| 48 | |||
| 49 | void EmitSPIRV::EmitConvertU16F32(EmitContext&) { | ||
| 50 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 51 | } | ||
| 52 | |||
| 53 | void EmitSPIRV::EmitConvertU16F64(EmitContext&) { | ||
| 54 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 55 | } | ||
| 56 | |||
| 57 | void EmitSPIRV::EmitConvertU32F16(EmitContext&) { | ||
| 58 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 59 | } | ||
| 60 | |||
| 61 | void EmitSPIRV::EmitConvertU32F32(EmitContext&) { | ||
| 62 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 63 | } | ||
| 64 | |||
| 65 | void EmitSPIRV::EmitConvertU32F64(EmitContext&) { | ||
| 66 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 67 | } | ||
| 68 | |||
| 69 | void EmitSPIRV::EmitConvertU64F16(EmitContext&) { | ||
| 70 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 71 | } | ||
| 72 | |||
| 73 | void EmitSPIRV::EmitConvertU64F32(EmitContext&) { | ||
| 74 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 75 | } | ||
| 76 | |||
| 77 | void EmitSPIRV::EmitConvertU64F64(EmitContext&) { | ||
| 78 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 79 | } | ||
| 80 | |||
| 81 | void EmitSPIRV::EmitConvertU64U32(EmitContext&) { | ||
| 82 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 83 | } | ||
| 84 | |||
| 85 | void EmitSPIRV::EmitConvertU32U64(EmitContext&) { | ||
| 86 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 87 | } | ||
| 88 | |||
| 89 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp new file mode 100644 index 000000000..21a0d72fa --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_memory.cpp | |||
| @@ -0,0 +1,125 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitLoadGlobalU8(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitLoadGlobalS8(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitLoadGlobalU16(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitLoadGlobalS16(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | void EmitSPIRV::EmitLoadGlobal32(EmitContext&) { | ||
| 26 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 27 | } | ||
| 28 | |||
| 29 | void EmitSPIRV::EmitLoadGlobal64(EmitContext&) { | ||
| 30 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 31 | } | ||
| 32 | |||
| 33 | void EmitSPIRV::EmitLoadGlobal128(EmitContext&) { | ||
| 34 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 35 | } | ||
| 36 | |||
| 37 | void EmitSPIRV::EmitWriteGlobalU8(EmitContext&) { | ||
| 38 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 39 | } | ||
| 40 | |||
| 41 | void EmitSPIRV::EmitWriteGlobalS8(EmitContext&) { | ||
| 42 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 43 | } | ||
| 44 | |||
| 45 | void EmitSPIRV::EmitWriteGlobalU16(EmitContext&) { | ||
| 46 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 47 | } | ||
| 48 | |||
| 49 | void EmitSPIRV::EmitWriteGlobalS16(EmitContext&) { | ||
| 50 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 51 | } | ||
| 52 | |||
| 53 | void EmitSPIRV::EmitWriteGlobal32(EmitContext&) { | ||
| 54 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 55 | } | ||
| 56 | |||
| 57 | void EmitSPIRV::EmitWriteGlobal64(EmitContext&) { | ||
| 58 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 59 | } | ||
| 60 | |||
| 61 | void EmitSPIRV::EmitWriteGlobal128(EmitContext&) { | ||
| 62 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 63 | } | ||
| 64 | |||
| 65 | void EmitSPIRV::EmitLoadStorageU8(EmitContext&) { | ||
| 66 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 67 | } | ||
| 68 | |||
| 69 | void EmitSPIRV::EmitLoadStorageS8(EmitContext&) { | ||
| 70 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 71 | } | ||
| 72 | |||
| 73 | void EmitSPIRV::EmitLoadStorageU16(EmitContext&) { | ||
| 74 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 75 | } | ||
| 76 | |||
| 77 | void EmitSPIRV::EmitLoadStorageS16(EmitContext&) { | ||
| 78 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 79 | } | ||
| 80 | |||
| 81 | Id EmitSPIRV::EmitLoadStorage32(EmitContext& ctx, const IR::Value& binding, | ||
| 82 | [[maybe_unused]] const IR::Value& offset) { | ||
| 83 | if (!binding.IsImmediate()) { | ||
| 84 | throw NotImplementedException("Storage buffer indexing"); | ||
| 85 | } | ||
| 86 | return ctx.Name(ctx.OpUndef(ctx.u32[1]), "unimplemented_sbuf"); | ||
| 87 | } | ||
| 88 | |||
| 89 | void EmitSPIRV::EmitLoadStorage64(EmitContext&) { | ||
| 90 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 91 | } | ||
| 92 | |||
| 93 | void EmitSPIRV::EmitLoadStorage128(EmitContext&) { | ||
| 94 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 95 | } | ||
| 96 | |||
| 97 | void EmitSPIRV::EmitWriteStorageU8(EmitContext&) { | ||
| 98 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 99 | } | ||
| 100 | |||
| 101 | void EmitSPIRV::EmitWriteStorageS8(EmitContext&) { | ||
| 102 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 103 | } | ||
| 104 | |||
| 105 | void EmitSPIRV::EmitWriteStorageU16(EmitContext&) { | ||
| 106 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 107 | } | ||
| 108 | |||
| 109 | void EmitSPIRV::EmitWriteStorageS16(EmitContext&) { | ||
| 110 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 111 | } | ||
| 112 | |||
| 113 | void EmitSPIRV::EmitWriteStorage32(EmitContext& ctx) { | ||
| 114 | ctx.Name(ctx.OpUndef(ctx.u32[1]), "unimplemented_sbuf_store"); | ||
| 115 | } | ||
| 116 | |||
| 117 | void EmitSPIRV::EmitWriteStorage64(EmitContext&) { | ||
| 118 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 119 | } | ||
| 120 | |||
| 121 | void EmitSPIRV::EmitWriteStorage128(EmitContext&) { | ||
| 122 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 123 | } | ||
| 124 | |||
| 125 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp new file mode 100644 index 000000000..40a856f72 --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_select.cpp | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitSelect8(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitSelect16(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitSelect32(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitSelect64(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp new file mode 100644 index 000000000..3850b072c --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_undefined.cpp | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 6 | |||
| 7 | namespace Shader::Backend::SPIRV { | ||
| 8 | |||
| 9 | void EmitSPIRV::EmitUndef1(EmitContext&) { | ||
| 10 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 11 | } | ||
| 12 | |||
| 13 | void EmitSPIRV::EmitUndef8(EmitContext&) { | ||
| 14 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 15 | } | ||
| 16 | |||
| 17 | void EmitSPIRV::EmitUndef16(EmitContext&) { | ||
| 18 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 19 | } | ||
| 20 | |||
| 21 | void EmitSPIRV::EmitUndef32(EmitContext&) { | ||
| 22 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 23 | } | ||
| 24 | |||
| 25 | void EmitSPIRV::EmitUndef64(EmitContext&) { | ||
| 26 | throw NotImplementedException("SPIR-V Instruction"); | ||
| 27 | } | ||
| 28 | |||
| 29 | } // namespace Shader::Backend::SPIRV | ||
diff --git a/src/shader_recompiler/frontend/ir/ir_emitter.cpp b/src/shader_recompiler/frontend/ir/ir_emitter.cpp index 9d7dc034c..ada0be834 100644 --- a/src/shader_recompiler/frontend/ir/ir_emitter.cpp +++ b/src/shader_recompiler/frontend/ir/ir_emitter.cpp | |||
| @@ -130,27 +130,27 @@ void IREmitter::SetAttribute(IR::Attribute attribute, const F32& value) { | |||
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | U32 IREmitter::WorkgroupIdX() { | 132 | U32 IREmitter::WorkgroupIdX() { |
| 133 | return Inst<U32>(Opcode::WorkgroupIdX); | 133 | return U32{CompositeExtract(Inst(Opcode::WorkgroupId), 0)}; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | U32 IREmitter::WorkgroupIdY() { | 136 | U32 IREmitter::WorkgroupIdY() { |
| 137 | return Inst<U32>(Opcode::WorkgroupIdY); | 137 | return U32{CompositeExtract(Inst(Opcode::WorkgroupId), 1)}; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | U32 IREmitter::WorkgroupIdZ() { | 140 | U32 IREmitter::WorkgroupIdZ() { |
| 141 | return Inst<U32>(Opcode::WorkgroupIdZ); | 141 | return U32{CompositeExtract(Inst(Opcode::WorkgroupId), 2)}; |
| 142 | } | 142 | } |
| 143 | 143 | ||
| 144 | U32 IREmitter::LocalInvocationIdX() { | 144 | U32 IREmitter::LocalInvocationIdX() { |
| 145 | return Inst<U32>(Opcode::LocalInvocationIdX); | 145 | return U32{CompositeExtract(Inst(Opcode::LocalInvocationId), 0)}; |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | U32 IREmitter::LocalInvocationIdY() { | 148 | U32 IREmitter::LocalInvocationIdY() { |
| 149 | return Inst<U32>(Opcode::LocalInvocationIdY); | 149 | return U32{CompositeExtract(Inst(Opcode::LocalInvocationId), 1)}; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | U32 IREmitter::LocalInvocationIdZ() { | 152 | U32 IREmitter::LocalInvocationIdZ() { |
| 153 | return Inst<U32>(Opcode::LocalInvocationIdZ); | 153 | return U32{CompositeExtract(Inst(Opcode::LocalInvocationId), 2)}; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | U32 IREmitter::LoadGlobalU8(const U64& address) { | 156 | U32 IREmitter::LoadGlobalU8(const U64& address) { |
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc index 82b04f37c..5dc65f2df 100644 --- a/src/shader_recompiler/frontend/ir/opcodes.inc +++ b/src/shader_recompiler/frontend/ir/opcodes.inc | |||
| @@ -21,9 +21,9 @@ OPCODE(GetPred, U1, Pred | |||
| 21 | OPCODE(SetPred, Void, Pred, U1, ) | 21 | OPCODE(SetPred, Void, Pred, U1, ) |
| 22 | OPCODE(GetCbuf, U32, U32, U32, ) | 22 | OPCODE(GetCbuf, U32, U32, U32, ) |
| 23 | OPCODE(GetAttribute, U32, Attribute, ) | 23 | OPCODE(GetAttribute, U32, Attribute, ) |
| 24 | OPCODE(SetAttribute, U32, Attribute, ) | 24 | OPCODE(SetAttribute, Void, Attribute, U32, ) |
| 25 | OPCODE(GetAttributeIndexed, U32, U32, ) | 25 | OPCODE(GetAttributeIndexed, U32, U32, ) |
| 26 | OPCODE(SetAttributeIndexed, U32, U32, ) | 26 | OPCODE(SetAttributeIndexed, Void, U32, U32, ) |
| 27 | OPCODE(GetZFlag, U1, Void, ) | 27 | OPCODE(GetZFlag, U1, Void, ) |
| 28 | OPCODE(GetSFlag, U1, Void, ) | 28 | OPCODE(GetSFlag, U1, Void, ) |
| 29 | OPCODE(GetCFlag, U1, Void, ) | 29 | OPCODE(GetCFlag, U1, Void, ) |
| @@ -32,12 +32,8 @@ OPCODE(SetZFlag, Void, U1, | |||
| 32 | OPCODE(SetSFlag, Void, U1, ) | 32 | OPCODE(SetSFlag, Void, U1, ) |
| 33 | OPCODE(SetCFlag, Void, U1, ) | 33 | OPCODE(SetCFlag, Void, U1, ) |
| 34 | OPCODE(SetOFlag, Void, U1, ) | 34 | OPCODE(SetOFlag, Void, U1, ) |
| 35 | OPCODE(WorkgroupIdX, U32, ) | 35 | OPCODE(WorkgroupId, U32x3, ) |
| 36 | OPCODE(WorkgroupIdY, U32, ) | 36 | OPCODE(LocalInvocationId, U32x3, ) |
| 37 | OPCODE(WorkgroupIdZ, U32, ) | ||
| 38 | OPCODE(LocalInvocationIdX, U32, ) | ||
| 39 | OPCODE(LocalInvocationIdY, U32, ) | ||
| 40 | OPCODE(LocalInvocationIdZ, U32, ) | ||
| 41 | 37 | ||
| 42 | // Undefined | 38 | // Undefined |
| 43 | OPCODE(Undef1, U1, ) | 39 | OPCODE(Undef1, U1, ) |
diff --git a/src/shader_recompiler/frontend/maxwell/translate/translate.cpp b/src/shader_recompiler/frontend/maxwell/translate/translate.cpp index dcc3f6c0e..7e6bb07a2 100644 --- a/src/shader_recompiler/frontend/maxwell/translate/translate.cpp +++ b/src/shader_recompiler/frontend/maxwell/translate/translate.cpp | |||
| @@ -11,15 +11,15 @@ | |||
| 11 | 11 | ||
| 12 | namespace Shader::Maxwell { | 12 | namespace Shader::Maxwell { |
| 13 | 13 | ||
| 14 | template <auto visitor_method> | 14 | template <auto method> |
| 15 | static void Invoke(TranslatorVisitor& visitor, Location pc, u64 insn) { | 15 | static void Invoke(TranslatorVisitor& visitor, Location pc, u64 insn) { |
| 16 | using MethodType = decltype(visitor_method); | 16 | using MethodType = decltype(method); |
| 17 | if constexpr (std::is_invocable_r_v<void, MethodType, TranslatorVisitor&, Location, u64>) { | 17 | if constexpr (std::is_invocable_r_v<void, MethodType, TranslatorVisitor&, Location, u64>) { |
| 18 | (visitor.*visitor_method)(pc, insn); | 18 | (visitor.*method)(pc, insn); |
| 19 | } else if constexpr (std::is_invocable_r_v<void, MethodType, TranslatorVisitor&, u64>) { | 19 | } else if constexpr (std::is_invocable_r_v<void, MethodType, TranslatorVisitor&, u64>) { |
| 20 | (visitor.*visitor_method)(insn); | 20 | (visitor.*method)(insn); |
| 21 | } else { | 21 | } else { |
| 22 | (visitor.*visitor_method)(); | 22 | (visitor.*method)(); |
| 23 | } | 23 | } |
| 24 | } | 24 | } |
| 25 | 25 | ||
diff --git a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp index 39a972919..593efde39 100644 --- a/src/shader_recompiler/ir_opt/identity_removal_pass.cpp +++ b/src/shader_recompiler/ir_opt/identity_removal_pass.cpp | |||
| @@ -13,7 +13,7 @@ namespace Shader::Optimization { | |||
| 13 | void IdentityRemovalPass(IR::Function& function) { | 13 | void IdentityRemovalPass(IR::Function& function) { |
| 14 | std::vector<IR::Inst*> to_invalidate; | 14 | std::vector<IR::Inst*> to_invalidate; |
| 15 | 15 | ||
| 16 | for (auto& block : function.blocks) { | 16 | for (IR::Block* const block : function.blocks) { |
| 17 | for (auto inst = block->begin(); inst != block->end();) { | 17 | for (auto inst = block->begin(); inst != block->end();) { |
| 18 | const size_t num_args{inst->NumArgs()}; | 18 | const size_t num_args{inst->NumArgs()}; |
| 19 | for (size_t i = 0; i < num_args; ++i) { | 19 | for (size_t i = 0; i < num_args; ++i) { |
diff --git a/src/shader_recompiler/main.cpp b/src/shader_recompiler/main.cpp index 19e36590c..9887e066d 100644 --- a/src/shader_recompiler/main.cpp +++ b/src/shader_recompiler/main.cpp | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | #include <fmt/format.h> | 7 | #include <fmt/format.h> |
| 8 | 8 | ||
| 9 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 9 | #include "shader_recompiler/file_environment.h" | 10 | #include "shader_recompiler/file_environment.h" |
| 10 | #include "shader_recompiler/frontend/ir/basic_block.h" | 11 | #include "shader_recompiler/frontend/ir/basic_block.h" |
| 11 | #include "shader_recompiler/frontend/ir/ir_emitter.h" | 12 | #include "shader_recompiler/frontend/ir/ir_emitter.h" |
| @@ -51,18 +52,18 @@ void RunDatabase() { | |||
| 51 | int main() { | 52 | int main() { |
| 52 | // RunDatabase(); | 53 | // RunDatabase(); |
| 53 | 54 | ||
| 54 | // FileEnvironment env{"D:\\Shaders\\Database\\test.bin"}; | ||
| 55 | FileEnvironment env{"D:\\Shaders\\Database\\Oninaki\\CS15C2FB1F0B965767.bin"}; | ||
| 56 | auto cfg{std::make_unique<Flow::CFG>(env, 0)}; | ||
| 57 | // fmt::print(stdout, "{}\n", cfg->Dot()); | ||
| 58 | |||
| 59 | auto inst_pool{std::make_unique<ObjectPool<IR::Inst>>()}; | 55 | auto inst_pool{std::make_unique<ObjectPool<IR::Inst>>()}; |
| 60 | auto block_pool{std::make_unique<ObjectPool<IR::Block>>()}; | 56 | auto block_pool{std::make_unique<ObjectPool<IR::Block>>()}; |
| 61 | 57 | ||
| 62 | for (int i = 0; i < 8192 * 4; ++i) { | 58 | // FileEnvironment env{"D:\\Shaders\\Database\\test.bin"}; |
| 63 | void(inst_pool->Create(IR::Opcode::Void, 0)); | 59 | FileEnvironment env{"D:\\Shaders\\Database\\Oninaki\\CS15C2FB1F0B965767.bin"}; |
| 60 | for (int i = 0; i < 1; ++i) { | ||
| 61 | block_pool->ReleaseContents(); | ||
| 62 | inst_pool->ReleaseContents(); | ||
| 63 | auto cfg{std::make_unique<Flow::CFG>(env, 0)}; | ||
| 64 | // fmt::print(stdout, "{}\n", cfg->Dot()); | ||
| 65 | IR::Program program{TranslateProgram(*inst_pool, *block_pool, env, *cfg)}; | ||
| 66 | // fmt::print(stdout, "{}\n", IR::DumpProgram(program)); | ||
| 67 | Backend::SPIRV::EmitSPIRV spirv{program}; | ||
| 64 | } | 68 | } |
| 65 | |||
| 66 | IR::Program program{TranslateProgram(*inst_pool, *block_pool, env, *cfg)}; | ||
| 67 | fmt::print(stdout, "{}\n", IR::DumpProgram(program)); | ||
| 68 | } | 69 | } |
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp deleted file mode 100644 index c6846d886..000000000 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp +++ /dev/null | |||
| @@ -1,3166 +0,0 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <functional> | ||
| 6 | #include <limits> | ||
| 7 | #include <map> | ||
| 8 | #include <optional> | ||
| 9 | #include <type_traits> | ||
| 10 | #include <unordered_map> | ||
| 11 | #include <utility> | ||
| 12 | |||
| 13 | #include <fmt/format.h> | ||
| 14 | |||
| 15 | #include <sirit/sirit.h> | ||
| 16 | |||
| 17 | #include "common/alignment.h" | ||
| 18 | #include "common/assert.h" | ||
| 19 | #include "common/common_types.h" | ||
| 20 | #include "common/logging/log.h" | ||
| 21 | #include "video_core/engines/maxwell_3d.h" | ||
| 22 | #include "video_core/engines/shader_bytecode.h" | ||
| 23 | #include "video_core/engines/shader_header.h" | ||
| 24 | #include "video_core/engines/shader_type.h" | ||
| 25 | #include "video_core/renderer_vulkan/vk_shader_decompiler.h" | ||
| 26 | #include "video_core/shader/node.h" | ||
| 27 | #include "video_core/shader/shader_ir.h" | ||
| 28 | #include "video_core/shader/transform_feedback.h" | ||
| 29 | #include "video_core/vulkan_common/vulkan_device.h" | ||
| 30 | |||
| 31 | namespace Vulkan { | ||
| 32 | |||
| 33 | namespace { | ||
| 34 | |||
| 35 | using Sirit::Id; | ||
| 36 | using Tegra::Engines::ShaderType; | ||
| 37 | using Tegra::Shader::Attribute; | ||
| 38 | using Tegra::Shader::PixelImap; | ||
| 39 | using Tegra::Shader::Register; | ||
| 40 | using namespace VideoCommon::Shader; | ||
| 41 | |||
| 42 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; | ||
| 43 | using Operation = const OperationNode&; | ||
| 44 | |||
| 45 | class ASTDecompiler; | ||
| 46 | class ExprDecompiler; | ||
| 47 | |||
| 48 | // TODO(Rodrigo): Use rasterizer's value | ||
| 49 | constexpr u32 MaxConstBufferFloats = 0x4000; | ||
| 50 | constexpr u32 MaxConstBufferElements = MaxConstBufferFloats / 4; | ||
| 51 | |||
| 52 | constexpr u32 NumInputPatches = 32; // This value seems to be the standard | ||
| 53 | |||
| 54 | enum class Type { Void, Bool, Bool2, Float, Int, Uint, HalfFloat }; | ||
| 55 | |||
| 56 | class Expression final { | ||
| 57 | public: | ||
| 58 | Expression(Id id_, Type type_) : id{id_}, type{type_} { | ||
| 59 | ASSERT(type_ != Type::Void); | ||
| 60 | } | ||
| 61 | Expression() : type{Type::Void} {} | ||
| 62 | |||
| 63 | Id id{}; | ||
| 64 | Type type{}; | ||
| 65 | }; | ||
| 66 | static_assert(std::is_standard_layout_v<Expression>); | ||
| 67 | |||
| 68 | struct TexelBuffer { | ||
| 69 | Id image_type{}; | ||
| 70 | Id image{}; | ||
| 71 | }; | ||
| 72 | |||
| 73 | struct SampledImage { | ||
| 74 | Id image_type{}; | ||
| 75 | Id sampler_type{}; | ||
| 76 | Id sampler_pointer_type{}; | ||
| 77 | Id variable{}; | ||
| 78 | }; | ||
| 79 | |||
| 80 | struct StorageImage { | ||
| 81 | Id image_type{}; | ||
| 82 | Id image{}; | ||
| 83 | }; | ||
| 84 | |||
| 85 | struct AttributeType { | ||
| 86 | Type type; | ||
| 87 | Id scalar; | ||
| 88 | Id vector; | ||
| 89 | }; | ||
| 90 | |||
| 91 | struct VertexIndices { | ||
| 92 | std::optional<u32> position; | ||
| 93 | std::optional<u32> layer; | ||
| 94 | std::optional<u32> viewport; | ||
| 95 | std::optional<u32> point_size; | ||
| 96 | std::optional<u32> clip_distances; | ||
| 97 | }; | ||
| 98 | |||
| 99 | struct GenericVaryingDescription { | ||
| 100 | Id id = nullptr; | ||
| 101 | u32 first_element = 0; | ||
| 102 | bool is_scalar = false; | ||
| 103 | }; | ||
| 104 | |||
| 105 | spv::Dim GetSamplerDim(const SamplerEntry& sampler) { | ||
| 106 | ASSERT(!sampler.is_buffer); | ||
| 107 | switch (sampler.type) { | ||
| 108 | case Tegra::Shader::TextureType::Texture1D: | ||
| 109 | return spv::Dim::Dim1D; | ||
| 110 | case Tegra::Shader::TextureType::Texture2D: | ||
| 111 | return spv::Dim::Dim2D; | ||
| 112 | case Tegra::Shader::TextureType::Texture3D: | ||
| 113 | return spv::Dim::Dim3D; | ||
| 114 | case Tegra::Shader::TextureType::TextureCube: | ||
| 115 | return spv::Dim::Cube; | ||
| 116 | default: | ||
| 117 | UNIMPLEMENTED_MSG("Unimplemented sampler type={}", sampler.type); | ||
| 118 | return spv::Dim::Dim2D; | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | std::pair<spv::Dim, bool> GetImageDim(const ImageEntry& image) { | ||
| 123 | switch (image.type) { | ||
| 124 | case Tegra::Shader::ImageType::Texture1D: | ||
| 125 | return {spv::Dim::Dim1D, false}; | ||
| 126 | case Tegra::Shader::ImageType::TextureBuffer: | ||
| 127 | return {spv::Dim::Buffer, false}; | ||
| 128 | case Tegra::Shader::ImageType::Texture1DArray: | ||
| 129 | return {spv::Dim::Dim1D, true}; | ||
| 130 | case Tegra::Shader::ImageType::Texture2D: | ||
| 131 | return {spv::Dim::Dim2D, false}; | ||
| 132 | case Tegra::Shader::ImageType::Texture2DArray: | ||
| 133 | return {spv::Dim::Dim2D, true}; | ||
| 134 | case Tegra::Shader::ImageType::Texture3D: | ||
| 135 | return {spv::Dim::Dim3D, false}; | ||
| 136 | default: | ||
| 137 | UNIMPLEMENTED_MSG("Unimplemented image type={}", image.type); | ||
| 138 | return {spv::Dim::Dim2D, false}; | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 142 | /// Returns the number of vertices present in a primitive topology. | ||
| 143 | u32 GetNumPrimitiveTopologyVertices(Maxwell::PrimitiveTopology primitive_topology) { | ||
| 144 | switch (primitive_topology) { | ||
| 145 | case Maxwell::PrimitiveTopology::Points: | ||
| 146 | return 1; | ||
| 147 | case Maxwell::PrimitiveTopology::Lines: | ||
| 148 | case Maxwell::PrimitiveTopology::LineLoop: | ||
| 149 | case Maxwell::PrimitiveTopology::LineStrip: | ||
| 150 | return 2; | ||
| 151 | case Maxwell::PrimitiveTopology::Triangles: | ||
| 152 | case Maxwell::PrimitiveTopology::TriangleStrip: | ||
| 153 | case Maxwell::PrimitiveTopology::TriangleFan: | ||
| 154 | return 3; | ||
| 155 | case Maxwell::PrimitiveTopology::LinesAdjacency: | ||
| 156 | case Maxwell::PrimitiveTopology::LineStripAdjacency: | ||
| 157 | return 4; | ||
| 158 | case Maxwell::PrimitiveTopology::TrianglesAdjacency: | ||
| 159 | case Maxwell::PrimitiveTopology::TriangleStripAdjacency: | ||
| 160 | return 6; | ||
| 161 | case Maxwell::PrimitiveTopology::Quads: | ||
| 162 | UNIMPLEMENTED_MSG("Quads"); | ||
| 163 | return 3; | ||
| 164 | case Maxwell::PrimitiveTopology::QuadStrip: | ||
| 165 | UNIMPLEMENTED_MSG("QuadStrip"); | ||
| 166 | return 3; | ||
| 167 | case Maxwell::PrimitiveTopology::Polygon: | ||
| 168 | UNIMPLEMENTED_MSG("Polygon"); | ||
| 169 | return 3; | ||
| 170 | case Maxwell::PrimitiveTopology::Patches: | ||
| 171 | UNIMPLEMENTED_MSG("Patches"); | ||
| 172 | return 3; | ||
| 173 | default: | ||
| 174 | UNREACHABLE(); | ||
| 175 | return 3; | ||
| 176 | } | ||
| 177 | } | ||
| 178 | |||
| 179 | spv::ExecutionMode GetExecutionMode(Maxwell::TessellationPrimitive primitive) { | ||
| 180 | switch (primitive) { | ||
| 181 | case Maxwell::TessellationPrimitive::Isolines: | ||
| 182 | return spv::ExecutionMode::Isolines; | ||
| 183 | case Maxwell::TessellationPrimitive::Triangles: | ||
| 184 | return spv::ExecutionMode::Triangles; | ||
| 185 | case Maxwell::TessellationPrimitive::Quads: | ||
| 186 | return spv::ExecutionMode::Quads; | ||
| 187 | } | ||
| 188 | UNREACHABLE(); | ||
| 189 | return spv::ExecutionMode::Triangles; | ||
| 190 | } | ||
| 191 | |||
| 192 | spv::ExecutionMode GetExecutionMode(Maxwell::TessellationSpacing spacing) { | ||
| 193 | switch (spacing) { | ||
| 194 | case Maxwell::TessellationSpacing::Equal: | ||
| 195 | return spv::ExecutionMode::SpacingEqual; | ||
| 196 | case Maxwell::TessellationSpacing::FractionalOdd: | ||
| 197 | return spv::ExecutionMode::SpacingFractionalOdd; | ||
| 198 | case Maxwell::TessellationSpacing::FractionalEven: | ||
| 199 | return spv::ExecutionMode::SpacingFractionalEven; | ||
| 200 | } | ||
| 201 | UNREACHABLE(); | ||
| 202 | return spv::ExecutionMode::SpacingEqual; | ||
| 203 | } | ||
| 204 | |||
| 205 | spv::ExecutionMode GetExecutionMode(Maxwell::PrimitiveTopology input_topology) { | ||
| 206 | switch (input_topology) { | ||
| 207 | case Maxwell::PrimitiveTopology::Points: | ||
| 208 | return spv::ExecutionMode::InputPoints; | ||
| 209 | case Maxwell::PrimitiveTopology::Lines: | ||
| 210 | case Maxwell::PrimitiveTopology::LineLoop: | ||
| 211 | case Maxwell::PrimitiveTopology::LineStrip: | ||
| 212 | return spv::ExecutionMode::InputLines; | ||
| 213 | case Maxwell::PrimitiveTopology::Triangles: | ||
| 214 | case Maxwell::PrimitiveTopology::TriangleStrip: | ||
| 215 | case Maxwell::PrimitiveTopology::TriangleFan: | ||
| 216 | return spv::ExecutionMode::Triangles; | ||
| 217 | case Maxwell::PrimitiveTopology::LinesAdjacency: | ||
| 218 | case Maxwell::PrimitiveTopology::LineStripAdjacency: | ||
| 219 | return spv::ExecutionMode::InputLinesAdjacency; | ||
| 220 | case Maxwell::PrimitiveTopology::TrianglesAdjacency: | ||
| 221 | case Maxwell::PrimitiveTopology::TriangleStripAdjacency: | ||
| 222 | return spv::ExecutionMode::InputTrianglesAdjacency; | ||
| 223 | case Maxwell::PrimitiveTopology::Quads: | ||
| 224 | UNIMPLEMENTED_MSG("Quads"); | ||
| 225 | return spv::ExecutionMode::Triangles; | ||
| 226 | case Maxwell::PrimitiveTopology::QuadStrip: | ||
| 227 | UNIMPLEMENTED_MSG("QuadStrip"); | ||
| 228 | return spv::ExecutionMode::Triangles; | ||
| 229 | case Maxwell::PrimitiveTopology::Polygon: | ||
| 230 | UNIMPLEMENTED_MSG("Polygon"); | ||
| 231 | return spv::ExecutionMode::Triangles; | ||
| 232 | case Maxwell::PrimitiveTopology::Patches: | ||
| 233 | UNIMPLEMENTED_MSG("Patches"); | ||
| 234 | return spv::ExecutionMode::Triangles; | ||
| 235 | } | ||
| 236 | UNREACHABLE(); | ||
| 237 | return spv::ExecutionMode::Triangles; | ||
| 238 | } | ||
| 239 | |||
| 240 | spv::ExecutionMode GetExecutionMode(Tegra::Shader::OutputTopology output_topology) { | ||
| 241 | switch (output_topology) { | ||
| 242 | case Tegra::Shader::OutputTopology::PointList: | ||
| 243 | return spv::ExecutionMode::OutputPoints; | ||
| 244 | case Tegra::Shader::OutputTopology::LineStrip: | ||
| 245 | return spv::ExecutionMode::OutputLineStrip; | ||
| 246 | case Tegra::Shader::OutputTopology::TriangleStrip: | ||
| 247 | return spv::ExecutionMode::OutputTriangleStrip; | ||
| 248 | default: | ||
| 249 | UNREACHABLE(); | ||
| 250 | return spv::ExecutionMode::OutputPoints; | ||
| 251 | } | ||
| 252 | } | ||
| 253 | |||
| 254 | /// Returns true if an attribute index is one of the 32 generic attributes | ||
| 255 | constexpr bool IsGenericAttribute(Attribute::Index attribute) { | ||
| 256 | return attribute >= Attribute::Index::Attribute_0 && | ||
| 257 | attribute <= Attribute::Index::Attribute_31; | ||
| 258 | } | ||
| 259 | |||
| 260 | /// Returns the location of a generic attribute | ||
| 261 | u32 GetGenericAttributeLocation(Attribute::Index attribute) { | ||
| 262 | ASSERT(IsGenericAttribute(attribute)); | ||
| 263 | return static_cast<u32>(attribute) - static_cast<u32>(Attribute::Index::Attribute_0); | ||
| 264 | } | ||
| 265 | |||
| 266 | /// Returns true if an object has to be treated as precise | ||
| 267 | bool IsPrecise(Operation operand) { | ||
| 268 | const auto& meta{operand.GetMeta()}; | ||
| 269 | if (std::holds_alternative<MetaArithmetic>(meta)) { | ||
| 270 | return std::get<MetaArithmetic>(meta).precise; | ||
| 271 | } | ||
| 272 | return false; | ||
| 273 | } | ||
| 274 | |||
| 275 | class SPIRVDecompiler final : public Sirit::Module { | ||
| 276 | public: | ||
| 277 | explicit SPIRVDecompiler(const Device& device_, const ShaderIR& ir_, ShaderType stage_, | ||
| 278 | const Registry& registry_, const Specialization& specialization_) | ||
| 279 | : Module(0x00010300), device{device_}, ir{ir_}, stage{stage_}, header{ir_.GetHeader()}, | ||
| 280 | registry{registry_}, specialization{specialization_} { | ||
| 281 | if (stage_ != ShaderType::Compute) { | ||
| 282 | transform_feedback = BuildTransformFeedback(registry_.GetGraphicsInfo()); | ||
| 283 | } | ||
| 284 | |||
| 285 | AddCapability(spv::Capability::Shader); | ||
| 286 | AddCapability(spv::Capability::UniformAndStorageBuffer16BitAccess); | ||
| 287 | AddCapability(spv::Capability::ImageQuery); | ||
| 288 | AddCapability(spv::Capability::Image1D); | ||
| 289 | AddCapability(spv::Capability::ImageBuffer); | ||
| 290 | AddCapability(spv::Capability::ImageGatherExtended); | ||
| 291 | AddCapability(spv::Capability::SampledBuffer); | ||
| 292 | AddCapability(spv::Capability::StorageImageWriteWithoutFormat); | ||
| 293 | AddCapability(spv::Capability::DrawParameters); | ||
| 294 | AddCapability(spv::Capability::SubgroupBallotKHR); | ||
| 295 | AddCapability(spv::Capability::SubgroupVoteKHR); | ||
| 296 | AddExtension("SPV_KHR_16bit_storage"); | ||
| 297 | AddExtension("SPV_KHR_shader_ballot"); | ||
| 298 | AddExtension("SPV_KHR_subgroup_vote"); | ||
| 299 | AddExtension("SPV_KHR_storage_buffer_storage_class"); | ||
| 300 | AddExtension("SPV_KHR_variable_pointers"); | ||
| 301 | AddExtension("SPV_KHR_shader_draw_parameters"); | ||
| 302 | |||
| 303 | if (!transform_feedback.empty()) { | ||
| 304 | if (device.IsExtTransformFeedbackSupported()) { | ||
| 305 | AddCapability(spv::Capability::TransformFeedback); | ||
| 306 | } else { | ||
| 307 | LOG_ERROR(Render_Vulkan, "Shader requires transform feedbacks but these are not " | ||
| 308 | "supported on this device"); | ||
| 309 | } | ||
| 310 | } | ||
| 311 | if (ir.UsesLayer() || ir.UsesViewportIndex()) { | ||
| 312 | if (ir.UsesViewportIndex()) { | ||
| 313 | AddCapability(spv::Capability::MultiViewport); | ||
| 314 | } | ||
| 315 | if (stage != ShaderType::Geometry && device.IsExtShaderViewportIndexLayerSupported()) { | ||
| 316 | AddExtension("SPV_EXT_shader_viewport_index_layer"); | ||
| 317 | AddCapability(spv::Capability::ShaderViewportIndexLayerEXT); | ||
| 318 | } | ||
| 319 | } | ||
| 320 | if (device.IsFormatlessImageLoadSupported()) { | ||
| 321 | AddCapability(spv::Capability::StorageImageReadWithoutFormat); | ||
| 322 | } | ||
| 323 | if (device.IsFloat16Supported()) { | ||
| 324 | AddCapability(spv::Capability::Float16); | ||
| 325 | } | ||
| 326 | t_scalar_half = Name(TypeFloat(device_.IsFloat16Supported() ? 16 : 32), "scalar_half"); | ||
| 327 | t_half = Name(TypeVector(t_scalar_half, 2), "half"); | ||
| 328 | |||
| 329 | const Id main = Decompile(); | ||
| 330 | |||
| 331 | switch (stage) { | ||
| 332 | case ShaderType::Vertex: | ||
| 333 | AddEntryPoint(spv::ExecutionModel::Vertex, main, "main", interfaces); | ||
| 334 | break; | ||
| 335 | case ShaderType::TesselationControl: | ||
| 336 | AddCapability(spv::Capability::Tessellation); | ||
| 337 | AddEntryPoint(spv::ExecutionModel::TessellationControl, main, "main", interfaces); | ||
| 338 | AddExecutionMode(main, spv::ExecutionMode::OutputVertices, | ||
| 339 | header.common2.threads_per_input_primitive); | ||
| 340 | break; | ||
| 341 | case ShaderType::TesselationEval: { | ||
| 342 | const auto& info = registry.GetGraphicsInfo(); | ||
| 343 | AddCapability(spv::Capability::Tessellation); | ||
| 344 | AddEntryPoint(spv::ExecutionModel::TessellationEvaluation, main, "main", interfaces); | ||
| 345 | AddExecutionMode(main, GetExecutionMode(info.tessellation_primitive)); | ||
| 346 | AddExecutionMode(main, GetExecutionMode(info.tessellation_spacing)); | ||
| 347 | AddExecutionMode(main, info.tessellation_clockwise | ||
| 348 | ? spv::ExecutionMode::VertexOrderCw | ||
| 349 | : spv::ExecutionMode::VertexOrderCcw); | ||
| 350 | break; | ||
| 351 | } | ||
| 352 | case ShaderType::Geometry: { | ||
| 353 | const auto& info = registry.GetGraphicsInfo(); | ||
| 354 | AddCapability(spv::Capability::Geometry); | ||
| 355 | AddEntryPoint(spv::ExecutionModel::Geometry, main, "main", interfaces); | ||
| 356 | AddExecutionMode(main, GetExecutionMode(info.primitive_topology)); | ||
| 357 | AddExecutionMode(main, GetExecutionMode(header.common3.output_topology)); | ||
| 358 | AddExecutionMode(main, spv::ExecutionMode::OutputVertices, | ||
| 359 | header.common4.max_output_vertices); | ||
| 360 | // TODO(Rodrigo): Where can we get this info from? | ||
| 361 | AddExecutionMode(main, spv::ExecutionMode::Invocations, 1U); | ||
| 362 | break; | ||
| 363 | } | ||
| 364 | case ShaderType::Fragment: | ||
| 365 | AddEntryPoint(spv::ExecutionModel::Fragment, main, "main", interfaces); | ||
| 366 | AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft); | ||
| 367 | if (header.ps.omap.depth) { | ||
| 368 | AddExecutionMode(main, spv::ExecutionMode::DepthReplacing); | ||
| 369 | } | ||
| 370 | if (specialization.early_fragment_tests) { | ||
| 371 | AddExecutionMode(main, spv::ExecutionMode::EarlyFragmentTests); | ||
| 372 | } | ||
| 373 | break; | ||
| 374 | case ShaderType::Compute: | ||
| 375 | const auto workgroup_size = specialization.workgroup_size; | ||
| 376 | AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0], | ||
| 377 | workgroup_size[1], workgroup_size[2]); | ||
| 378 | AddEntryPoint(spv::ExecutionModel::GLCompute, main, "main", interfaces); | ||
| 379 | break; | ||
| 380 | } | ||
| 381 | } | ||
| 382 | |||
| 383 | private: | ||
| 384 | Id Decompile() { | ||
| 385 | DeclareCommon(); | ||
| 386 | DeclareVertex(); | ||
| 387 | DeclareTessControl(); | ||
| 388 | DeclareTessEval(); | ||
| 389 | DeclareGeometry(); | ||
| 390 | DeclareFragment(); | ||
| 391 | DeclareCompute(); | ||
| 392 | DeclareRegisters(); | ||
| 393 | DeclareCustomVariables(); | ||
| 394 | DeclarePredicates(); | ||
| 395 | DeclareLocalMemory(); | ||
| 396 | DeclareSharedMemory(); | ||
| 397 | DeclareInternalFlags(); | ||
| 398 | DeclareInputAttributes(); | ||
| 399 | DeclareOutputAttributes(); | ||
| 400 | |||
| 401 | u32 binding = specialization.base_binding; | ||
| 402 | binding = DeclareConstantBuffers(binding); | ||
| 403 | binding = DeclareGlobalBuffers(binding); | ||
| 404 | binding = DeclareUniformTexels(binding); | ||
| 405 | binding = DeclareSamplers(binding); | ||
| 406 | binding = DeclareStorageTexels(binding); | ||
| 407 | binding = DeclareImages(binding); | ||
| 408 | |||
| 409 | const Id main = OpFunction(t_void, {}, TypeFunction(t_void)); | ||
| 410 | AddLabel(); | ||
| 411 | |||
| 412 | if (ir.IsDecompiled()) { | ||
| 413 | DeclareFlowVariables(); | ||
| 414 | DecompileAST(); | ||
| 415 | } else { | ||
| 416 | AllocateLabels(); | ||
| 417 | DecompileBranchMode(); | ||
| 418 | } | ||
| 419 | |||
| 420 | OpReturn(); | ||
| 421 | OpFunctionEnd(); | ||
| 422 | |||
| 423 | return main; | ||
| 424 | } | ||
| 425 | |||
| 426 | void DefinePrologue() { | ||
| 427 | if (stage == ShaderType::Vertex) { | ||
| 428 | // Clear Position to avoid reading trash on the Z conversion. | ||
| 429 | const auto position_index = out_indices.position.value(); | ||
| 430 | const Id position = AccessElement(t_out_float4, out_vertex, position_index); | ||
| 431 | OpStore(position, v_varying_default); | ||
| 432 | |||
| 433 | if (specialization.point_size) { | ||
| 434 | const u32 point_size_index = out_indices.point_size.value(); | ||
| 435 | const Id out_point_size = AccessElement(t_out_float, out_vertex, point_size_index); | ||
| 436 | OpStore(out_point_size, Constant(t_float, *specialization.point_size)); | ||
| 437 | } | ||
| 438 | } | ||
| 439 | } | ||
| 440 | |||
| 441 | void DecompileAST(); | ||
| 442 | |||
| 443 | void DecompileBranchMode() { | ||
| 444 | const u32 first_address = ir.GetBasicBlocks().begin()->first; | ||
| 445 | const Id loop_label = OpLabel("loop"); | ||
| 446 | const Id merge_label = OpLabel("merge"); | ||
| 447 | const Id dummy_label = OpLabel(); | ||
| 448 | const Id jump_label = OpLabel(); | ||
| 449 | continue_label = OpLabel("continue"); | ||
| 450 | |||
| 451 | std::vector<Sirit::Literal> literals; | ||
| 452 | std::vector<Id> branch_labels; | ||
| 453 | for (const auto& [literal, label] : labels) { | ||
| 454 | literals.push_back(literal); | ||
| 455 | branch_labels.push_back(label); | ||
| 456 | } | ||
| 457 | |||
| 458 | jmp_to = OpVariable(TypePointer(spv::StorageClass::Function, t_uint), | ||
| 459 | spv::StorageClass::Function, Constant(t_uint, first_address)); | ||
| 460 | AddLocalVariable(jmp_to); | ||
| 461 | |||
| 462 | std::tie(ssy_flow_stack, ssy_flow_stack_top) = CreateFlowStack(); | ||
| 463 | std::tie(pbk_flow_stack, pbk_flow_stack_top) = CreateFlowStack(); | ||
| 464 | |||
| 465 | Name(jmp_to, "jmp_to"); | ||
| 466 | Name(ssy_flow_stack, "ssy_flow_stack"); | ||
| 467 | Name(ssy_flow_stack_top, "ssy_flow_stack_top"); | ||
| 468 | Name(pbk_flow_stack, "pbk_flow_stack"); | ||
| 469 | Name(pbk_flow_stack_top, "pbk_flow_stack_top"); | ||
| 470 | |||
| 471 | DefinePrologue(); | ||
| 472 | |||
| 473 | OpBranch(loop_label); | ||
| 474 | AddLabel(loop_label); | ||
| 475 | OpLoopMerge(merge_label, continue_label, spv::LoopControlMask::MaskNone); | ||
| 476 | OpBranch(dummy_label); | ||
| 477 | |||
| 478 | AddLabel(dummy_label); | ||
| 479 | const Id default_branch = OpLabel(); | ||
| 480 | const Id jmp_to_load = OpLoad(t_uint, jmp_to); | ||
| 481 | OpSelectionMerge(jump_label, spv::SelectionControlMask::MaskNone); | ||
| 482 | OpSwitch(jmp_to_load, default_branch, literals, branch_labels); | ||
| 483 | |||
| 484 | AddLabel(default_branch); | ||
| 485 | OpReturn(); | ||
| 486 | |||
| 487 | for (const auto& [address, bb] : ir.GetBasicBlocks()) { | ||
| 488 | AddLabel(labels.at(address)); | ||
| 489 | |||
| 490 | VisitBasicBlock(bb); | ||
| 491 | |||
| 492 | const auto next_it = labels.lower_bound(address + 1); | ||
| 493 | const Id next_label = next_it != labels.end() ? next_it->second : default_branch; | ||
| 494 | OpBranch(next_label); | ||
| 495 | } | ||
| 496 | |||
| 497 | AddLabel(jump_label); | ||
| 498 | OpBranch(continue_label); | ||
| 499 | AddLabel(continue_label); | ||
| 500 | OpBranch(loop_label); | ||
| 501 | AddLabel(merge_label); | ||
| 502 | } | ||
| 503 | |||
| 504 | private: | ||
| 505 | friend class ASTDecompiler; | ||
| 506 | friend class ExprDecompiler; | ||
| 507 | |||
| 508 | static constexpr auto INTERNAL_FLAGS_COUNT = static_cast<std::size_t>(InternalFlag::Amount); | ||
| 509 | |||
| 510 | void AllocateLabels() { | ||
| 511 | for (const auto& pair : ir.GetBasicBlocks()) { | ||
| 512 | const u32 address = pair.first; | ||
| 513 | labels.emplace(address, OpLabel(fmt::format("label_0x{:x}", address))); | ||
| 514 | } | ||
| 515 | } | ||
| 516 | |||
| 517 | void DeclareCommon() { | ||
| 518 | thread_id = | ||
| 519 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupLocalInvocationId, t_in_uint, "thread_id"); | ||
| 520 | thread_masks[0] = | ||
| 521 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupEqMask, t_in_uint4, "thread_eq_mask"); | ||
| 522 | thread_masks[1] = | ||
| 523 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupGeMask, t_in_uint4, "thread_ge_mask"); | ||
| 524 | thread_masks[2] = | ||
| 525 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupGtMask, t_in_uint4, "thread_gt_mask"); | ||
| 526 | thread_masks[3] = | ||
| 527 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupLeMask, t_in_uint4, "thread_le_mask"); | ||
| 528 | thread_masks[4] = | ||
| 529 | DeclareInputBuiltIn(spv::BuiltIn::SubgroupLtMask, t_in_uint4, "thread_lt_mask"); | ||
| 530 | } | ||
| 531 | |||
| 532 | void DeclareVertex() { | ||
| 533 | if (stage != ShaderType::Vertex) { | ||
| 534 | return; | ||
| 535 | } | ||
| 536 | Id out_vertex_struct; | ||
| 537 | std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct(); | ||
| 538 | const Id vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct); | ||
| 539 | out_vertex = OpVariable(vertex_ptr, spv::StorageClass::Output); | ||
| 540 | interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex"))); | ||
| 541 | |||
| 542 | // Declare input attributes | ||
| 543 | vertex_index = DeclareInputBuiltIn(spv::BuiltIn::VertexIndex, t_in_int, "vertex_index"); | ||
| 544 | instance_index = | ||
| 545 | DeclareInputBuiltIn(spv::BuiltIn::InstanceIndex, t_in_int, "instance_index"); | ||
| 546 | base_vertex = DeclareInputBuiltIn(spv::BuiltIn::BaseVertex, t_in_int, "base_vertex"); | ||
| 547 | base_instance = DeclareInputBuiltIn(spv::BuiltIn::BaseInstance, t_in_int, "base_instance"); | ||
| 548 | } | ||
| 549 | |||
    /// Declares tessellation control interfaces: arrayed input/output vertex
    /// blocks, the per-patch tessellation level outputs and the invocation id.
    void DeclareTessControl() {
        if (stage != ShaderType::TesselationControl) {
            return;
        }
        DeclareInputVertexArray(NumInputPatches);
        DeclareOutputVertexArray(header.common2.threads_per_input_primitive);

        // TessLevelOuter: float[4] output, decorated Patch (one value per patch)
        tess_level_outer = DeclareBuiltIn(
            spv::BuiltIn::TessLevelOuter, spv::StorageClass::Output,
            TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 4U))),
            "tess_level_outer");
        Decorate(tess_level_outer, spv::Decoration::Patch);

        // TessLevelInner: float[2] output, also per-patch
        tess_level_inner = DeclareBuiltIn(
            spv::BuiltIn::TessLevelInner, spv::StorageClass::Output,
            TypePointer(spv::StorageClass::Output, TypeArray(t_float, Constant(t_uint, 2U))),
            "tess_level_inner");
        Decorate(tess_level_inner, spv::Decoration::Patch);

        invocation_id = DeclareInputBuiltIn(spv::BuiltIn::InvocationId, t_in_int, "invocation_id");
    }
| 571 | |||
    /// Declares tessellation evaluation interfaces: arrayed input vertices, a
    /// single output vertex block and the TessCoord input built-in.
    void DeclareTessEval() {
        if (stage != ShaderType::TesselationEval) {
            return;
        }
        DeclareInputVertexArray(NumInputPatches);
        DeclareOutputVertex();

        tess_coord = DeclareInputBuiltIn(spv::BuiltIn::TessCoord, t_in_float3, "tess_coord");
    }
| 581 | |||
| 582 | void DeclareGeometry() { | ||
| 583 | if (stage != ShaderType::Geometry) { | ||
| 584 | return; | ||
| 585 | } | ||
| 586 | const auto& info = registry.GetGraphicsInfo(); | ||
| 587 | const u32 num_input = GetNumPrimitiveTopologyVertices(info.primitive_topology); | ||
| 588 | DeclareInputVertexArray(num_input); | ||
| 589 | DeclareOutputVertex(); | ||
| 590 | } | ||
| 591 | |||
    /// Declares fragment-stage interfaces: one color output per enabled render
    /// target, an optional FragDepth output, and the common fragment inputs
    /// (FragCoord, FrontFacing, PointCoord).
    void DeclareFragment() {
        if (stage != ShaderType::Fragment) {
            return;
        }

        for (u32 rt = 0; rt < static_cast<u32>(std::size(frag_colors)); ++rt) {
            if (!IsRenderTargetEnabled(rt)) {
                continue; // no component of this target is written
            }
            const Id id = AddGlobalVariable(OpVariable(t_out_float4, spv::StorageClass::Output));
            Name(id, fmt::format("frag_color{}", rt));
            // Render target index maps 1:1 to the output location
            Decorate(id, spv::Decoration::Location, rt);

            frag_colors[rt] = id;
            interfaces.push_back(id);
        }

        // Only declare FragDepth when the shader header says depth is written
        if (header.ps.omap.depth) {
            frag_depth = AddGlobalVariable(OpVariable(t_out_float, spv::StorageClass::Output));
            Name(frag_depth, "frag_depth");
            Decorate(frag_depth, spv::Decoration::BuiltIn,
                     static_cast<u32>(spv::BuiltIn::FragDepth));

            interfaces.push_back(frag_depth);
        }

        frag_coord = DeclareInputBuiltIn(spv::BuiltIn::FragCoord, t_in_float4, "frag_coord");
        front_facing = DeclareInputBuiltIn(spv::BuiltIn::FrontFacing, t_in_bool, "front_facing");
        point_coord = DeclareInputBuiltIn(spv::BuiltIn::PointCoord, t_in_float2, "point_coord");
    }
| 622 | |||
| 623 | void DeclareCompute() { | ||
| 624 | if (stage != ShaderType::Compute) { | ||
| 625 | return; | ||
| 626 | } | ||
| 627 | |||
| 628 | workgroup_id = DeclareInputBuiltIn(spv::BuiltIn::WorkgroupId, t_in_uint3, "workgroup_id"); | ||
| 629 | local_invocation_id = | ||
| 630 | DeclareInputBuiltIn(spv::BuiltIn::LocalInvocationId, t_in_uint3, "local_invocation_id"); | ||
| 631 | } | ||
| 632 | |||
| 633 | void DeclareRegisters() { | ||
| 634 | for (const u32 gpr : ir.GetRegisters()) { | ||
| 635 | const Id id = OpVariable(t_prv_float, spv::StorageClass::Private, v_float_zero); | ||
| 636 | Name(id, fmt::format("gpr_{}", gpr)); | ||
| 637 | registers.emplace(gpr, AddGlobalVariable(id)); | ||
| 638 | } | ||
| 639 | } | ||
| 640 | |||
| 641 | void DeclareCustomVariables() { | ||
| 642 | const u32 num_custom_variables = ir.GetNumCustomVariables(); | ||
| 643 | for (u32 i = 0; i < num_custom_variables; ++i) { | ||
| 644 | const Id id = OpVariable(t_prv_float, spv::StorageClass::Private, v_float_zero); | ||
| 645 | Name(id, fmt::format("custom_var_{}", i)); | ||
| 646 | custom_variables.emplace(i, AddGlobalVariable(id)); | ||
| 647 | } | ||
| 648 | } | ||
| 649 | |||
| 650 | void DeclarePredicates() { | ||
| 651 | for (const auto pred : ir.GetPredicates()) { | ||
| 652 | const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); | ||
| 653 | Name(id, fmt::format("pred_{}", static_cast<u32>(pred))); | ||
| 654 | predicates.emplace(pred, AddGlobalVariable(id)); | ||
| 655 | } | ||
| 656 | } | ||
| 657 | |||
| 658 | void DeclareFlowVariables() { | ||
| 659 | for (u32 i = 0; i < ir.GetASTNumVariables(); i++) { | ||
| 660 | const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); | ||
| 661 | Name(id, fmt::format("flow_var_{}", static_cast<u32>(i))); | ||
| 662 | flow_variables.emplace(i, AddGlobalVariable(id)); | ||
| 663 | } | ||
| 664 | } | ||
| 665 | |||
    /// Declares thread-local memory as a zero-initialized private float array
    /// when the shader uses any. Size comes from the shader header, except for
    /// compute where it is currently stubbed.
    void DeclareLocalMemory() {
        // TODO(Rodrigo): Unstub kernel local memory size and pass it from a register at
        // specialization time.
        const u64 lmem_size = stage == ShaderType::Compute ? 0x400 : header.GetLocalMemorySize();
        if (lmem_size == 0) {
            return;
        }
        // lmem_size is in bytes; back it with an array of 32-bit floats
        const auto element_count = static_cast<u32>(Common::AlignUp(lmem_size, 4) / 4);
        const Id type_array = TypeArray(t_float, Constant(t_uint, element_count));
        const Id type_pointer = TypePointer(spv::StorageClass::Private, type_array);
        Name(type_pointer, "LocalMemory");

        local_memory =
            OpVariable(type_pointer, spv::StorageClass::Private, ConstantNull(type_array));
        AddGlobalVariable(Name(local_memory, "local_memory"));
    }
| 682 | |||
    /// Declares compute shared (workgroup) memory as a uint array, clamping the
    /// requested size to the device limit. Compute-only.
    void DeclareSharedMemory() {
        if (stage != ShaderType::Compute) {
            return;
        }
        // NOTE(review): the uint pointer type is created even when no shared
        // memory array ends up declared below — presumably other emission code
        // may still reference t_smem_uint; confirm before reordering.
        t_smem_uint = TypePointer(spv::StorageClass::Workgroup, t_uint);

        u32 smem_size = specialization.shared_memory_size * 4; // presumably 32-bit units -> bytes
        if (smem_size == 0) {
            // Avoid declaring an empty array.
            return;
        }
        const u32 limit = device.GetMaxComputeSharedMemorySize();
        if (smem_size > limit) {
            LOG_ERROR(Render_Vulkan, "Shared memory size {} is clamped to host's limit {}",
                      smem_size, limit);
            smem_size = limit;
        }

        // Element count is in 32-bit words
        const Id type_array = TypeArray(t_uint, Constant(t_uint, smem_size / 4));
        const Id type_pointer = TypePointer(spv::StorageClass::Workgroup, type_array);
        Name(type_pointer, "SharedMemory");

        shared_memory = OpVariable(type_pointer, spv::StorageClass::Workgroup);
        AddGlobalVariable(Name(shared_memory, "shared_memory"));
    }
| 708 | |||
| 709 | void DeclareInternalFlags() { | ||
| 710 | static constexpr std::array names{"zero", "sign", "carry", "overflow"}; | ||
| 711 | |||
| 712 | for (std::size_t flag = 0; flag < INTERNAL_FLAGS_COUNT; ++flag) { | ||
| 713 | const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); | ||
| 714 | internal_flags[flag] = AddGlobalVariable(Name(id, names[flag])); | ||
| 715 | } | ||
| 716 | } | ||
| 717 | |||
| 718 | void DeclareInputVertexArray(u32 length) { | ||
| 719 | constexpr auto storage = spv::StorageClass::Input; | ||
| 720 | std::tie(in_indices, in_vertex) = DeclareVertexArray(storage, "in_indices", length); | ||
| 721 | } | ||
| 722 | |||
| 723 | void DeclareOutputVertexArray(u32 length) { | ||
| 724 | constexpr auto storage = spv::StorageClass::Output; | ||
| 725 | std::tie(out_indices, out_vertex) = DeclareVertexArray(storage, "out_indices", length); | ||
| 726 | } | ||
| 727 | |||
    /// Declares an arrayed PerVertex block (PerVertex[length]) in the given
    /// storage class and registers it as an entry-point interface.
    /// Returns the struct member indices and the variable id.
    std::tuple<VertexIndices, Id> DeclareVertexArray(spv::StorageClass storage_class,
                                                     std::string name, u32 length) {
        const auto [struct_id, indices] = DeclareVertexStruct();
        const Id vertex_array = TypeArray(struct_id, Constant(t_uint, length));
        const Id vertex_ptr = TypePointer(storage_class, vertex_array);
        const Id vertex = OpVariable(vertex_ptr, storage_class);
        AddGlobalVariable(Name(vertex, std::move(name)));
        interfaces.push_back(vertex);
        return {indices, vertex};
    }
| 738 | |||
    /// Declares the non-arrayed output PerVertex block and adds it to the
    /// entry-point interface list. Member indices are stored in out_indices.
    void DeclareOutputVertex() {
        Id out_vertex_struct;
        std::tie(out_vertex_struct, out_indices) = DeclareVertexStruct();
        const Id out_vertex_ptr = TypePointer(spv::StorageClass::Output, out_vertex_struct);
        out_vertex = OpVariable(out_vertex_ptr, spv::StorageClass::Output);
        interfaces.push_back(AddGlobalVariable(Name(out_vertex, "out_vertex")));
    }
| 746 | |||
    /// Declares generic (user-defined) input attributes, skipping disabled
    /// locations. In fragment shaders, each input additionally receives an
    /// interpolation decoration derived from the header's pixel input map.
    void DeclareInputAttributes() {
        for (const auto index : ir.GetInputAttributes()) {
            if (!IsGenericAttribute(index)) {
                continue; // built-in attributes are declared elsewhere
            }
            const u32 location = GetGenericAttributeLocation(index);
            if (!IsAttributeEnabled(location)) {
                continue; // disabled attributes read as zero; no variable needed
            }
            const auto type_descriptor = GetAttributeType(location);
            Id type;
            if (IsInputAttributeArray()) {
                // Arrayed stages index a vec4-per-vertex array
                type = GetTypeVectorDefinitionLut(type_descriptor.type).at(3);
                type = TypeArray(type, Constant(t_uint, GetNumInputVertices()));
                type = TypePointer(spv::StorageClass::Input, type);
            } else {
                type = type_descriptor.vector;
            }
            const Id id = OpVariable(type, spv::StorageClass::Input);
            AddGlobalVariable(Name(id, fmt::format("in_attr{}", location)));
            input_attributes.emplace(index, id);
            interfaces.push_back(id);

            Decorate(id, spv::Decoration::Location, location);

            if (stage != ShaderType::Fragment) {
                continue;
            }
            // Fragment inputs: map the hardware interpolation mode to SPIR-V
            switch (header.ps.GetPixelImap(location)) {
            case PixelImap::Constant:
                Decorate(id, spv::Decoration::Flat);
                break;
            case PixelImap::Perspective:
                // Default
                break;
            case PixelImap::ScreenLinear:
                Decorate(id, spv::Decoration::NoPerspective);
                break;
            default:
                UNREACHABLE_MSG("Unused attribute being fetched");
            }
        }
    }
| 790 | |||
| 791 | void DeclareOutputAttributes() { | ||
| 792 | if (stage == ShaderType::Compute || stage == ShaderType::Fragment) { | ||
| 793 | return; | ||
| 794 | } | ||
| 795 | |||
| 796 | UNIMPLEMENTED_IF(registry.GetGraphicsInfo().tfb_enabled && stage != ShaderType::Vertex); | ||
| 797 | for (const auto index : ir.GetOutputAttributes()) { | ||
| 798 | if (!IsGenericAttribute(index)) { | ||
| 799 | continue; | ||
| 800 | } | ||
| 801 | DeclareOutputAttribute(index); | ||
| 802 | } | ||
| 803 | } | ||
| 804 | |||
    /// Declares the output variables backing one generic output attribute.
    /// The four components may be split across several variables so each
    /// variable matches a transform feedback capture range exactly.
    void DeclareOutputAttribute(Attribute::Index index) {
        static constexpr std::string_view swizzle = "xyzw";

        const u32 location = GetGenericAttributeLocation(index);
        u8 element = 0;
        while (element < 4) {
            const std::size_t remainder = 4 - element;

            // Cover the rest of the vector by default; shrink to the captured
            // component count when TFB is active for this element
            std::size_t num_components = remainder;
            const std::optional tfb = GetTransformFeedbackInfo(index, element);
            if (tfb) {
                num_components = tfb->components;
            }

            Id type = GetTypeVectorDefinitionLut(Type::Float).at(num_components - 1);
            Id varying_default = v_varying_default;
            if (IsOutputAttributeArray()) {
                const u32 num = GetNumOutputVertices();
                type = TypeArray(type, Constant(t_uint, num));
                if (device.GetDriverID() != VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
                    // Intel's proprietary driver fails to setup defaults for arrayed output
                    // attributes.
                    varying_default = ConstantComposite(type, std::vector(num, varying_default));
                }
            }
            type = TypePointer(spv::StorageClass::Output, type);

            // e.g. "out_attr0", or "out_attr0_yz" for a partial variable
            std::string name = fmt::format("out_attr{}", location);
            if (num_components < 4 || element > 0) {
                name = fmt::format("{}_{}", name, swizzle.substr(element, num_components));
            }

            const Id id = OpVariable(type, spv::StorageClass::Output, varying_default);
            Name(AddGlobalVariable(id), name);

            // Map every covered component offset back to this variable
            GenericVaryingDescription description;
            description.id = id;
            description.first_element = element;
            description.is_scalar = num_components == 1;
            for (u32 i = 0; i < num_components; ++i) {
                const u8 offset = static_cast<u8>(static_cast<u32>(index) * 4 + element + i);
                output_attributes.emplace(offset, description);
            }
            interfaces.push_back(id);

            Decorate(id, spv::Decoration::Location, location);
            if (element > 0) {
                // Partial variables start mid-vector; record the first component
                Decorate(id, spv::Decoration::Component, static_cast<u32>(element));
            }
            if (tfb && device.IsExtTransformFeedbackSupported()) {
                Decorate(id, spv::Decoration::XfbBuffer, static_cast<u32>(tfb->buffer));
                Decorate(id, spv::Decoration::XfbStride, static_cast<u32>(tfb->stride));
                Decorate(id, spv::Decoration::Offset, static_cast<u32>(tfb->offset));
            }

            element = static_cast<u8>(static_cast<std::size_t>(element) + num_components);
        }
    }
| 863 | |||
    /// Looks up transform feedback capture info for an attribute element.
    /// The key is a packed varying location (attribute index * 4 + element);
    /// returns an empty optional when the varying is not captured.
    std::optional<VaryingTFB> GetTransformFeedbackInfo(Attribute::Index index, u8 element = 0) {
        const u8 location = static_cast<u8>(static_cast<u32>(index) * 4 + element);
        const auto it = transform_feedback.find(location);
        if (it == transform_feedback.end()) {
            return {};
        }
        return it->second;
    }
| 872 | |||
    /// Declares one uniform buffer per constant buffer used by the IR,
    /// consuming sequential binding points. Returns the next free binding.
    u32 DeclareConstantBuffers(u32 binding) {
        for (const auto& [index, size] : ir.GetConstantBuffers()) {
            // Prefer scalar block layout when the device supports it; std140 otherwise
            const Id type = device.IsKhrUniformBufferStandardLayoutSupported() ? t_cbuf_scalar_ubo
                                                                               : t_cbuf_std140_ubo;
            const Id id = OpVariable(type, spv::StorageClass::Uniform);
            AddGlobalVariable(Name(id, fmt::format("cbuf_{}", index)));

            Decorate(id, spv::Decoration::Binding, binding++);
            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
            constant_buffers.emplace(index, id);
        }
        return binding;
    }
| 886 | |||
    /// Declares one storage buffer per global memory region used by the IR,
    /// keyed by the (cbuf index, cbuf offset) pair that provides its address.
    /// Returns the next free binding.
    u32 DeclareGlobalBuffers(u32 binding) {
        for (const auto& [base, usage] : ir.GetGlobalMemory()) {
            const Id id = OpVariable(t_gmem_ssbo, spv::StorageClass::StorageBuffer);
            AddGlobalVariable(
                Name(id, fmt::format("gmem_{}_{}", base.cbuf_index, base.cbuf_offset)));

            Decorate(id, spv::Decoration::Binding, binding++);
            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
            global_buffers.emplace(base, id);
        }
        return binding;
    }
| 899 | |||
    /// Declares uniform texel buffers for buffer samplers only (regular
    /// samplers are handled by DeclareSamplers). Returns the next free binding.
    u32 DeclareUniformTexels(u32 binding) {
        for (const auto& sampler : ir.GetSamplers()) {
            if (!sampler.is_buffer) {
                continue;
            }
            // Buffer samplers cannot be arrayed or shadow samplers
            ASSERT(!sampler.is_array);
            ASSERT(!sampler.is_shadow);

            constexpr auto dim = spv::Dim::Buffer;
            constexpr int depth = 0;
            constexpr int arrayed = 0;
            constexpr bool ms = false;
            constexpr int sampled = 1; // sampled image, not a storage image
            constexpr auto format = spv::ImageFormat::Unknown;
            const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
            const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
            const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
            Decorate(id, spv::Decoration::Binding, binding++);
            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);

            uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
        }
        return binding;
    }
| 925 | |||
    /// Declares combined image samplers for every non-buffer sampler in the IR.
    /// Indexed samplers become arrays of sampled images. Returns the next free
    /// binding.
    u32 DeclareSamplers(u32 binding) {
        for (const auto& sampler : ir.GetSamplers()) {
            if (sampler.is_buffer) {
                continue; // handled by DeclareUniformTexels
            }
            const auto dim = GetSamplerDim(sampler);
            const int depth = sampler.is_shadow ? 1 : 0;
            const int arrayed = sampler.is_array ? 1 : 0;
            constexpr bool ms = false;
            constexpr int sampled = 1; // accessed through a sampler
            constexpr auto format = spv::ImageFormat::Unknown;
            const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
            const Id sampler_type = TypeSampledImage(image_type);
            const Id sampler_pointer_type =
                TypePointer(spv::StorageClass::UniformConstant, sampler_type);
            const Id type = sampler.is_indexed
                                ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
                                : sampler_type;
            const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
            const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
            AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
            Decorate(id, spv::Decoration::Binding, binding++);
            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);

            sampled_images.emplace(
                sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
        }
        return binding;
    }
| 955 | |||
| 956 | u32 DeclareStorageTexels(u32 binding) { | ||
| 957 | for (const auto& image : ir.GetImages()) { | ||
| 958 | if (image.type != Tegra::Shader::ImageType::TextureBuffer) { | ||
| 959 | continue; | ||
| 960 | } | ||
| 961 | DeclareImage(image, binding); | ||
| 962 | } | ||
| 963 | return binding; | ||
| 964 | } | ||
| 965 | |||
| 966 | u32 DeclareImages(u32 binding) { | ||
| 967 | for (const auto& image : ir.GetImages()) { | ||
| 968 | if (image.type == Tegra::Shader::ImageType::TextureBuffer) { | ||
| 969 | continue; | ||
| 970 | } | ||
| 971 | DeclareImage(image, binding); | ||
| 972 | } | ||
| 973 | return binding; | ||
| 974 | } | ||
| 975 | |||
    /// Declares a single storage image variable for an IR image entry and
    /// advances the shared binding counter (taken by reference).
    void DeclareImage(const ImageEntry& image, u32& binding) {
        const auto [dim, arrayed] = GetImageDim(image);
        constexpr int depth = 0;
        constexpr bool ms = false;
        constexpr int sampled = 2; // This won't be accessed with a sampler
        // Atomic images get an explicit R32ui format; others stay Unknown
        const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
        const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
        const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
        const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
        AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));

        Decorate(id, spv::Decoration::Binding, binding++);
        Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
        // Tag read-only / write-only usage so drivers can optimize access
        if (image.is_read && !image.is_written) {
            Decorate(id, spv::Decoration::NonWritable);
        } else if (image.is_written && !image.is_read) {
            Decorate(id, spv::Decoration::NonReadable);
        }

        images.emplace(image.index, StorageImage{image_type, id});
    }
| 997 | |||
| 998 | bool IsRenderTargetEnabled(u32 rt) const { | ||
| 999 | for (u32 component = 0; component < 4; ++component) { | ||
| 1000 | if (header.ps.IsColorComponentOutputEnabled(rt, component)) { | ||
| 1001 | return true; | ||
| 1002 | } | ||
| 1003 | } | ||
| 1004 | return false; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | bool IsInputAttributeArray() const { | ||
| 1008 | return stage == ShaderType::TesselationControl || stage == ShaderType::TesselationEval || | ||
| 1009 | stage == ShaderType::Geometry; | ||
| 1010 | } | ||
| 1011 | |||
| 1012 | bool IsOutputAttributeArray() const { | ||
| 1013 | return stage == ShaderType::TesselationControl; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | bool IsAttributeEnabled(u32 location) const { | ||
| 1017 | return stage != ShaderType::Vertex || specialization.enabled_attributes[location]; | ||
| 1018 | } | ||
| 1019 | |||
    /// Number of vertices in the arrayed input block for the current stage.
    /// Only meaningful for geometry and tessellation stages.
    u32 GetNumInputVertices() const {
        switch (stage) {
        case ShaderType::Geometry:
            // Geometry input size depends on the pipeline's primitive topology
            return GetNumPrimitiveTopologyVertices(registry.GetGraphicsInfo().primitive_topology);
        case ShaderType::TesselationControl:
        case ShaderType::TesselationEval:
            return NumInputPatches;
        default:
            UNREACHABLE();
            return 1;
        }
    }
| 1032 | |||
| 1033 | u32 GetNumOutputVertices() const { | ||
| 1034 | switch (stage) { | ||
| 1035 | case ShaderType::TesselationControl: | ||
| 1036 | return header.common2.threads_per_input_primitive; | ||
| 1037 | default: | ||
| 1038 | UNREACHABLE(); | ||
| 1039 | return 1; | ||
| 1040 | } | ||
| 1041 | } | ||
| 1042 | |||
    /// Builds the PerVertex struct holding the built-in outputs this shader
    /// writes (Position always; Layer, ViewportIndex, PointSize and
    /// ClipDistance only when used). Returns the struct type id and the
    /// per-member indices.
    std::tuple<Id, VertexIndices> DeclareVertexStruct() {
        struct BuiltIn {
            Id type;
            spv::BuiltIn builtin;
            const char* name;
        };
        std::vector<BuiltIn> members;
        members.reserve(4);

        // Appends a member and returns its index within the struct
        const auto AddBuiltIn = [&](Id type, spv::BuiltIn builtin, const char* name) {
            const auto index = static_cast<u32>(members.size());
            members.push_back(BuiltIn{type, builtin, name});
            return index;
        };

        VertexIndices indices;
        indices.position = AddBuiltIn(t_float4, spv::BuiltIn::Position, "position");

        if (ir.UsesLayer()) {
            // Writing Layer from a vertex shader requires the
            // shader-viewport-index-layer device extension
            if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
                indices.layer = AddBuiltIn(t_int, spv::BuiltIn::Layer, "layer");
            } else {
                LOG_ERROR(
                    Render_Vulkan,
                    "Shader requires Layer but it's not supported on this stage with this device.");
            }
        }

        if (ir.UsesViewportIndex()) {
            // Same extension requirement as Layer above
            if (stage != ShaderType::Vertex || device.IsExtShaderViewportIndexLayerSupported()) {
                indices.viewport = AddBuiltIn(t_int, spv::BuiltIn::ViewportIndex, "viewport_index");
            } else {
                LOG_ERROR(Render_Vulkan, "Shader requires ViewportIndex but it's not supported on "
                                         "this stage with this device.");
            }
        }

        if (ir.UsesPointSize() || specialization.point_size) {
            indices.point_size = AddBuiltIn(t_float, spv::BuiltIn::PointSize, "point_size");
        }

        // ClipDistance is declared as a full float[8] when either of the two
        // clip-distance attribute groups is written
        const auto& ir_output_attributes = ir.GetOutputAttributes();
        const bool declare_clip_distances = std::any_of(
            ir_output_attributes.begin(), ir_output_attributes.end(), [](const auto& index) {
                return index == Attribute::Index::ClipDistances0123 ||
                       index == Attribute::Index::ClipDistances4567;
            });
        if (declare_clip_distances) {
            indices.clip_distances = AddBuiltIn(TypeArray(t_float, Constant(t_uint, 8)),
                                                spv::BuiltIn::ClipDistance, "clip_distances");
        }

        std::vector<Id> member_types;
        member_types.reserve(members.size());
        for (std::size_t i = 0; i < members.size(); ++i) {
            member_types.push_back(members[i].type);
        }
        const Id per_vertex_struct = Name(TypeStruct(member_types), "PerVertex");
        Decorate(per_vertex_struct, spv::Decoration::Block);

        // Name and decorate each member with its SPIR-V built-in
        for (std::size_t index = 0; index < members.size(); ++index) {
            const auto& member = members[index];
            MemberName(per_vertex_struct, static_cast<u32>(index), member.name);
            MemberDecorate(per_vertex_struct, static_cast<u32>(index), spv::Decoration::BuiltIn,
                           static_cast<u32>(member.builtin));
        }

        return {per_vertex_struct, indices};
    }
| 1112 | |||
| 1113 | void VisitBasicBlock(const NodeBlock& bb) { | ||
| 1114 | for (const auto& node : bb) { | ||
| 1115 | Visit(node); | ||
| 1116 | } | ||
| 1117 | } | ||
| 1118 | |||
| 1119 | Expression Visit(const Node& node) { | ||
| 1120 | if (const auto operation = std::get_if<OperationNode>(&*node)) { | ||
| 1121 | if (const auto amend_index = operation->GetAmendIndex()) { | ||
| 1122 | [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type; | ||
| 1123 | ASSERT(type == Type::Void); | ||
| 1124 | } | ||
| 1125 | const auto operation_index = static_cast<std::size_t>(operation->GetCode()); | ||
| 1126 | const auto decompiler = operation_decompilers[operation_index]; | ||
| 1127 | if (decompiler == nullptr) { | ||
| 1128 | UNREACHABLE_MSG("Operation decompiler {} not defined", operation_index); | ||
| 1129 | } | ||
| 1130 | return (this->*decompiler)(*operation); | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | if (const auto gpr = std::get_if<GprNode>(&*node)) { | ||
| 1134 | const u32 index = gpr->GetIndex(); | ||
| 1135 | if (index == Register::ZeroIndex) { | ||
| 1136 | return {v_float_zero, Type::Float}; | ||
| 1137 | } | ||
| 1138 | return {OpLoad(t_float, registers.at(index)), Type::Float}; | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | if (const auto cv = std::get_if<CustomVarNode>(&*node)) { | ||
| 1142 | const u32 index = cv->GetIndex(); | ||
| 1143 | return {OpLoad(t_float, custom_variables.at(index)), Type::Float}; | ||
| 1144 | } | ||
| 1145 | |||
| 1146 | if (const auto immediate = std::get_if<ImmediateNode>(&*node)) { | ||
| 1147 | return {Constant(t_uint, immediate->GetValue()), Type::Uint}; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | if (const auto predicate = std::get_if<PredicateNode>(&*node)) { | ||
| 1151 | const auto value = [&]() -> Id { | ||
| 1152 | switch (const auto index = predicate->GetIndex(); index) { | ||
| 1153 | case Tegra::Shader::Pred::UnusedIndex: | ||
| 1154 | return v_true; | ||
| 1155 | case Tegra::Shader::Pred::NeverExecute: | ||
| 1156 | return v_false; | ||
| 1157 | default: | ||
| 1158 | return OpLoad(t_bool, predicates.at(index)); | ||
| 1159 | } | ||
| 1160 | }(); | ||
| 1161 | if (predicate->IsNegated()) { | ||
| 1162 | return {OpLogicalNot(t_bool, value), Type::Bool}; | ||
| 1163 | } | ||
| 1164 | return {value, Type::Bool}; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | if (const auto abuf = std::get_if<AbufNode>(&*node)) { | ||
| 1168 | const auto attribute = abuf->GetIndex(); | ||
| 1169 | const u32 element = abuf->GetElement(); | ||
| 1170 | const auto& buffer = abuf->GetBuffer(); | ||
| 1171 | |||
| 1172 | const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) { | ||
| 1173 | std::vector<Id> members; | ||
| 1174 | members.reserve(std::size(indices) + 1); | ||
| 1175 | |||
| 1176 | if (buffer && IsInputAttributeArray()) { | ||
| 1177 | members.push_back(AsUint(Visit(buffer))); | ||
| 1178 | } | ||
| 1179 | for (const u32 index : indices) { | ||
| 1180 | members.push_back(Constant(t_uint, index)); | ||
| 1181 | } | ||
| 1182 | return OpAccessChain(pointer_type, composite, members); | ||
| 1183 | }; | ||
| 1184 | |||
| 1185 | switch (attribute) { | ||
| 1186 | case Attribute::Index::Position: { | ||
| 1187 | if (stage == ShaderType::Fragment) { | ||
| 1188 | return {OpLoad(t_float, AccessElement(t_in_float, frag_coord, element)), | ||
| 1189 | Type::Float}; | ||
| 1190 | } | ||
| 1191 | const std::vector elements = {in_indices.position.value(), element}; | ||
| 1192 | return {OpLoad(t_float, ArrayPass(t_in_float, in_vertex, elements)), Type::Float}; | ||
| 1193 | } | ||
| 1194 | case Attribute::Index::PointCoord: { | ||
| 1195 | switch (element) { | ||
| 1196 | case 0: | ||
| 1197 | case 1: | ||
| 1198 | return {OpCompositeExtract(t_float, OpLoad(t_float2, point_coord), element), | ||
| 1199 | Type::Float}; | ||
| 1200 | } | ||
| 1201 | UNIMPLEMENTED_MSG("Unimplemented point coord element={}", element); | ||
| 1202 | return {v_float_zero, Type::Float}; | ||
| 1203 | } | ||
| 1204 | case Attribute::Index::TessCoordInstanceIDVertexID: | ||
| 1205 | // TODO(Subv): Find out what the values are for the first two elements when inside a | ||
| 1206 | // vertex shader, and what's the value of the fourth element when inside a Tess Eval | ||
| 1207 | // shader. | ||
| 1208 | switch (element) { | ||
| 1209 | case 0: | ||
| 1210 | case 1: | ||
| 1211 | return {OpLoad(t_float, AccessElement(t_in_float, tess_coord, element)), | ||
| 1212 | Type::Float}; | ||
| 1213 | case 2: | ||
| 1214 | return { | ||
| 1215 | OpISub(t_int, OpLoad(t_int, instance_index), OpLoad(t_int, base_instance)), | ||
| 1216 | Type::Int}; | ||
| 1217 | case 3: | ||
| 1218 | return {OpISub(t_int, OpLoad(t_int, vertex_index), OpLoad(t_int, base_vertex)), | ||
| 1219 | Type::Int}; | ||
| 1220 | } | ||
| 1221 | UNIMPLEMENTED_MSG("Unmanaged TessCoordInstanceIDVertexID element={}", element); | ||
| 1222 | return {Constant(t_uint, 0U), Type::Uint}; | ||
| 1223 | case Attribute::Index::FrontFacing: | ||
| 1224 | // TODO(Subv): Find out what the values are for the other elements. | ||
| 1225 | ASSERT(stage == ShaderType::Fragment); | ||
| 1226 | if (element == 3) { | ||
| 1227 | const Id is_front_facing = OpLoad(t_bool, front_facing); | ||
| 1228 | const Id true_value = Constant(t_int, static_cast<s32>(-1)); | ||
| 1229 | const Id false_value = Constant(t_int, 0); | ||
| 1230 | return {OpSelect(t_int, is_front_facing, true_value, false_value), Type::Int}; | ||
| 1231 | } | ||
| 1232 | UNIMPLEMENTED_MSG("Unmanaged FrontFacing element={}", element); | ||
| 1233 | return {v_float_zero, Type::Float}; | ||
| 1234 | default: | ||
| 1235 | if (!IsGenericAttribute(attribute)) { | ||
| 1236 | break; | ||
| 1237 | } | ||
| 1238 | const u32 location = GetGenericAttributeLocation(attribute); | ||
| 1239 | if (!IsAttributeEnabled(location)) { | ||
| 1240 | // Disabled attributes (also known as constant attributes) always return zero. | ||
| 1241 | return {v_float_zero, Type::Float}; | ||
| 1242 | } | ||
| 1243 | const auto type_descriptor = GetAttributeType(location); | ||
| 1244 | const Type type = type_descriptor.type; | ||
| 1245 | const Id attribute_id = input_attributes.at(attribute); | ||
| 1246 | const std::vector elements = {element}; | ||
| 1247 | const Id pointer = ArrayPass(type_descriptor.scalar, attribute_id, elements); | ||
| 1248 | return {OpLoad(GetTypeDefinition(type), pointer), type}; | ||
| 1249 | } | ||
| 1250 | UNIMPLEMENTED_MSG("Unhandled input attribute: {}", attribute); | ||
| 1251 | return {v_float_zero, Type::Float}; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | if (const auto cbuf = std::get_if<CbufNode>(&*node)) { | ||
| 1255 | const Node& offset = cbuf->GetOffset(); | ||
| 1256 | const Id buffer_id = constant_buffers.at(cbuf->GetIndex()); | ||
| 1257 | |||
| 1258 | Id pointer{}; | ||
| 1259 | if (device.IsKhrUniformBufferStandardLayoutSupported()) { | ||
| 1260 | const Id buffer_offset = | ||
| 1261 | OpShiftRightLogical(t_uint, AsUint(Visit(offset)), Constant(t_uint, 2U)); | ||
| 1262 | pointer = | ||
| 1263 | OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0U), buffer_offset); | ||
| 1264 | } else { | ||
| 1265 | Id buffer_index{}; | ||
| 1266 | Id buffer_element{}; | ||
| 1267 | if (const auto immediate = std::get_if<ImmediateNode>(&*offset)) { | ||
| 1268 | // Direct access | ||
| 1269 | const u32 offset_imm = immediate->GetValue(); | ||
| 1270 | ASSERT(offset_imm % 4 == 0); | ||
| 1271 | buffer_index = Constant(t_uint, offset_imm / 16); | ||
| 1272 | buffer_element = Constant(t_uint, (offset_imm / 4) % 4); | ||
| 1273 | } else if (std::holds_alternative<OperationNode>(*offset)) { | ||
| 1274 | // Indirect access | ||
| 1275 | const Id offset_id = AsUint(Visit(offset)); | ||
| 1276 | const Id unsafe_offset = OpUDiv(t_uint, offset_id, Constant(t_uint, 4)); | ||
| 1277 | const Id final_offset = | ||
| 1278 | OpUMod(t_uint, unsafe_offset, Constant(t_uint, MaxConstBufferElements - 1)); | ||
| 1279 | buffer_index = OpUDiv(t_uint, final_offset, Constant(t_uint, 4)); | ||
| 1280 | buffer_element = OpUMod(t_uint, final_offset, Constant(t_uint, 4)); | ||
| 1281 | } else { | ||
| 1282 | UNREACHABLE_MSG("Unmanaged offset node type"); | ||
| 1283 | } | ||
| 1284 | pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index, | ||
| 1285 | buffer_element); | ||
| 1286 | } | ||
| 1287 | return {OpLoad(t_float, pointer), Type::Float}; | ||
| 1288 | } | ||
| 1289 | |||
| 1290 | if (const auto gmem = std::get_if<GmemNode>(&*node)) { | ||
| 1291 | return {OpLoad(t_uint, GetGlobalMemoryPointer(*gmem)), Type::Uint}; | ||
| 1292 | } | ||
| 1293 | |||
| 1294 | if (const auto lmem = std::get_if<LmemNode>(&*node)) { | ||
| 1295 | Id address = AsUint(Visit(lmem->GetAddress())); | ||
| 1296 | address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); | ||
| 1297 | const Id pointer = OpAccessChain(t_prv_float, local_memory, address); | ||
| 1298 | return {OpLoad(t_float, pointer), Type::Float}; | ||
| 1299 | } | ||
| 1300 | |||
| 1301 | if (const auto smem = std::get_if<SmemNode>(&*node)) { | ||
| 1302 | return {OpLoad(t_uint, GetSharedMemoryPointer(*smem)), Type::Uint}; | ||
| 1303 | } | ||
| 1304 | |||
| 1305 | if (const auto internal_flag = std::get_if<InternalFlagNode>(&*node)) { | ||
| 1306 | const Id flag = internal_flags.at(static_cast<std::size_t>(internal_flag->GetFlag())); | ||
| 1307 | return {OpLoad(t_bool, flag), Type::Bool}; | ||
| 1308 | } | ||
| 1309 | |||
| 1310 | if (const auto conditional = std::get_if<ConditionalNode>(&*node)) { | ||
| 1311 | if (const auto amend_index = conditional->GetAmendIndex()) { | ||
| 1312 | [[maybe_unused]] const Type type = Visit(ir.GetAmendNode(*amend_index)).type; | ||
| 1313 | ASSERT(type == Type::Void); | ||
| 1314 | } | ||
| 1315 | // It's invalid to call conditional on nested nodes, use an operation instead | ||
| 1316 | const Id true_label = OpLabel(); | ||
| 1317 | const Id skip_label = OpLabel(); | ||
| 1318 | const Id condition = AsBool(Visit(conditional->GetCondition())); | ||
| 1319 | OpSelectionMerge(skip_label, spv::SelectionControlMask::MaskNone); | ||
| 1320 | OpBranchConditional(condition, true_label, skip_label); | ||
| 1321 | AddLabel(true_label); | ||
| 1322 | |||
| 1323 | conditional_branch_set = true; | ||
| 1324 | inside_branch = false; | ||
| 1325 | VisitBasicBlock(conditional->GetCode()); | ||
| 1326 | conditional_branch_set = false; | ||
| 1327 | if (!inside_branch) { | ||
| 1328 | OpBranch(skip_label); | ||
| 1329 | } else { | ||
| 1330 | inside_branch = false; | ||
| 1331 | } | ||
| 1332 | AddLabel(skip_label); | ||
| 1333 | return {}; | ||
| 1334 | } | ||
| 1335 | |||
| 1336 | if (const auto comment = std::get_if<CommentNode>(&*node)) { | ||
| 1337 | if (device.HasDebuggingToolAttached()) { | ||
| 1338 | // We should insert comments with OpString instead of using named variables | ||
| 1339 | Name(OpUndef(t_int), comment->GetText()); | ||
| 1340 | } | ||
| 1341 | return {}; | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | UNREACHABLE(); | ||
| 1345 | return {}; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | template <Id (Module::*func)(Id, Id), Type result_type, Type type_a = result_type> | ||
| 1349 | Expression Unary(Operation operation) { | ||
| 1350 | const Id type_def = GetTypeDefinition(result_type); | ||
| 1351 | const Id op_a = As(Visit(operation[0]), type_a); | ||
| 1352 | |||
| 1353 | const Id value = (this->*func)(type_def, op_a); | ||
| 1354 | if (IsPrecise(operation)) { | ||
| 1355 | Decorate(value, spv::Decoration::NoContraction); | ||
| 1356 | } | ||
| 1357 | return {value, result_type}; | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | template <Id (Module::*func)(Id, Id, Id), Type result_type, Type type_a = result_type, | ||
| 1361 | Type type_b = type_a> | ||
| 1362 | Expression Binary(Operation operation) { | ||
| 1363 | const Id type_def = GetTypeDefinition(result_type); | ||
| 1364 | const Id op_a = As(Visit(operation[0]), type_a); | ||
| 1365 | const Id op_b = As(Visit(operation[1]), type_b); | ||
| 1366 | |||
| 1367 | const Id value = (this->*func)(type_def, op_a, op_b); | ||
| 1368 | if (IsPrecise(operation)) { | ||
| 1369 | Decorate(value, spv::Decoration::NoContraction); | ||
| 1370 | } | ||
| 1371 | return {value, result_type}; | ||
| 1372 | } | ||
| 1373 | |||
| 1374 | template <Id (Module::*func)(Id, Id, Id, Id), Type result_type, Type type_a = result_type, | ||
| 1375 | Type type_b = type_a, Type type_c = type_b> | ||
| 1376 | Expression Ternary(Operation operation) { | ||
| 1377 | const Id type_def = GetTypeDefinition(result_type); | ||
| 1378 | const Id op_a = As(Visit(operation[0]), type_a); | ||
| 1379 | const Id op_b = As(Visit(operation[1]), type_b); | ||
| 1380 | const Id op_c = As(Visit(operation[2]), type_c); | ||
| 1381 | |||
| 1382 | const Id value = (this->*func)(type_def, op_a, op_b, op_c); | ||
| 1383 | if (IsPrecise(operation)) { | ||
| 1384 | Decorate(value, spv::Decoration::NoContraction); | ||
| 1385 | } | ||
| 1386 | return {value, result_type}; | ||
| 1387 | } | ||
| 1388 | |||
| 1389 | template <Id (Module::*func)(Id, Id, Id, Id, Id), Type result_type, Type type_a = result_type, | ||
| 1390 | Type type_b = type_a, Type type_c = type_b, Type type_d = type_c> | ||
| 1391 | Expression Quaternary(Operation operation) { | ||
| 1392 | const Id type_def = GetTypeDefinition(result_type); | ||
| 1393 | const Id op_a = As(Visit(operation[0]), type_a); | ||
| 1394 | const Id op_b = As(Visit(operation[1]), type_b); | ||
| 1395 | const Id op_c = As(Visit(operation[2]), type_c); | ||
| 1396 | const Id op_d = As(Visit(operation[3]), type_d); | ||
| 1397 | |||
| 1398 | const Id value = (this->*func)(type_def, op_a, op_b, op_c, op_d); | ||
| 1399 | if (IsPrecise(operation)) { | ||
| 1400 | Decorate(value, spv::Decoration::NoContraction); | ||
| 1401 | } | ||
| 1402 | return {value, result_type}; | ||
| 1403 | } | ||
| 1404 | |||
    // Handles an IR assignment: resolves the destination node to a SPIR-V pointer
    // (and its value type) and stores the visited source into it. Returns an empty
    // Expression; assignments produce no value.
    Expression Assign(Operation operation) {
        const Node& dest = operation[0];
        const Node& src = operation[1];

        Expression target{};
        if (const auto gpr = std::get_if<GprNode>(&*dest)) {
            if (gpr->GetIndex() == Register::ZeroIndex) {
                // Writing to Register::ZeroIndex is a no op but we still have to visit its source
                // because it might have side effects.
                Visit(src);
                return {};
            }
            // General-purpose registers are stored as float variables.
            target = {registers.at(gpr->GetIndex()), Type::Float};

        } else if (const auto abuf = std::get_if<AbufNode>(&*dest)) {
            const auto& buffer = abuf->GetBuffer();
            // Builds an OpAccessChain into `composite`; when the output attributes are
            // arrayed (e.g. tessellation/geometry stages) the visited buffer node is
            // prepended as the outermost (vertex) index.
            const auto ArrayPass = [&](Id pointer_type, Id composite, std::vector<u32> indices) {
                std::vector<Id> members;
                members.reserve(std::size(indices) + 1);

                if (buffer && IsOutputAttributeArray()) {
                    members.push_back(AsUint(Visit(buffer)));
                }
                for (const u32 index : indices) {
                    members.push_back(Constant(t_uint, index));
                }
                return OpAccessChain(pointer_type, composite, members);
            };

            // Resolve the output-attribute pointer. An empty Expression means the
            // write has no SPIR-V destination and is silently dropped below.
            target = [&]() -> Expression {
                const u32 element = abuf->GetElement();
                switch (const auto attribute = abuf->GetIndex(); attribute) {
                case Attribute::Index::Position: {
                    const u32 index = out_indices.position.value();
                    return {ArrayPass(t_out_float, out_vertex, {index, element}), Type::Float};
                }
                case Attribute::Index::LayerViewportPointSize:
                    // Element selects between layer (1), viewport (2) and point size (3).
                    switch (element) {
                    case 1: {
                        if (!out_indices.layer) {
                            return {};
                        }
                        const u32 index = out_indices.layer.value();
                        return {AccessElement(t_out_int, out_vertex, index), Type::Int};
                    }
                    case 2: {
                        if (!out_indices.viewport) {
                            return {};
                        }
                        const u32 index = out_indices.viewport.value();
                        return {AccessElement(t_out_int, out_vertex, index), Type::Int};
                    }
                    case 3: {
                        const auto index = out_indices.point_size.value();
                        return {AccessElement(t_out_float, out_vertex, index), Type::Float};
                    }
                    default:
                        UNIMPLEMENTED_MSG("LayerViewportPoint element={}", abuf->GetElement());
                        return {};
                    }
                case Attribute::Index::ClipDistances0123: {
                    const u32 index = out_indices.clip_distances.value();
                    return {AccessElement(t_out_float, out_vertex, index, element), Type::Float};
                }
                case Attribute::Index::ClipDistances4567: {
                    // Second clip-distance bank: same array, elements 4..7.
                    const u32 index = out_indices.clip_distances.value();
                    return {AccessElement(t_out_float, out_vertex, index, element + 4),
                            Type::Float};
                }
                default:
                    if (IsGenericAttribute(attribute)) {
                        // Generic varyings are keyed by attribute * 4 + element; scalar
                        // varyings are accessed directly, vectors through an index
                        // relative to their first element.
                        const u8 offset = static_cast<u8>(static_cast<u8>(attribute) * 4 + element);
                        const GenericVaryingDescription description = output_attributes.at(offset);
                        const Id composite = description.id;
                        std::vector<u32> indices;
                        if (!description.is_scalar) {
                            indices.push_back(element - description.first_element);
                        }
                        return {ArrayPass(t_out_float, composite, indices), Type::Float};
                    }
                    UNIMPLEMENTED_MSG("Unhandled output attribute: {}",
                                      static_cast<u32>(attribute));
                    return {};
                }
            }();

        } else if (const auto patch = std::get_if<PatchNode>(&*dest)) {
            // Tessellation patch outputs: offsets 0-3 map to gl_TessLevelOuter,
            // 4-5 to gl_TessLevelInner.
            target = [&]() -> Expression {
                const u32 offset = patch->GetOffset();
                switch (offset) {
                case 0:
                case 1:
                case 2:
                case 3:
                    return {AccessElement(t_out_float, tess_level_outer, offset % 4), Type::Float};
                case 4:
                case 5:
                    return {AccessElement(t_out_float, tess_level_inner, offset % 4), Type::Float};
                }
                UNIMPLEMENTED_MSG("Unhandled patch output offset: {}", offset);
                return {};
            }();

        } else if (const auto lmem = std::get_if<LmemNode>(&*dest)) {
            // Local memory is an array of 32-bit floats; byte address -> word index.
            Id address = AsUint(Visit(lmem->GetAddress()));
            address = OpUDiv(t_uint, address, Constant(t_uint, 4));
            target = {OpAccessChain(t_prv_float, local_memory, address), Type::Float};

        } else if (const auto smem = std::get_if<SmemNode>(&*dest)) {
            target = {GetSharedMemoryPointer(*smem), Type::Uint};

        } else if (const auto gmem = std::get_if<GmemNode>(&*dest)) {
            target = {GetGlobalMemoryPointer(*gmem), Type::Uint};

        } else if (const auto cv = std::get_if<CustomVarNode>(&*dest)) {
            target = {custom_variables.at(cv->GetIndex()), Type::Float};

        } else {
            UNIMPLEMENTED();
        }

        if (!target.id) {
            // On failure we return a nullptr target.id, skip these stores.
            return {};
        }

        OpStore(target.id, As(Visit(src), target.type));
        return {};
    }
| 1534 | |||
| 1535 | template <u32 offset> | ||
| 1536 | Expression FCastHalf(Operation operation) { | ||
| 1537 | const Id value = AsHalfFloat(Visit(operation[0])); | ||
| 1538 | return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, offset)), | ||
| 1539 | Type::Float}; | ||
| 1540 | } | ||
| 1541 | |||
    // Implements the FSWZADD instruction: per-thread selection of +/-1/0
    // multipliers from two constant lookup tables, indexed by a 2-bit field of
    // operand[2] chosen by the thread's position within its quad.
    Expression FSwizzleAdd(Operation operation) {
        const Id minus = Constant(t_float, -1.0f);
        const Id plus = v_float_one;
        const Id zero = v_float_zero;
        const Id lut_a = ConstantComposite(t_float4, minus, plus, minus, zero);
        const Id lut_b = ConstantComposite(t_float4, minus, minus, plus, minus);

        // mask = (operand2 >> ((thread_id & 3) * 2)) & 3 — two swizzle bits per thread.
        Id mask = OpLoad(t_uint, thread_id);
        mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3));
        mask = OpShiftLeftLogical(t_uint, mask, Constant(t_uint, 1));
        mask = OpShiftRightLogical(t_uint, AsUint(Visit(operation[2])), mask);
        mask = OpBitwiseAnd(t_uint, mask, Constant(t_uint, 3));

        // Select this thread's multiplier for each source from the LUTs.
        const Id modifier_a = OpVectorExtractDynamic(t_float, lut_a, mask);
        const Id modifier_b = OpVectorExtractDynamic(t_float, lut_b, mask);

        const Id op_a = OpFMul(t_float, AsFloat(Visit(operation[0])), modifier_a);
        const Id op_b = OpFMul(t_float, AsFloat(Visit(operation[1])), modifier_b);
        return {OpFAdd(t_float, op_a, op_b), Type::Float};
    }
| 1562 | |||
| 1563 | Expression HNegate(Operation operation) { | ||
| 1564 | const bool is_f16 = device.IsFloat16Supported(); | ||
| 1565 | const Id minus_one = Constant(t_scalar_half, is_f16 ? 0xbc00 : 0xbf800000); | ||
| 1566 | const Id one = Constant(t_scalar_half, is_f16 ? 0x3c00 : 0x3f800000); | ||
| 1567 | const auto GetNegate = [&](std::size_t index) { | ||
| 1568 | return OpSelect(t_scalar_half, AsBool(Visit(operation[index])), minus_one, one); | ||
| 1569 | }; | ||
| 1570 | const Id negation = OpCompositeConstruct(t_half, GetNegate(1), GetNegate(2)); | ||
| 1571 | return {OpFMul(t_half, AsHalfFloat(Visit(operation[0])), negation), Type::HalfFloat}; | ||
| 1572 | } | ||
| 1573 | |||
| 1574 | Expression HClamp(Operation operation) { | ||
| 1575 | const auto Pack = [&](std::size_t index) { | ||
| 1576 | const Id scalar = GetHalfScalarFromFloat(AsFloat(Visit(operation[index]))); | ||
| 1577 | return OpCompositeConstruct(t_half, scalar, scalar); | ||
| 1578 | }; | ||
| 1579 | const Id value = AsHalfFloat(Visit(operation[0])); | ||
| 1580 | const Id min = Pack(1); | ||
| 1581 | const Id max = Pack(2); | ||
| 1582 | |||
| 1583 | const Id clamped = OpFClamp(t_half, value, min, max); | ||
| 1584 | if (IsPrecise(operation)) { | ||
| 1585 | Decorate(clamped, spv::Decoration::NoContraction); | ||
| 1586 | } | ||
| 1587 | return {clamped, Type::HalfFloat}; | ||
| 1588 | } | ||
| 1589 | |||
| 1590 | Expression HCastFloat(Operation operation) { | ||
| 1591 | const Id value = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); | ||
| 1592 | return {OpCompositeConstruct(t_half, value, Constant(t_scalar_half, 0)), Type::HalfFloat}; | ||
| 1593 | } | ||
| 1594 | |||
| 1595 | Expression HUnpack(Operation operation) { | ||
| 1596 | Expression operand = Visit(operation[0]); | ||
| 1597 | const auto type = std::get<Tegra::Shader::HalfType>(operation.GetMeta()); | ||
| 1598 | if (type == Tegra::Shader::HalfType::H0_H1) { | ||
| 1599 | return operand; | ||
| 1600 | } | ||
| 1601 | const auto value = [&] { | ||
| 1602 | switch (std::get<Tegra::Shader::HalfType>(operation.GetMeta())) { | ||
| 1603 | case Tegra::Shader::HalfType::F32: | ||
| 1604 | return GetHalfScalarFromFloat(AsFloat(operand)); | ||
| 1605 | case Tegra::Shader::HalfType::H0_H0: | ||
| 1606 | return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 0); | ||
| 1607 | case Tegra::Shader::HalfType::H1_H1: | ||
| 1608 | return OpCompositeExtract(t_scalar_half, AsHalfFloat(operand), 1); | ||
| 1609 | default: | ||
| 1610 | UNREACHABLE(); | ||
| 1611 | return ConstantNull(t_half); | ||
| 1612 | } | ||
| 1613 | }(); | ||
| 1614 | return {OpCompositeConstruct(t_half, value, value), Type::HalfFloat}; | ||
| 1615 | } | ||
| 1616 | |||
| 1617 | Expression HMergeF32(Operation operation) { | ||
| 1618 | const Id value = AsHalfFloat(Visit(operation[0])); | ||
| 1619 | return {GetFloatFromHalfScalar(OpCompositeExtract(t_scalar_half, value, 0)), Type::Float}; | ||
| 1620 | } | ||
| 1621 | |||
| 1622 | template <u32 offset> | ||
| 1623 | Expression HMergeHN(Operation operation) { | ||
| 1624 | const Id target = AsHalfFloat(Visit(operation[0])); | ||
| 1625 | const Id source = AsHalfFloat(Visit(operation[1])); | ||
| 1626 | const Id object = OpCompositeExtract(t_scalar_half, source, offset); | ||
| 1627 | return {OpCompositeInsert(t_half, object, target, offset), Type::HalfFloat}; | ||
| 1628 | } | ||
| 1629 | |||
| 1630 | Expression HPack2(Operation operation) { | ||
| 1631 | const Id low = GetHalfScalarFromFloat(AsFloat(Visit(operation[0]))); | ||
| 1632 | const Id high = GetHalfScalarFromFloat(AsFloat(Visit(operation[1]))); | ||
| 1633 | return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat}; | ||
| 1634 | } | ||
| 1635 | |||
| 1636 | Expression LogicalAddCarry(Operation operation) { | ||
| 1637 | const Id op_a = AsUint(Visit(operation[0])); | ||
| 1638 | const Id op_b = AsUint(Visit(operation[1])); | ||
| 1639 | |||
| 1640 | const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b); | ||
| 1641 | const Id carry = OpCompositeExtract(t_uint, result, 1); | ||
| 1642 | return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool}; | ||
| 1643 | } | ||
| 1644 | |||
| 1645 | Expression LogicalAssign(Operation operation) { | ||
| 1646 | const Node& dest = operation[0]; | ||
| 1647 | const Node& src = operation[1]; | ||
| 1648 | |||
| 1649 | Id target{}; | ||
| 1650 | if (const auto pred = std::get_if<PredicateNode>(&*dest)) { | ||
| 1651 | ASSERT_MSG(!pred->IsNegated(), "Negating logical assignment"); | ||
| 1652 | |||
| 1653 | const auto index = pred->GetIndex(); | ||
| 1654 | switch (index) { | ||
| 1655 | case Tegra::Shader::Pred::NeverExecute: | ||
| 1656 | case Tegra::Shader::Pred::UnusedIndex: | ||
| 1657 | // Writing to these predicates is a no-op | ||
| 1658 | return {}; | ||
| 1659 | } | ||
| 1660 | target = predicates.at(index); | ||
| 1661 | |||
| 1662 | } else if (const auto flag = std::get_if<InternalFlagNode>(&*dest)) { | ||
| 1663 | target = internal_flags.at(static_cast<u32>(flag->GetFlag())); | ||
| 1664 | } | ||
| 1665 | |||
| 1666 | OpStore(target, AsBool(Visit(src))); | ||
| 1667 | return {}; | ||
| 1668 | } | ||
| 1669 | |||
| 1670 | Expression LogicalFOrdered(Operation operation) { | ||
| 1671 | // Emulate SPIR-V's OpOrdered | ||
| 1672 | const Id op_a = AsFloat(Visit(operation[0])); | ||
| 1673 | const Id op_b = AsFloat(Visit(operation[1])); | ||
| 1674 | const Id is_num_a = OpFOrdEqual(t_bool, op_a, op_a); | ||
| 1675 | const Id is_num_b = OpFOrdEqual(t_bool, op_b, op_b); | ||
| 1676 | return {OpLogicalAnd(t_bool, is_num_a, is_num_b), Type::Bool}; | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | Expression LogicalFUnordered(Operation operation) { | ||
| 1680 | // Emulate SPIR-V's OpUnordered | ||
| 1681 | const Id op_a = AsFloat(Visit(operation[0])); | ||
| 1682 | const Id op_b = AsFloat(Visit(operation[1])); | ||
| 1683 | const Id is_nan_a = OpIsNan(t_bool, op_a); | ||
| 1684 | const Id is_nan_b = OpIsNan(t_bool, op_b); | ||
| 1685 | return {OpLogicalOr(t_bool, is_nan_a, is_nan_b), Type::Bool}; | ||
| 1686 | } | ||
| 1687 | |||
| 1688 | Id GetTextureSampler(Operation operation) { | ||
| 1689 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1690 | ASSERT(!meta.sampler.is_buffer); | ||
| 1691 | |||
| 1692 | const auto& entry = sampled_images.at(meta.sampler.index); | ||
| 1693 | Id sampler = entry.variable; | ||
| 1694 | if (meta.sampler.is_indexed) { | ||
| 1695 | const Id index = AsInt(Visit(meta.index)); | ||
| 1696 | sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index); | ||
| 1697 | } | ||
| 1698 | return OpLoad(entry.sampler_type, sampler); | ||
| 1699 | } | ||
| 1700 | |||
| 1701 | Id GetTextureImage(Operation operation) { | ||
| 1702 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1703 | const u32 index = meta.sampler.index; | ||
| 1704 | if (meta.sampler.is_buffer) { | ||
| 1705 | const auto& entry = uniform_texels.at(index); | ||
| 1706 | return OpLoad(entry.image_type, entry.image); | ||
| 1707 | } else { | ||
| 1708 | const auto& entry = sampled_images.at(index); | ||
| 1709 | return OpImage(entry.image_type, GetTextureSampler(operation)); | ||
| 1710 | } | ||
| 1711 | } | ||
| 1712 | |||
| 1713 | Id GetImage(Operation operation) { | ||
| 1714 | const auto& meta = std::get<MetaImage>(operation.GetMeta()); | ||
| 1715 | const auto entry = images.at(meta.image.index); | ||
| 1716 | return OpLoad(entry.image_type, entry.image); | ||
| 1717 | } | ||
| 1718 | |||
| 1719 | Id AssembleVector(const std::vector<Id>& coords, Type type) { | ||
| 1720 | const Id coords_type = GetTypeVectorDefinitionLut(type).at(coords.size() - 1); | ||
| 1721 | return coords.size() == 1 ? coords[0] : OpCompositeConstruct(coords_type, coords); | ||
| 1722 | } | ||
| 1723 | |||
| 1724 | Id GetCoordinates(Operation operation, Type type) { | ||
| 1725 | std::vector<Id> coords; | ||
| 1726 | for (std::size_t i = 0; i < operation.GetOperandsCount(); ++i) { | ||
| 1727 | coords.push_back(As(Visit(operation[i]), type)); | ||
| 1728 | } | ||
| 1729 | if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) { | ||
| 1730 | // Add array coordinate for textures | ||
| 1731 | if (meta->sampler.is_array) { | ||
| 1732 | Id array = AsInt(Visit(meta->array)); | ||
| 1733 | if (type == Type::Float) { | ||
| 1734 | array = OpConvertSToF(t_float, array); | ||
| 1735 | } | ||
| 1736 | coords.push_back(array); | ||
| 1737 | } | ||
| 1738 | } | ||
| 1739 | return AssembleVector(coords, type); | ||
| 1740 | } | ||
| 1741 | |||
| 1742 | Id GetOffsetCoordinates(Operation operation) { | ||
| 1743 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1744 | std::vector<Id> coords; | ||
| 1745 | coords.reserve(meta.aoffi.size()); | ||
| 1746 | for (const auto& coord : meta.aoffi) { | ||
| 1747 | coords.push_back(AsInt(Visit(coord))); | ||
| 1748 | } | ||
| 1749 | return AssembleVector(coords, Type::Int); | ||
| 1750 | } | ||
| 1751 | |||
| 1752 | std::pair<Id, Id> GetDerivatives(Operation operation) { | ||
| 1753 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1754 | const auto& derivatives = meta.derivates; | ||
| 1755 | ASSERT(derivatives.size() % 2 == 0); | ||
| 1756 | |||
| 1757 | const std::size_t components = derivatives.size() / 2; | ||
| 1758 | std::vector<Id> dx, dy; | ||
| 1759 | dx.reserve(components); | ||
| 1760 | dy.reserve(components); | ||
| 1761 | for (std::size_t index = 0; index < components; ++index) { | ||
| 1762 | dx.push_back(AsFloat(Visit(derivatives.at(index * 2 + 0)))); | ||
| 1763 | dy.push_back(AsFloat(Visit(derivatives.at(index * 2 + 1)))); | ||
| 1764 | } | ||
| 1765 | return {AssembleVector(dx, Type::Float), AssembleVector(dy, Type::Float)}; | ||
| 1766 | } | ||
| 1767 | |||
| 1768 | Expression GetTextureElement(Operation operation, Id sample_value, Type type) { | ||
| 1769 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1770 | const auto type_def = GetTypeDefinition(type); | ||
| 1771 | return {OpCompositeExtract(type_def, sample_value, meta.element), type}; | ||
| 1772 | } | ||
| 1773 | |||
    // Emits a texture sample. Implicit-LOD sampling is only legal in fragment
    // shaders; other stages force an explicit LOD of zero. Image operands must be
    // pushed in mask-bit order (Bias, Lod, Offset) per the SPIR-V spec.
    Expression Texture(Operation operation) {
        const auto& meta = std::get<MetaTexture>(operation.GetMeta());

        const bool can_implicit = stage == ShaderType::Fragment;
        const Id sampler = GetTextureSampler(operation);
        const Id coords = GetCoordinates(operation, Type::Float);

        std::vector<Id> operands;
        spv::ImageOperandsMask mask{};
        if (meta.bias) {
            mask = mask | spv::ImageOperandsMask::Bias;
            operands.push_back(AsFloat(Visit(meta.bias)));
        }

        if (!can_implicit) {
            // Non-fragment stages cannot sample implicitly; pin LOD to zero.
            mask = mask | spv::ImageOperandsMask::Lod;
            operands.push_back(v_float_zero);
        }

        if (!meta.aoffi.empty()) {
            mask = mask | spv::ImageOperandsMask::Offset;
            operands.push_back(GetOffsetCoordinates(operation));
        }

        if (meta.depth_compare) {
            // Depth sampling
            UNIMPLEMENTED_IF(meta.bias);
            const Id dref = AsFloat(Visit(meta.depth_compare));
            if (can_implicit) {
                return {
                    OpImageSampleDrefImplicitLod(t_float, sampler, coords, dref, mask, operands),
                    Type::Float};
            } else {
                return {
                    OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
                    Type::Float};
            }
        }

        Id texture;
        if (can_implicit) {
            texture = OpImageSampleImplicitLod(t_float4, sampler, coords, mask, operands);
        } else {
            texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
        }
        // Return only the component this IR operation asked for.
        return GetTextureElement(operation, texture, Type::Float);
    }
| 1821 | |||
    // Emits an explicit-LOD texture sample (TLD-style). Shadow samplers use a
    // depth-reference sample returning a scalar; otherwise the requested
    // component is extracted from the sampled vector.
    Expression TextureLod(Operation operation) {
        const auto& meta = std::get<MetaTexture>(operation.GetMeta());

        const Id sampler = GetTextureSampler(operation);
        const Id coords = GetCoordinates(operation, Type::Float);
        const Id lod = AsFloat(Visit(meta.lod));

        // LOD is mandatory here; offsets are appended after it in mask-bit order.
        spv::ImageOperandsMask mask = spv::ImageOperandsMask::Lod;
        std::vector<Id> operands{lod};

        if (!meta.aoffi.empty()) {
            mask = mask | spv::ImageOperandsMask::Offset;
            operands.push_back(GetOffsetCoordinates(operation));
        }

        if (meta.sampler.is_shadow) {
            const Id dref = AsFloat(Visit(meta.depth_compare));
            return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
                    Type::Float};
        }
        const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, operands);
        return GetTextureElement(operation, texture, Type::Float);
    }
| 1845 | |||
    // Emits a 4-texel gather (TLD4). Shadow samplers gather against a depth
    // reference; otherwise the gathered component must be an immediate in the IR
    // and is passed as a constant.
    Expression TextureGather(Operation operation) {
        const auto& meta = std::get<MetaTexture>(operation.GetMeta());

        const Id coords = GetCoordinates(operation, Type::Float);

        spv::ImageOperandsMask mask = spv::ImageOperandsMask::MaskNone;
        std::vector<Id> operands;
        Id texture{};

        if (!meta.aoffi.empty()) {
            mask = mask | spv::ImageOperandsMask::Offset;
            operands.push_back(GetOffsetCoordinates(operation));
        }

        if (meta.sampler.is_shadow) {
            texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
                                        AsFloat(Visit(meta.depth_compare)), mask, operands);
        } else {
            // OpImageGather requires the component as a constant; the IR guarantees
            // it is an immediate node when present (defaults to component 0).
            u32 component_value = 0;
            if (meta.component) {
                const auto component = std::get_if<ImmediateNode>(&*meta.component);
                ASSERT_MSG(component, "Component is not an immediate value");
                component_value = component->GetValue();
            }
            texture = OpImageGather(t_float4, GetTextureSampler(operation), coords,
                                    Constant(t_uint, component_value), mask, operands);
        }
        return GetTextureElement(operation, texture, Type::Float);
    }
| 1875 | |||
    // Implements TXQ dimensions: element 3 queries the mip count, elements 0..2
    // query the size along that axis at the requested LOD. Elements beyond the
    // texture's dimensionality return zero.
    Expression TextureQueryDimensions(Operation operation) {
        const auto& meta = std::get<MetaTexture>(operation.GetMeta());
        UNIMPLEMENTED_IF(!meta.aoffi.empty());
        UNIMPLEMENTED_IF(meta.depth_compare);

        const auto image_id = GetTextureImage(operation);
        if (meta.element == 3) {
            // Fourth element is the number of mipmap levels.
            return {OpImageQueryLevels(t_int, image_id), Type::Int};
        }

        const Id lod = AsUint(Visit(operation[0]));
        // Number of size components returned by OpImageQuerySizeLod for this
        // texture type (cubes query as 2D).
        const std::size_t coords_count = [&meta] {
            switch (const auto type = meta.sampler.type) {
            case Tegra::Shader::TextureType::Texture1D:
                return 1;
            case Tegra::Shader::TextureType::Texture2D:
            case Tegra::Shader::TextureType::TextureCube:
                return 2;
            case Tegra::Shader::TextureType::Texture3D:
                return 3;
            default:
                UNREACHABLE_MSG("Invalid texture type={}", type);
                return 2;
            }
        }();

        if (meta.element >= coords_count) {
            // Asking for an axis the texture does not have.
            return {v_float_zero, Type::Float};
        }

        const std::array<Id, 3> types = {t_int, t_int2, t_int3};
        const Id sizes = OpImageQuerySizeLod(types.at(coords_count - 1), image_id, lod);
        const Id size = OpCompositeExtract(t_int, sizes, meta.element);
        return {size, Type::Int};
    }
| 1911 | |||
// Implements LOD queries (TMML). The queried LOD pair is scaled by 256 and
// converted to integers, matching the fixed-point format the guest expects.
// meta.element selects which of the two components is returned.
Expression TextureQueryLod(Operation operation) {
    const auto& meta = std::get<MetaTexture>(operation.GetMeta());
    UNIMPLEMENTED_IF(!meta.aoffi.empty());
    UNIMPLEMENTED_IF(meta.depth_compare);

    if (meta.element >= 2) {
        // OpImageQueryLod only produces two components.
        UNREACHABLE_MSG("Invalid element");
        return {v_float_zero, Type::Float};
    }
    const auto sampler_id = GetTextureSampler(operation);

    // Fixed-point scale: 256 units per LOD step.
    const Id multiplier = Constant(t_float, 256.0f);
    const Id multipliers = ConstantComposite(t_float2, multiplier, multiplier);

    const Id coords = GetCoordinates(operation, Type::Float);
    Id size = OpImageQueryLod(t_float2, sampler_id, coords);
    size = OpFMul(t_float2, size, multipliers);
    size = OpConvertFToS(t_int2, size);
    return GetTextureElement(operation, size, Type::Int);
}
| 1932 | |||
| 1933 | Expression TexelFetch(Operation operation) { | ||
| 1934 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1935 | UNIMPLEMENTED_IF(meta.depth_compare); | ||
| 1936 | |||
| 1937 | const Id image = GetTextureImage(operation); | ||
| 1938 | const Id coords = GetCoordinates(operation, Type::Int); | ||
| 1939 | |||
| 1940 | spv::ImageOperandsMask mask = spv::ImageOperandsMask::MaskNone; | ||
| 1941 | std::vector<Id> operands; | ||
| 1942 | Id fetch; | ||
| 1943 | |||
| 1944 | if (meta.lod && !meta.sampler.is_buffer) { | ||
| 1945 | mask = mask | spv::ImageOperandsMask::Lod; | ||
| 1946 | operands.push_back(AsInt(Visit(meta.lod))); | ||
| 1947 | } | ||
| 1948 | |||
| 1949 | if (!meta.aoffi.empty()) { | ||
| 1950 | mask = mask | spv::ImageOperandsMask::Offset; | ||
| 1951 | operands.push_back(GetOffsetCoordinates(operation)); | ||
| 1952 | } | ||
| 1953 | |||
| 1954 | fetch = OpImageFetch(t_float4, image, coords, mask, operands); | ||
| 1955 | return GetTextureElement(operation, fetch, Type::Float); | ||
| 1956 | } | ||
| 1957 | |||
| 1958 | Expression TextureGradient(Operation operation) { | ||
| 1959 | const auto& meta = std::get<MetaTexture>(operation.GetMeta()); | ||
| 1960 | UNIMPLEMENTED_IF(!meta.aoffi.empty()); | ||
| 1961 | |||
| 1962 | const Id sampler = GetTextureSampler(operation); | ||
| 1963 | const Id coords = GetCoordinates(operation, Type::Float); | ||
| 1964 | const auto [dx, dy] = GetDerivatives(operation); | ||
| 1965 | const std::vector grad = {dx, dy}; | ||
| 1966 | |||
| 1967 | static constexpr auto mask = spv::ImageOperandsMask::Grad; | ||
| 1968 | const Id texture = OpImageSampleExplicitLod(t_float4, sampler, coords, mask, grad); | ||
| 1969 | return GetTextureElement(operation, texture, Type::Float); | ||
| 1970 | } | ||
| 1971 | |||
// Reads one component of a texel from a writable image (SULD).
// Requires formatless image loads; returns zero when the device lacks them.
Expression ImageLoad(Operation operation) {
    if (!device.IsFormatlessImageLoadSupported()) {
        return {v_float_zero, Type::Float};
    }

    const auto& meta{std::get<MetaImage>(operation.GetMeta())};

    const Id coords = GetCoordinates(operation, Type::Int);
    const Id texel = OpImageRead(t_uint4, GetImage(operation), coords);

    // Only the component requested by the operation is returned.
    return {OpCompositeExtract(t_uint, texel, meta.element), Type::Uint};
}
| 1984 | |||
| 1985 | Expression ImageStore(Operation operation) { | ||
| 1986 | const auto meta{std::get<MetaImage>(operation.GetMeta())}; | ||
| 1987 | std::vector<Id> colors; | ||
| 1988 | for (const auto& value : meta.values) { | ||
| 1989 | colors.push_back(AsUint(Visit(value))); | ||
| 1990 | } | ||
| 1991 | |||
| 1992 | const Id coords = GetCoordinates(operation, Type::Int); | ||
| 1993 | const Id texel = OpCompositeConstruct(t_uint4, colors); | ||
| 1994 | |||
| 1995 | OpImageWrite(GetImage(operation), coords, texel, {}); | ||
| 1996 | return {}; | ||
| 1997 | } | ||
| 1998 | |||
// Performs the atomic image operation `func` on one texel of a writable image.
// meta.values[0] holds the operand; the result is the previously stored value.
template <Id (Module::*func)(Id, Id, Id, Id, Id)>
Expression AtomicImage(Operation operation) {
    const auto& meta{std::get<MetaImage>(operation.GetMeta())};
    ASSERT(meta.values.size() == 1);

    const Id coordinate = GetCoordinates(operation, Type::Int);
    const Id image = images.at(meta.image.index).image;
    const Id sample = v_uint_zero;
    const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);

    // Device scope with relaxed (zero) memory semantics.
    const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
    const Id semantics = v_uint_zero;
    const Id value = AsUint(Visit(meta.values[0]));
    return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
}
| 2014 | |||
// Performs the atomic memory operation `func` on shared or global memory.
// operation[0] is the destination address node, operation[1] the operand.
template <Id (Module::*func)(Id, Id, Id, Id, Id)>
Expression Atomic(Operation operation) {
    Id pointer;
    if (const auto smem = std::get_if<SmemNode>(&*operation[0])) {
        pointer = GetSharedMemoryPointer(*smem);
    } else if (const auto gmem = std::get_if<GmemNode>(&*operation[0])) {
        pointer = GetGlobalMemoryPointer(*gmem);
    } else {
        // Atomics are only implemented for shared and global memory nodes.
        UNREACHABLE();
        return {v_float_zero, Type::Float};
    }
    // Device scope with relaxed (zero) memory semantics.
    const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
    const Id semantics = v_uint_zero;
    const Id value = AsUint(Visit(operation[1]));

    return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
}
| 2032 | |||
// Reduction form of Atomic: performs the operation but discards the result.
template <Id (Module::*func)(Id, Id, Id, Id, Id)>
Expression Reduce(Operation operation) {
    Atomic<func>(operation);
    return {};
}
| 2038 | |||
// Unconditional branch to an immediate target address: stores the target in
// jmp_to and jumps back to the dispatch loop through continue_label.
Expression Branch(Operation operation) {
    const auto& target = std::get<ImmediateNode>(*operation[0]);
    OpStore(jmp_to, Constant(t_uint, target.GetValue()));
    OpBranch(continue_label);
    inside_branch = true;
    if (!conditional_branch_set) {
        // Open a fresh block so any code emitted afterwards stays valid SPIR-V.
        AddLabel();
    }
    return {};
}
| 2049 | |||
// Branch to a runtime-computed target address (operation[0]); same dispatch
// mechanism as Branch but with a dynamic value stored in jmp_to.
Expression BranchIndirect(Operation operation) {
    const Id op_a = AsUint(Visit(operation[0]));

    OpStore(jmp_to, op_a);
    OpBranch(continue_label);
    inside_branch = true;
    if (!conditional_branch_set) {
        // Open a fresh block so any code emitted afterwards stays valid SPIR-V.
        AddLabel();
    }
    return {};
}
| 2061 | |||
// SSY/PBK: pushes an immediate return target onto the selected flow stack.
Expression PushFlowStack(Operation operation) {
    const auto& target = std::get<ImmediateNode>(*operation[0]);
    const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
    const Id current = OpLoad(t_uint, flow_stack_top);
    const Id next = OpIAdd(t_uint, current, Constant(t_uint, 1));
    const Id access = OpAccessChain(t_func_uint, flow_stack, current);

    // Store the target at the current top, then bump the top index.
    OpStore(access, Constant(t_uint, target.GetValue()));
    OpStore(flow_stack_top, next);
    return {};
}
| 2073 | |||
// SYNC/BRK: pops the selected flow stack and branches to the popped target
// through the dispatch loop (jmp_to + continue_label).
Expression PopFlowStack(Operation operation) {
    const auto [flow_stack, flow_stack_top] = GetFlowStack(operation);
    const Id current = OpLoad(t_uint, flow_stack_top);
    const Id previous = OpISub(t_uint, current, Constant(t_uint, 1));
    const Id access = OpAccessChain(t_func_uint, flow_stack, previous);
    const Id target = OpLoad(t_uint, access);

    OpStore(flow_stack_top, previous);
    OpStore(jmp_to, target);
    OpBranch(continue_label);
    inside_branch = true;
    if (!conditional_branch_set) {
        // Open a fresh block so any code emitted afterwards stays valid SPIR-V.
        AddLabel();
    }
    return {};
}
| 2090 | |||
| 2091 | Id MaxwellToSpirvComparison(Maxwell::ComparisonOp compare_op, Id operand_1, Id operand_2) { | ||
| 2092 | using Compare = Maxwell::ComparisonOp; | ||
| 2093 | switch (compare_op) { | ||
| 2094 | case Compare::NeverOld: | ||
| 2095 | return v_false; // Never let the test pass | ||
| 2096 | case Compare::LessOld: | ||
| 2097 | return OpFOrdLessThan(t_bool, operand_1, operand_2); | ||
| 2098 | case Compare::EqualOld: | ||
| 2099 | return OpFOrdEqual(t_bool, operand_1, operand_2); | ||
| 2100 | case Compare::LessEqualOld: | ||
| 2101 | return OpFOrdLessThanEqual(t_bool, operand_1, operand_2); | ||
| 2102 | case Compare::GreaterOld: | ||
| 2103 | return OpFOrdGreaterThan(t_bool, operand_1, operand_2); | ||
| 2104 | case Compare::NotEqualOld: | ||
| 2105 | return OpFOrdNotEqual(t_bool, operand_1, operand_2); | ||
| 2106 | case Compare::GreaterEqualOld: | ||
| 2107 | return OpFOrdGreaterThanEqual(t_bool, operand_1, operand_2); | ||
| 2108 | default: | ||
| 2109 | UNREACHABLE(); | ||
| 2110 | return v_true; | ||
| 2111 | } | ||
| 2112 | } | ||
| 2113 | |||
// Fragment epilogue helper: compares the alpha output pointed to by `pointer`
// against the specialized reference value and kills the fragment on failure.
void AlphaTest(Id pointer) {
    if (specialization.alpha_test_func == Maxwell::ComparisonOp::AlwaysOld) {
        // AlwaysOld passes unconditionally; no code needs to be emitted.
        return;
    }
    const Id true_label = OpLabel();
    const Id discard_label = OpLabel();
    const Id alpha_reference = Constant(t_float, specialization.alpha_test_ref);
    const Id alpha_value = OpLoad(t_float, pointer);
    const Id condition =
        MaxwellToSpirvComparison(specialization.alpha_test_func, alpha_value, alpha_reference);

    // Failing fragments jump to a block containing OpKill; passing fragments
    // continue in true_label.
    OpBranchConditional(condition, true_label, discard_label);
    AddLabel(discard_label);
    OpKill();
    AddLabel(true_label);
}
| 2130 | |||
// Stage-specific epilogue emitted right before a shader returns.
// - Vertex: optionally remaps depth with z = (z + w) * 0.5 when the guest uses
//   [-1,1] NDC (ndc_minus_one_to_one specialization).
// - Fragment: copies shader registers into the color/depth outputs and runs
//   the alpha test on render target 0's alpha component.
void PreExit() {
    if (stage == ShaderType::Vertex && specialization.ndc_minus_one_to_one) {
        const u32 position_index = out_indices.position.value();
        const Id z_pointer = AccessElement(t_out_float, out_vertex, position_index, 2U);
        const Id w_pointer = AccessElement(t_out_float, out_vertex, position_index, 3U);
        Id depth = OpLoad(t_float, z_pointer);
        depth = OpFAdd(t_float, depth, OpLoad(t_float, w_pointer));
        depth = OpFMul(t_float, depth, Constant(t_float, 0.5f));
        OpStore(z_pointer, depth);
    }
    if (stage == ShaderType::Fragment) {
        // Loads register `reg`, or 0.0 when the shader never wrote it.
        const auto SafeGetRegister = [this](u32 reg) {
            if (const auto it = registers.find(reg); it != registers.end()) {
                return OpLoad(t_float, it->second);
            }
            return v_float_zero;
        };

        UNIMPLEMENTED_IF_MSG(header.ps.omap.sample_mask != 0,
                             "Sample mask write is unimplemented");

        // Write the color outputs using the data in the shader registers, disabled
        // rendertargets/components are skipped in the register assignment.
        u32 current_reg = 0;
        for (u32 rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
            // TODO(Subv): Figure out how dual-source blending is configured in the Switch.
            for (u32 component = 0; component < 4; ++component) {
                if (!header.ps.IsColorComponentOutputEnabled(rt, component)) {
                    continue;
                }
                const Id pointer = AccessElement(t_out_float, frag_colors[rt], component);
                OpStore(pointer, SafeGetRegister(current_reg));
                if (rt == 0 && component == 3) {
                    // Alpha of render target 0 drives the alpha test.
                    AlphaTest(pointer);
                }
                ++current_reg;
            }
        }
        if (header.ps.omap.depth) {
            // The depth output is always 2 registers after the last color output, and
            // current_reg already contains one past the last color register.
            OpStore(frag_depth, SafeGetRegister(current_reg + 1));
        }
    }
}
| 2176 | |||
| 2177 | Expression Exit(Operation operation) { | ||
| 2178 | PreExit(); | ||
| 2179 | inside_branch = true; | ||
| 2180 | if (conditional_branch_set) { | ||
| 2181 | OpReturn(); | ||
| 2182 | } else { | ||
| 2183 | const Id dummy = OpLabel(); | ||
| 2184 | OpBranch(dummy); | ||
| 2185 | AddLabel(dummy); | ||
| 2186 | OpReturn(); | ||
| 2187 | AddLabel(); | ||
| 2188 | } | ||
| 2189 | return {}; | ||
| 2190 | } | ||
| 2191 | |||
| 2192 | Expression Discard(Operation operation) { | ||
| 2193 | inside_branch = true; | ||
| 2194 | if (conditional_branch_set) { | ||
| 2195 | OpKill(); | ||
| 2196 | } else { | ||
| 2197 | const Id dummy = OpLabel(); | ||
| 2198 | OpBranch(dummy); | ||
| 2199 | AddLabel(dummy); | ||
| 2200 | OpKill(); | ||
| 2201 | AddLabel(); | ||
| 2202 | } | ||
| 2203 | return {}; | ||
| 2204 | } | ||
| 2205 | |||
// Geometry stage: emits the current vertex to the output stream.
Expression EmitVertex(Operation) {
    OpEmitVertex();
    return {};
}
| 2210 | |||
| 2211 | Expression EndPrimitive(Operation operation) { | ||
| 2212 | OpEndPrimitive(); | ||
| 2213 | return {}; | ||
| 2214 | } | ||
| 2215 | |||
// Returns the current invocation id built-in as a signed integer.
Expression InvocationId(Operation) {
    return {OpLoad(t_int, invocation_id), Type::Int};
}
| 2219 | |||
// Stub: Y-negation is not implemented; always returns the constant 1.0f
// (i.e. "not negated") and logs a warning.
Expression YNegate(Operation) {
    LOG_WARNING(Render_Vulkan, "(STUBBED)");
    return {Constant(t_float, 1.0f), Type::Float};
}
| 2224 | |||
// Returns one component (x/y/z selected by `element`) of the compute
// local invocation id built-in.
template <u32 element>
Expression LocalInvocationId(Operation) {
    const Id id = OpLoad(t_uint3, local_invocation_id);
    return {OpCompositeExtract(t_uint, id, element), Type::Uint};
}
| 2230 | |||
| 2231 | template <u32 element> | ||
| 2232 | Expression WorkGroupId(Operation operation) { | ||
| 2233 | const Id id = OpLoad(t_uint3, workgroup_id); | ||
| 2234 | return {OpCompositeExtract(t_uint, id, element), Type::Uint}; | ||
| 2235 | } | ||
| 2236 | |||
// Subgroup ballot (VOTE.ballot): returns the 32-bit slice of the ballot mask
// that covers the current thread.
Expression BallotThread(Operation operation) {
    const Id predicate = AsBool(Visit(operation[0]));
    const Id ballot = OpSubgroupBallotKHR(t_uint4, predicate);

    if (!device.IsWarpSizePotentiallyBiggerThanGuest()) {
        // Guest-like devices can just return the first index.
        return {OpCompositeExtract(t_uint, ballot, 0U), Type::Uint};
    }

    // The others will have to return what is local to the current thread.
    // For instance a device with a warp size of 64 will return the upper uint when the current
    // thread is 38.
    const Id tid = OpLoad(t_uint, thread_id);
    // thread_index = tid / 32 selects which uint of the ballot holds this thread's bit.
    const Id thread_index = OpShiftRightLogical(t_uint, tid, Constant(t_uint, 5));
    return {OpVectorExtractDynamic(t_uint, ballot, thread_index), Type::Uint};
}
| 2253 | |||
// Subgroup vote (VOTE.all / VOTE.any): applies the vote instruction `func`
// to the predicate in operation[0].
template <Id (Module::*func)(Id, Id)>
Expression Vote(Operation operation) {
    // TODO(Rodrigo): Handle devices with different warp sizes
    const Id predicate = AsBool(Visit(operation[0]));
    return {(this->*func)(t_bool, predicate), Type::Bool};
}
| 2260 | |||
// Returns the subgroup-local thread id built-in.
Expression ThreadId(Operation) {
    return {OpLoad(t_uint, thread_id), Type::Uint};
}
| 2264 | |||
// Returns the first 32-bit word of the thread-mask built-in selected by
// `index` (eq/ge/gt/le/lt family stored in thread_masks).
template <std::size_t index>
Expression ThreadMask(Operation) {
    // TODO(Rodrigo): Handle devices with different warp sizes
    const Id mask = thread_masks[index];
    return {OpLoad(t_uint, AccessElement(t_in_uint, mask, 0)), Type::Uint};
}
| 2271 | |||
| 2272 | Expression ShuffleIndexed(Operation operation) { | ||
| 2273 | const Id value = AsFloat(Visit(operation[0])); | ||
| 2274 | const Id index = AsUint(Visit(operation[1])); | ||
| 2275 | return {OpSubgroupReadInvocationKHR(t_float, value, index), Type::Float}; | ||
| 2276 | } | ||
| 2277 | |||
// Emits a workgroup control barrier (BAR). Only emitted when the shader's
// control flow was fully decompiled; otherwise an error is logged and the
// barrier is skipped.
Expression Barrier(Operation) {
    if (!ir.IsDecompiled()) {
        LOG_ERROR(Render_Vulkan, "OpBarrier used by shader is not decompiled");
        return {};
    }

    // Workgroup execution scope and workgroup memory scope with
    // acquire/release semantics on workgroup memory.
    const auto scope = spv::Scope::Workgroup;
    const auto memory = spv::Scope::Workgroup;
    const auto semantics =
        spv::MemorySemanticsMask::WorkgroupMemory | spv::MemorySemanticsMask::AcquireRelease;
    OpControlBarrier(Constant(t_uint, static_cast<u32>(scope)),
                     Constant(t_uint, static_cast<u32>(memory)),
                     Constant(t_uint, static_cast<u32>(semantics)));
    return {};
}
| 2293 | |||
// Emits a memory barrier at the given scope covering uniform, workgroup,
// atomic-counter and image memory with acquire/release semantics.
template <spv::Scope scope>
Expression MemoryBarrier(Operation) {
    const auto semantics =
        spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
        spv::MemorySemanticsMask::WorkgroupMemory |
        spv::MemorySemanticsMask::AtomicCounterMemory | spv::MemorySemanticsMask::ImageMemory;

    OpMemoryBarrier(Constant(t_uint, static_cast<u32>(scope)),
                    Constant(t_uint, static_cast<u32>(semantics)));
    return {};
}
| 2305 | |||
// Declares a global variable decorated as the given SPIR-V built-in, names it,
// registers it as an entry-point interface and returns its id.
Id DeclareBuiltIn(spv::BuiltIn builtin, spv::StorageClass storage, Id type, std::string name) {
    const Id id = OpVariable(type, storage);
    Decorate(id, spv::Decoration::BuiltIn, static_cast<u32>(builtin));
    AddGlobalVariable(Name(id, std::move(name)));
    interfaces.push_back(id);
    return id;
}
| 2313 | |||
| 2314 | Id DeclareInputBuiltIn(spv::BuiltIn builtin, Id type, std::string name) { | ||
| 2315 | return DeclareBuiltIn(builtin, spv::StorageClass::Input, type, std::move(name)); | ||
| 2316 | } | ||
| 2317 | |||
| 2318 | template <typename... Args> | ||
| 2319 | Id AccessElement(Id pointer_type, Id composite, Args... elements_) { | ||
| 2320 | std::vector<Id> members; | ||
| 2321 | auto elements = {elements_...}; | ||
| 2322 | for (const auto element : elements) { | ||
| 2323 | members.push_back(Constant(t_uint, element)); | ||
| 2324 | } | ||
| 2325 | |||
| 2326 | return OpAccessChain(pointer_type, composite, members); | ||
| 2327 | } | ||
| 2328 | |||
| 2329 | Id As(Expression expr, Type wanted_type) { | ||
| 2330 | switch (wanted_type) { | ||
| 2331 | case Type::Bool: | ||
| 2332 | return AsBool(expr); | ||
| 2333 | case Type::Bool2: | ||
| 2334 | return AsBool2(expr); | ||
| 2335 | case Type::Float: | ||
| 2336 | return AsFloat(expr); | ||
| 2337 | case Type::Int: | ||
| 2338 | return AsInt(expr); | ||
| 2339 | case Type::Uint: | ||
| 2340 | return AsUint(expr); | ||
| 2341 | case Type::HalfFloat: | ||
| 2342 | return AsHalfFloat(expr); | ||
| 2343 | default: | ||
| 2344 | UNREACHABLE(); | ||
| 2345 | return expr.id; | ||
| 2346 | } | ||
| 2347 | } | ||
| 2348 | |||
// Asserts the expression is already a bool and returns its id; no conversion
// from other types is supported.
Id AsBool(Expression expr) {
    ASSERT(expr.type == Type::Bool);
    return expr.id;
}
| 2353 | |||
// Asserts the expression is already a bool2 and returns its id; no conversion
// from other types is supported.
Id AsBool2(Expression expr) {
    ASSERT(expr.type == Type::Bool2);
    return expr.id;
}
| 2358 | |||
// Reinterprets an expression as a 32-bit float id. Integers are bitcast;
// half-float pairs are bitcast directly with native f16 support, or packed
// into a uint first when f16 is emulated.
Id AsFloat(Expression expr) {
    switch (expr.type) {
    case Type::Float:
        return expr.id;
    case Type::Int:
    case Type::Uint:
        return OpBitcast(t_float, expr.id);
    case Type::HalfFloat:
        if (device.IsFloat16Supported()) {
            return OpBitcast(t_float, expr.id);
        }
        // Emulated halves live in a float2; pack them into one 32-bit value.
        return OpBitcast(t_float, OpPackHalf2x16(t_uint, expr.id));
    default:
        UNREACHABLE();
        return expr.id;
    }
}
| 2376 | |||
// Reinterprets an expression as a 32-bit signed integer id. Floats and uints
// are bitcast; half-float pairs are bitcast with native f16 support or packed
// when emulated.
Id AsInt(Expression expr) {
    switch (expr.type) {
    case Type::Int:
        return expr.id;
    case Type::Float:
    case Type::Uint:
        return OpBitcast(t_int, expr.id);
    case Type::HalfFloat:
        if (device.IsFloat16Supported()) {
            return OpBitcast(t_int, expr.id);
        }
        // NOTE(review): GLSL.std.450 PackHalf2x16 nominally yields a 32-bit
        // unsigned result; emitting it with t_int here (unlike AsFloat/AsUint,
        // which use t_uint) should be confirmed against the SPIR-V validator.
        return OpPackHalf2x16(t_int, expr.id);
    default:
        UNREACHABLE();
        return expr.id;
    }
}
| 2394 | |||
// Reinterprets an expression as a 32-bit unsigned integer id. Floats and ints
// are bitcast; half-float pairs are bitcast with native f16 support or packed
// into a uint when emulated.
Id AsUint(Expression expr) {
    switch (expr.type) {
    case Type::Uint:
        return expr.id;
    case Type::Float:
    case Type::Int:
        return OpBitcast(t_uint, expr.id);
    case Type::HalfFloat:
        if (device.IsFloat16Supported()) {
            return OpBitcast(t_uint, expr.id);
        }
        // Emulated halves live in a float2; pack them into one 32-bit value.
        return OpPackHalf2x16(t_uint, expr.id);
    default:
        UNREACHABLE();
        return expr.id;
    }
}
| 2412 | |||
// Reinterprets an expression as a half-float pair. With native f16 support a
// plain bitcast suffices; otherwise the 32-bit value is unpacked into the
// emulated float2 representation.
Id AsHalfFloat(Expression expr) {
    switch (expr.type) {
    case Type::HalfFloat:
        return expr.id;
    case Type::Float:
    case Type::Int:
    case Type::Uint:
        if (device.IsFloat16Supported()) {
            return OpBitcast(t_half, expr.id);
        }
        return OpUnpackHalf2x16(t_half, AsUint(expr));
    default:
        UNREACHABLE();
        return expr.id;
    }
}
| 2429 | |||
| 2430 | Id GetHalfScalarFromFloat(Id value) { | ||
| 2431 | if (device.IsFloat16Supported()) { | ||
| 2432 | return OpFConvert(t_scalar_half, value); | ||
| 2433 | } | ||
| 2434 | return value; | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | Id GetFloatFromHalfScalar(Id value) { | ||
| 2438 | if (device.IsFloat16Supported()) { | ||
| 2439 | return OpFConvert(t_float, value); | ||
| 2440 | } | ||
| 2441 | return value; | ||
| 2442 | } | ||
| 2443 | |||
| 2444 | AttributeType GetAttributeType(u32 location) const { | ||
| 2445 | if (stage != ShaderType::Vertex) { | ||
| 2446 | return {Type::Float, t_in_float, t_in_float4}; | ||
| 2447 | } | ||
| 2448 | switch (specialization.attribute_types.at(location)) { | ||
| 2449 | case Maxwell::VertexAttribute::Type::SignedNorm: | ||
| 2450 | case Maxwell::VertexAttribute::Type::UnsignedNorm: | ||
| 2451 | case Maxwell::VertexAttribute::Type::UnsignedScaled: | ||
| 2452 | case Maxwell::VertexAttribute::Type::SignedScaled: | ||
| 2453 | case Maxwell::VertexAttribute::Type::Float: | ||
| 2454 | return {Type::Float, t_in_float, t_in_float4}; | ||
| 2455 | case Maxwell::VertexAttribute::Type::SignedInt: | ||
| 2456 | return {Type::Int, t_in_int, t_in_int4}; | ||
| 2457 | case Maxwell::VertexAttribute::Type::UnsignedInt: | ||
| 2458 | return {Type::Uint, t_in_uint, t_in_uint4}; | ||
| 2459 | default: | ||
| 2460 | UNREACHABLE(); | ||
| 2461 | return {Type::Float, t_in_float, t_in_float4}; | ||
| 2462 | } | ||
| 2463 | } | ||
| 2464 | |||
| 2465 | Id GetTypeDefinition(Type type) const { | ||
| 2466 | switch (type) { | ||
| 2467 | case Type::Bool: | ||
| 2468 | return t_bool; | ||
| 2469 | case Type::Bool2: | ||
| 2470 | return t_bool2; | ||
| 2471 | case Type::Float: | ||
| 2472 | return t_float; | ||
| 2473 | case Type::Int: | ||
| 2474 | return t_int; | ||
| 2475 | case Type::Uint: | ||
| 2476 | return t_uint; | ||
| 2477 | case Type::HalfFloat: | ||
| 2478 | return t_half; | ||
| 2479 | default: | ||
| 2480 | UNREACHABLE(); | ||
| 2481 | return {}; | ||
| 2482 | } | ||
| 2483 | } | ||
| 2484 | |||
| 2485 | std::array<Id, 4> GetTypeVectorDefinitionLut(Type type) const { | ||
| 2486 | switch (type) { | ||
| 2487 | case Type::Float: | ||
| 2488 | return {t_float, t_float2, t_float3, t_float4}; | ||
| 2489 | case Type::Int: | ||
| 2490 | return {t_int, t_int2, t_int3, t_int4}; | ||
| 2491 | case Type::Uint: | ||
| 2492 | return {t_uint, t_uint2, t_uint3, t_uint4}; | ||
| 2493 | default: | ||
| 2494 | UNIMPLEMENTED(); | ||
| 2495 | return {}; | ||
| 2496 | } | ||
| 2497 | } | ||
| 2498 | |||
| 2499 | std::tuple<Id, Id> CreateFlowStack() { | ||
| 2500 | // TODO(Rodrigo): Figure out the actual depth of the flow stack, for now it seems unlikely | ||
| 2501 | // that shaders will use 20 nested SSYs and PBKs. | ||
| 2502 | constexpr u32 FLOW_STACK_SIZE = 20; | ||
| 2503 | constexpr auto storage_class = spv::StorageClass::Function; | ||
| 2504 | |||
| 2505 | const Id flow_stack_type = TypeArray(t_uint, Constant(t_uint, FLOW_STACK_SIZE)); | ||
| 2506 | const Id stack = OpVariable(TypePointer(storage_class, flow_stack_type), storage_class, | ||
| 2507 | ConstantNull(flow_stack_type)); | ||
| 2508 | const Id top = OpVariable(t_func_uint, storage_class, Constant(t_uint, 0)); | ||
| 2509 | AddLocalVariable(stack); | ||
| 2510 | AddLocalVariable(top); | ||
| 2511 | return std::tie(stack, top); | ||
| 2512 | } | ||
| 2513 | |||
// Selects the SSY or PBK stack variable pair {stack, top} for the given
// flow-stack operation's MetaStackClass.
std::pair<Id, Id> GetFlowStack(Operation operation) {
    const auto stack_class = std::get<MetaStackClass>(operation.GetMeta());
    switch (stack_class) {
    case MetaStackClass::Ssy:
        return {ssy_flow_stack, ssy_flow_stack_top};
    case MetaStackClass::Pbk:
        return {pbk_flow_stack, pbk_flow_stack_top};
    }
    UNREACHABLE();
    return {};
}
| 2525 | |||
// Returns a pointer into the global memory buffer backing this GMEM access.
// The buffer is an array of u32, so the byte offset from the base address is
// converted to a word index (>> 2) before building the access chain.
Id GetGlobalMemoryPointer(const GmemNode& gmem) {
    const Id real = AsUint(Visit(gmem.GetRealAddress()));
    const Id base = AsUint(Visit(gmem.GetBaseAddress()));
    const Id diff = OpISub(t_uint, real, base);
    const Id offset = OpShiftRightLogical(t_uint, diff, Constant(t_uint, 2));
    const Id buffer = global_buffers.at(gmem.GetDescriptor());
    // First index (0) selects the runtime array member inside the buffer block.
    return OpAccessChain(t_gmem_uint, buffer, Constant(t_uint, 0), offset);
}
| 2534 | |||
| 2535 | Id GetSharedMemoryPointer(const SmemNode& smem) { | ||
| 2536 | ASSERT(stage == ShaderType::Compute); | ||
| 2537 | Id address = AsUint(Visit(smem.GetAddress())); | ||
| 2538 | address = OpShiftRightLogical(t_uint, address, Constant(t_uint, 2U)); | ||
| 2539 | return OpAccessChain(t_smem_uint, shared_memory, address); | ||
| 2540 | } | ||
| 2541 | |||
| 2542 | static constexpr std::array operation_decompilers = { | ||
| 2543 | &SPIRVDecompiler::Assign, | ||
| 2544 | |||
| 2545 | &SPIRVDecompiler::Ternary<&Module::OpSelect, Type::Float, Type::Bool, Type::Float, | ||
| 2546 | Type::Float>, | ||
| 2547 | |||
| 2548 | &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::Float>, | ||
| 2549 | &SPIRVDecompiler::Binary<&Module::OpFMul, Type::Float>, | ||
| 2550 | &SPIRVDecompiler::Binary<&Module::OpFDiv, Type::Float>, | ||
| 2551 | &SPIRVDecompiler::Ternary<&Module::OpFma, Type::Float>, | ||
| 2552 | &SPIRVDecompiler::Unary<&Module::OpFNegate, Type::Float>, | ||
| 2553 | &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::Float>, | ||
| 2554 | &SPIRVDecompiler::Ternary<&Module::OpFClamp, Type::Float>, | ||
| 2555 | &SPIRVDecompiler::FCastHalf<0>, | ||
| 2556 | &SPIRVDecompiler::FCastHalf<1>, | ||
| 2557 | &SPIRVDecompiler::Binary<&Module::OpFMin, Type::Float>, | ||
| 2558 | &SPIRVDecompiler::Binary<&Module::OpFMax, Type::Float>, | ||
| 2559 | &SPIRVDecompiler::Unary<&Module::OpCos, Type::Float>, | ||
| 2560 | &SPIRVDecompiler::Unary<&Module::OpSin, Type::Float>, | ||
| 2561 | &SPIRVDecompiler::Unary<&Module::OpExp2, Type::Float>, | ||
| 2562 | &SPIRVDecompiler::Unary<&Module::OpLog2, Type::Float>, | ||
| 2563 | &SPIRVDecompiler::Unary<&Module::OpInverseSqrt, Type::Float>, | ||
| 2564 | &SPIRVDecompiler::Unary<&Module::OpSqrt, Type::Float>, | ||
| 2565 | &SPIRVDecompiler::Unary<&Module::OpRoundEven, Type::Float>, | ||
| 2566 | &SPIRVDecompiler::Unary<&Module::OpFloor, Type::Float>, | ||
| 2567 | &SPIRVDecompiler::Unary<&Module::OpCeil, Type::Float>, | ||
| 2568 | &SPIRVDecompiler::Unary<&Module::OpTrunc, Type::Float>, | ||
| 2569 | &SPIRVDecompiler::Unary<&Module::OpConvertSToF, Type::Float, Type::Int>, | ||
| 2570 | &SPIRVDecompiler::Unary<&Module::OpConvertUToF, Type::Float, Type::Uint>, | ||
| 2571 | &SPIRVDecompiler::FSwizzleAdd, | ||
| 2572 | |||
| 2573 | &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Int>, | ||
| 2574 | &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Int>, | ||
| 2575 | &SPIRVDecompiler::Binary<&Module::OpSDiv, Type::Int>, | ||
| 2576 | &SPIRVDecompiler::Unary<&Module::OpSNegate, Type::Int>, | ||
| 2577 | &SPIRVDecompiler::Unary<&Module::OpSAbs, Type::Int>, | ||
| 2578 | &SPIRVDecompiler::Binary<&Module::OpSMin, Type::Int>, | ||
| 2579 | &SPIRVDecompiler::Binary<&Module::OpSMax, Type::Int>, | ||
| 2580 | |||
| 2581 | &SPIRVDecompiler::Unary<&Module::OpConvertFToS, Type::Int, Type::Float>, | ||
| 2582 | &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Int, Type::Uint>, | ||
| 2583 | &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Int, Type::Int, Type::Uint>, | ||
| 2584 | &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Int, Type::Int, Type::Uint>, | ||
| 2585 | &SPIRVDecompiler::Binary<&Module::OpShiftRightArithmetic, Type::Int, Type::Int, Type::Uint>, | ||
| 2586 | &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Int>, | ||
| 2587 | &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Int>, | ||
| 2588 | &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Int>, | ||
| 2589 | &SPIRVDecompiler::Unary<&Module::OpNot, Type::Int>, | ||
| 2590 | &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Int>, | ||
| 2591 | &SPIRVDecompiler::Ternary<&Module::OpBitFieldSExtract, Type::Int>, | ||
| 2592 | &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Int>, | ||
| 2593 | &SPIRVDecompiler::Unary<&Module::OpFindSMsb, Type::Int>, | ||
| 2594 | |||
| 2595 | &SPIRVDecompiler::Binary<&Module::OpIAdd, Type::Uint>, | ||
| 2596 | &SPIRVDecompiler::Binary<&Module::OpIMul, Type::Uint>, | ||
| 2597 | &SPIRVDecompiler::Binary<&Module::OpUDiv, Type::Uint>, | ||
| 2598 | &SPIRVDecompiler::Binary<&Module::OpUMin, Type::Uint>, | ||
| 2599 | &SPIRVDecompiler::Binary<&Module::OpUMax, Type::Uint>, | ||
| 2600 | &SPIRVDecompiler::Unary<&Module::OpConvertFToU, Type::Uint, Type::Float>, | ||
| 2601 | &SPIRVDecompiler::Unary<&Module::OpBitcast, Type::Uint, Type::Int>, | ||
| 2602 | &SPIRVDecompiler::Binary<&Module::OpShiftLeftLogical, Type::Uint>, | ||
| 2603 | &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, | ||
| 2604 | &SPIRVDecompiler::Binary<&Module::OpShiftRightLogical, Type::Uint>, | ||
| 2605 | &SPIRVDecompiler::Binary<&Module::OpBitwiseAnd, Type::Uint>, | ||
| 2606 | &SPIRVDecompiler::Binary<&Module::OpBitwiseOr, Type::Uint>, | ||
| 2607 | &SPIRVDecompiler::Binary<&Module::OpBitwiseXor, Type::Uint>, | ||
| 2608 | &SPIRVDecompiler::Unary<&Module::OpNot, Type::Uint>, | ||
| 2609 | &SPIRVDecompiler::Quaternary<&Module::OpBitFieldInsert, Type::Uint>, | ||
| 2610 | &SPIRVDecompiler::Ternary<&Module::OpBitFieldUExtract, Type::Uint>, | ||
| 2611 | &SPIRVDecompiler::Unary<&Module::OpBitCount, Type::Uint>, | ||
| 2612 | &SPIRVDecompiler::Unary<&Module::OpFindUMsb, Type::Uint>, | ||
| 2613 | |||
| 2614 | &SPIRVDecompiler::Binary<&Module::OpFAdd, Type::HalfFloat>, | ||
| 2615 | &SPIRVDecompiler::Binary<&Module::OpFMul, Type::HalfFloat>, | ||
| 2616 | &SPIRVDecompiler::Ternary<&Module::OpFma, Type::HalfFloat>, | ||
| 2617 | &SPIRVDecompiler::Unary<&Module::OpFAbs, Type::HalfFloat>, | ||
| 2618 | &SPIRVDecompiler::HNegate, | ||
| 2619 | &SPIRVDecompiler::HClamp, | ||
| 2620 | &SPIRVDecompiler::HCastFloat, | ||
| 2621 | &SPIRVDecompiler::HUnpack, | ||
| 2622 | &SPIRVDecompiler::HMergeF32, | ||
| 2623 | &SPIRVDecompiler::HMergeHN<0>, | ||
| 2624 | &SPIRVDecompiler::HMergeHN<1>, | ||
| 2625 | &SPIRVDecompiler::HPack2, | ||
| 2626 | |||
| 2627 | &SPIRVDecompiler::LogicalAssign, | ||
| 2628 | &SPIRVDecompiler::Binary<&Module::OpLogicalAnd, Type::Bool>, | ||
| 2629 | &SPIRVDecompiler::Binary<&Module::OpLogicalOr, Type::Bool>, | ||
| 2630 | &SPIRVDecompiler::Binary<&Module::OpLogicalNotEqual, Type::Bool>, | ||
| 2631 | &SPIRVDecompiler::Unary<&Module::OpLogicalNot, Type::Bool>, | ||
| 2632 | &SPIRVDecompiler::Binary<&Module::OpVectorExtractDynamic, Type::Bool, Type::Bool2, | ||
| 2633 | Type::Uint>, | ||
| 2634 | &SPIRVDecompiler::Unary<&Module::OpAll, Type::Bool, Type::Bool2>, | ||
| 2635 | |||
| 2636 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool, Type::Float>, | ||
| 2637 | &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool, Type::Float>, | ||
| 2638 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool, Type::Float>, | ||
| 2639 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool, Type::Float>, | ||
| 2640 | &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool, Type::Float>, | ||
| 2641 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool, Type::Float>, | ||
| 2642 | &SPIRVDecompiler::LogicalFOrdered, | ||
| 2643 | &SPIRVDecompiler::LogicalFUnordered, | ||
| 2644 | &SPIRVDecompiler::Binary<&Module::OpFUnordLessThan, Type::Bool, Type::Float>, | ||
| 2645 | &SPIRVDecompiler::Binary<&Module::OpFUnordEqual, Type::Bool, Type::Float>, | ||
| 2646 | &SPIRVDecompiler::Binary<&Module::OpFUnordLessThanEqual, Type::Bool, Type::Float>, | ||
| 2647 | &SPIRVDecompiler::Binary<&Module::OpFUnordGreaterThan, Type::Bool, Type::Float>, | ||
| 2648 | &SPIRVDecompiler::Binary<&Module::OpFUnordNotEqual, Type::Bool, Type::Float>, | ||
| 2649 | &SPIRVDecompiler::Binary<&Module::OpFUnordGreaterThanEqual, Type::Bool, Type::Float>, | ||
| 2650 | |||
| 2651 | &SPIRVDecompiler::Binary<&Module::OpSLessThan, Type::Bool, Type::Int>, | ||
| 2652 | &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Int>, | ||
| 2653 | &SPIRVDecompiler::Binary<&Module::OpSLessThanEqual, Type::Bool, Type::Int>, | ||
| 2654 | &SPIRVDecompiler::Binary<&Module::OpSGreaterThan, Type::Bool, Type::Int>, | ||
| 2655 | &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Int>, | ||
| 2656 | &SPIRVDecompiler::Binary<&Module::OpSGreaterThanEqual, Type::Bool, Type::Int>, | ||
| 2657 | |||
| 2658 | &SPIRVDecompiler::Binary<&Module::OpULessThan, Type::Bool, Type::Uint>, | ||
| 2659 | &SPIRVDecompiler::Binary<&Module::OpIEqual, Type::Bool, Type::Uint>, | ||
| 2660 | &SPIRVDecompiler::Binary<&Module::OpULessThanEqual, Type::Bool, Type::Uint>, | ||
| 2661 | &SPIRVDecompiler::Binary<&Module::OpUGreaterThan, Type::Bool, Type::Uint>, | ||
| 2662 | &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, | ||
| 2663 | &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, | ||
| 2664 | |||
| 2665 | &SPIRVDecompiler::LogicalAddCarry, | ||
| 2666 | |||
| 2667 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, | ||
| 2668 | &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2669 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2670 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, | ||
| 2671 | &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2672 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2673 | // TODO(Rodrigo): Should these use the OpFUnord* variants? | ||
| 2674 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, | ||
| 2675 | &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2676 | &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2677 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThan, Type::Bool2, Type::HalfFloat>, | ||
| 2678 | &SPIRVDecompiler::Binary<&Module::OpFOrdNotEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2679 | &SPIRVDecompiler::Binary<&Module::OpFOrdGreaterThanEqual, Type::Bool2, Type::HalfFloat>, | ||
| 2680 | |||
| 2681 | &SPIRVDecompiler::Texture, | ||
| 2682 | &SPIRVDecompiler::TextureLod, | ||
| 2683 | &SPIRVDecompiler::TextureGather, | ||
| 2684 | &SPIRVDecompiler::TextureQueryDimensions, | ||
| 2685 | &SPIRVDecompiler::TextureQueryLod, | ||
| 2686 | &SPIRVDecompiler::TexelFetch, | ||
| 2687 | &SPIRVDecompiler::TextureGradient, | ||
| 2688 | |||
| 2689 | &SPIRVDecompiler::ImageLoad, | ||
| 2690 | &SPIRVDecompiler::ImageStore, | ||
| 2691 | &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>, | ||
| 2692 | &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>, | ||
| 2693 | &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>, | ||
| 2694 | &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>, | ||
| 2695 | &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>, | ||
| 2696 | |||
| 2697 | &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>, | ||
| 2698 | &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>, | ||
| 2699 | &SPIRVDecompiler::Atomic<&Module::OpAtomicUMin>, | ||
| 2700 | &SPIRVDecompiler::Atomic<&Module::OpAtomicUMax>, | ||
| 2701 | &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>, | ||
| 2702 | &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>, | ||
| 2703 | &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>, | ||
| 2704 | |||
| 2705 | &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>, | ||
| 2706 | &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>, | ||
| 2707 | &SPIRVDecompiler::Atomic<&Module::OpAtomicSMin>, | ||
| 2708 | &SPIRVDecompiler::Atomic<&Module::OpAtomicSMax>, | ||
| 2709 | &SPIRVDecompiler::Atomic<&Module::OpAtomicAnd>, | ||
| 2710 | &SPIRVDecompiler::Atomic<&Module::OpAtomicOr>, | ||
| 2711 | &SPIRVDecompiler::Atomic<&Module::OpAtomicXor>, | ||
| 2712 | |||
| 2713 | &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>, | ||
| 2714 | &SPIRVDecompiler::Reduce<&Module::OpAtomicUMin>, | ||
| 2715 | &SPIRVDecompiler::Reduce<&Module::OpAtomicUMax>, | ||
| 2716 | &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>, | ||
| 2717 | &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>, | ||
| 2718 | &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>, | ||
| 2719 | |||
| 2720 | &SPIRVDecompiler::Reduce<&Module::OpAtomicIAdd>, | ||
| 2721 | &SPIRVDecompiler::Reduce<&Module::OpAtomicSMin>, | ||
| 2722 | &SPIRVDecompiler::Reduce<&Module::OpAtomicSMax>, | ||
| 2723 | &SPIRVDecompiler::Reduce<&Module::OpAtomicAnd>, | ||
| 2724 | &SPIRVDecompiler::Reduce<&Module::OpAtomicOr>, | ||
| 2725 | &SPIRVDecompiler::Reduce<&Module::OpAtomicXor>, | ||
| 2726 | |||
| 2727 | &SPIRVDecompiler::Branch, | ||
| 2728 | &SPIRVDecompiler::BranchIndirect, | ||
| 2729 | &SPIRVDecompiler::PushFlowStack, | ||
| 2730 | &SPIRVDecompiler::PopFlowStack, | ||
| 2731 | &SPIRVDecompiler::Exit, | ||
| 2732 | &SPIRVDecompiler::Discard, | ||
| 2733 | |||
| 2734 | &SPIRVDecompiler::EmitVertex, | ||
| 2735 | &SPIRVDecompiler::EndPrimitive, | ||
| 2736 | |||
| 2737 | &SPIRVDecompiler::InvocationId, | ||
| 2738 | &SPIRVDecompiler::YNegate, | ||
| 2739 | &SPIRVDecompiler::LocalInvocationId<0>, | ||
| 2740 | &SPIRVDecompiler::LocalInvocationId<1>, | ||
| 2741 | &SPIRVDecompiler::LocalInvocationId<2>, | ||
| 2742 | &SPIRVDecompiler::WorkGroupId<0>, | ||
| 2743 | &SPIRVDecompiler::WorkGroupId<1>, | ||
| 2744 | &SPIRVDecompiler::WorkGroupId<2>, | ||
| 2745 | |||
| 2746 | &SPIRVDecompiler::BallotThread, | ||
| 2747 | &SPIRVDecompiler::Vote<&Module::OpSubgroupAllKHR>, | ||
| 2748 | &SPIRVDecompiler::Vote<&Module::OpSubgroupAnyKHR>, | ||
| 2749 | &SPIRVDecompiler::Vote<&Module::OpSubgroupAllEqualKHR>, | ||
| 2750 | |||
| 2751 | &SPIRVDecompiler::ThreadId, | ||
| 2752 | &SPIRVDecompiler::ThreadMask<0>, // Eq | ||
| 2753 | &SPIRVDecompiler::ThreadMask<1>, // Ge | ||
| 2754 | &SPIRVDecompiler::ThreadMask<2>, // Gt | ||
| 2755 | &SPIRVDecompiler::ThreadMask<3>, // Le | ||
| 2756 | &SPIRVDecompiler::ThreadMask<4>, // Lt | ||
| 2757 | &SPIRVDecompiler::ShuffleIndexed, | ||
| 2758 | |||
| 2759 | &SPIRVDecompiler::Barrier, | ||
| 2760 | &SPIRVDecompiler::MemoryBarrier<spv::Scope::Workgroup>, | ||
| 2761 | &SPIRVDecompiler::MemoryBarrier<spv::Scope::Device>, | ||
| 2762 | }; | ||
| 2763 | static_assert(operation_decompilers.size() == static_cast<std::size_t>(OperationCode::Amount)); | ||
| 2764 | |||
| 2765 | const Device& device; | ||
| 2766 | const ShaderIR& ir; | ||
| 2767 | const ShaderType stage; | ||
| 2768 | const Tegra::Shader::Header header; | ||
| 2769 | const Registry& registry; | ||
| 2770 | const Specialization& specialization; | ||
| 2771 | std::unordered_map<u8, VaryingTFB> transform_feedback; | ||
| 2772 | |||
| 2773 | const Id t_void = Name(TypeVoid(), "void"); | ||
| 2774 | |||
| 2775 | const Id t_bool = Name(TypeBool(), "bool"); | ||
| 2776 | const Id t_bool2 = Name(TypeVector(t_bool, 2), "bool2"); | ||
| 2777 | |||
| 2778 | const Id t_int = Name(TypeInt(32, true), "int"); | ||
| 2779 | const Id t_int2 = Name(TypeVector(t_int, 2), "int2"); | ||
| 2780 | const Id t_int3 = Name(TypeVector(t_int, 3), "int3"); | ||
| 2781 | const Id t_int4 = Name(TypeVector(t_int, 4), "int4"); | ||
| 2782 | |||
| 2783 | const Id t_uint = Name(TypeInt(32, false), "uint"); | ||
| 2784 | const Id t_uint2 = Name(TypeVector(t_uint, 2), "uint2"); | ||
| 2785 | const Id t_uint3 = Name(TypeVector(t_uint, 3), "uint3"); | ||
| 2786 | const Id t_uint4 = Name(TypeVector(t_uint, 4), "uint4"); | ||
| 2787 | |||
| 2788 | const Id t_float = Name(TypeFloat(32), "float"); | ||
| 2789 | const Id t_float2 = Name(TypeVector(t_float, 2), "float2"); | ||
| 2790 | const Id t_float3 = Name(TypeVector(t_float, 3), "float3"); | ||
| 2791 | const Id t_float4 = Name(TypeVector(t_float, 4), "float4"); | ||
| 2792 | |||
| 2793 | const Id t_prv_bool = Name(TypePointer(spv::StorageClass::Private, t_bool), "prv_bool"); | ||
| 2794 | const Id t_prv_float = Name(TypePointer(spv::StorageClass::Private, t_float), "prv_float"); | ||
| 2795 | |||
| 2796 | const Id t_func_uint = Name(TypePointer(spv::StorageClass::Function, t_uint), "func_uint"); | ||
| 2797 | |||
| 2798 | const Id t_in_bool = Name(TypePointer(spv::StorageClass::Input, t_bool), "in_bool"); | ||
| 2799 | const Id t_in_int = Name(TypePointer(spv::StorageClass::Input, t_int), "in_int"); | ||
| 2800 | const Id t_in_int4 = Name(TypePointer(spv::StorageClass::Input, t_int4), "in_int4"); | ||
| 2801 | const Id t_in_uint = Name(TypePointer(spv::StorageClass::Input, t_uint), "in_uint"); | ||
| 2802 | const Id t_in_uint3 = Name(TypePointer(spv::StorageClass::Input, t_uint3), "in_uint3"); | ||
| 2803 | const Id t_in_uint4 = Name(TypePointer(spv::StorageClass::Input, t_uint4), "in_uint4"); | ||
| 2804 | const Id t_in_float = Name(TypePointer(spv::StorageClass::Input, t_float), "in_float"); | ||
| 2805 | const Id t_in_float2 = Name(TypePointer(spv::StorageClass::Input, t_float2), "in_float2"); | ||
| 2806 | const Id t_in_float3 = Name(TypePointer(spv::StorageClass::Input, t_float3), "in_float3"); | ||
| 2807 | const Id t_in_float4 = Name(TypePointer(spv::StorageClass::Input, t_float4), "in_float4"); | ||
| 2808 | |||
| 2809 | const Id t_out_int = Name(TypePointer(spv::StorageClass::Output, t_int), "out_int"); | ||
| 2810 | |||
| 2811 | const Id t_out_float = Name(TypePointer(spv::StorageClass::Output, t_float), "out_float"); | ||
| 2812 | const Id t_out_float4 = Name(TypePointer(spv::StorageClass::Output, t_float4), "out_float4"); | ||
| 2813 | |||
| 2814 | const Id t_cbuf_float = TypePointer(spv::StorageClass::Uniform, t_float); | ||
| 2815 | const Id t_cbuf_std140 = Decorate( | ||
| 2816 | Name(TypeArray(t_float4, Constant(t_uint, MaxConstBufferElements)), "CbufStd140Array"), | ||
| 2817 | spv::Decoration::ArrayStride, 16U); | ||
| 2818 | const Id t_cbuf_scalar = Decorate( | ||
| 2819 | Name(TypeArray(t_float, Constant(t_uint, MaxConstBufferFloats)), "CbufScalarArray"), | ||
| 2820 | spv::Decoration::ArrayStride, 4U); | ||
| 2821 | const Id t_cbuf_std140_struct = MemberDecorate( | ||
| 2822 | Decorate(TypeStruct(t_cbuf_std140), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); | ||
| 2823 | const Id t_cbuf_scalar_struct = MemberDecorate( | ||
| 2824 | Decorate(TypeStruct(t_cbuf_scalar), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); | ||
| 2825 | const Id t_cbuf_std140_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_std140_struct); | ||
| 2826 | const Id t_cbuf_scalar_ubo = TypePointer(spv::StorageClass::Uniform, t_cbuf_scalar_struct); | ||
| 2827 | |||
| 2828 | Id t_smem_uint{}; | ||
| 2829 | |||
| 2830 | const Id t_gmem_uint = TypePointer(spv::StorageClass::StorageBuffer, t_uint); | ||
| 2831 | const Id t_gmem_array = | ||
| 2832 | Name(Decorate(TypeRuntimeArray(t_uint), spv::Decoration::ArrayStride, 4U), "GmemArray"); | ||
| 2833 | const Id t_gmem_struct = MemberDecorate( | ||
| 2834 | Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0); | ||
| 2835 | const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct); | ||
| 2836 | |||
| 2837 | const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint); | ||
| 2838 | |||
| 2839 | const Id v_float_zero = Constant(t_float, 0.0f); | ||
| 2840 | const Id v_float_one = Constant(t_float, 1.0f); | ||
| 2841 | const Id v_uint_zero = Constant(t_uint, 0); | ||
| 2842 | |||
| 2843 | // Nvidia uses these defaults for varyings (e.g. position and generic attributes) | ||
| 2844 | const Id v_varying_default = | ||
| 2845 | ConstantComposite(t_float4, v_float_zero, v_float_zero, v_float_zero, v_float_one); | ||
| 2846 | |||
| 2847 | const Id v_true = ConstantTrue(t_bool); | ||
| 2848 | const Id v_false = ConstantFalse(t_bool); | ||
| 2849 | |||
| 2850 | Id t_scalar_half{}; | ||
| 2851 | Id t_half{}; | ||
| 2852 | |||
| 2853 | Id out_vertex{}; | ||
| 2854 | Id in_vertex{}; | ||
| 2855 | std::map<u32, Id> registers; | ||
| 2856 | std::map<u32, Id> custom_variables; | ||
| 2857 | std::map<Tegra::Shader::Pred, Id> predicates; | ||
| 2858 | std::map<u32, Id> flow_variables; | ||
| 2859 | Id local_memory{}; | ||
| 2860 | Id shared_memory{}; | ||
| 2861 | std::array<Id, INTERNAL_FLAGS_COUNT> internal_flags{}; | ||
| 2862 | std::map<Attribute::Index, Id> input_attributes; | ||
| 2863 | std::unordered_map<u8, GenericVaryingDescription> output_attributes; | ||
| 2864 | std::map<u32, Id> constant_buffers; | ||
| 2865 | std::map<GlobalMemoryBase, Id> global_buffers; | ||
| 2866 | std::map<u32, TexelBuffer> uniform_texels; | ||
| 2867 | std::map<u32, SampledImage> sampled_images; | ||
| 2868 | std::map<u32, StorageImage> images; | ||
| 2869 | |||
| 2870 | std::array<Id, Maxwell::NumRenderTargets> frag_colors{}; | ||
| 2871 | Id instance_index{}; | ||
| 2872 | Id vertex_index{}; | ||
| 2873 | Id base_instance{}; | ||
| 2874 | Id base_vertex{}; | ||
| 2875 | Id frag_depth{}; | ||
| 2876 | Id frag_coord{}; | ||
| 2877 | Id front_facing{}; | ||
| 2878 | Id point_coord{}; | ||
| 2879 | Id tess_level_outer{}; | ||
| 2880 | Id tess_level_inner{}; | ||
| 2881 | Id tess_coord{}; | ||
| 2882 | Id invocation_id{}; | ||
| 2883 | Id workgroup_id{}; | ||
| 2884 | Id local_invocation_id{}; | ||
| 2885 | Id thread_id{}; | ||
| 2886 | std::array<Id, 5> thread_masks{}; // eq, ge, gt, le, lt | ||
| 2887 | |||
| 2888 | VertexIndices in_indices; | ||
| 2889 | VertexIndices out_indices; | ||
| 2890 | |||
| 2891 | std::vector<Id> interfaces; | ||
| 2892 | |||
| 2893 | Id jmp_to{}; | ||
| 2894 | Id ssy_flow_stack_top{}; | ||
| 2895 | Id pbk_flow_stack_top{}; | ||
| 2896 | Id ssy_flow_stack{}; | ||
| 2897 | Id pbk_flow_stack{}; | ||
| 2898 | Id continue_label{}; | ||
| 2899 | std::map<u32, Id> labels; | ||
| 2900 | |||
| 2901 | bool conditional_branch_set{}; | ||
| 2902 | bool inside_branch{}; | ||
| 2903 | }; | ||
| 2904 | |||
| 2905 | class ExprDecompiler { | ||
| 2906 | public: | ||
| 2907 | explicit ExprDecompiler(SPIRVDecompiler& decomp_) : decomp{decomp_} {} | ||
| 2908 | |||
| 2909 | Id operator()(const ExprAnd& expr) { | ||
| 2910 | const Id type_def = decomp.GetTypeDefinition(Type::Bool); | ||
| 2911 | const Id op1 = Visit(expr.operand1); | ||
| 2912 | const Id op2 = Visit(expr.operand2); | ||
| 2913 | return decomp.OpLogicalAnd(type_def, op1, op2); | ||
| 2914 | } | ||
| 2915 | |||
| 2916 | Id operator()(const ExprOr& expr) { | ||
| 2917 | const Id type_def = decomp.GetTypeDefinition(Type::Bool); | ||
| 2918 | const Id op1 = Visit(expr.operand1); | ||
| 2919 | const Id op2 = Visit(expr.operand2); | ||
| 2920 | return decomp.OpLogicalOr(type_def, op1, op2); | ||
| 2921 | } | ||
| 2922 | |||
| 2923 | Id operator()(const ExprNot& expr) { | ||
| 2924 | const Id type_def = decomp.GetTypeDefinition(Type::Bool); | ||
| 2925 | const Id op1 = Visit(expr.operand1); | ||
| 2926 | return decomp.OpLogicalNot(type_def, op1); | ||
| 2927 | } | ||
| 2928 | |||
| 2929 | Id operator()(const ExprPredicate& expr) { | ||
| 2930 | const auto pred = static_cast<Tegra::Shader::Pred>(expr.predicate); | ||
| 2931 | return decomp.OpLoad(decomp.t_bool, decomp.predicates.at(pred)); | ||
| 2932 | } | ||
| 2933 | |||
| 2934 | Id operator()(const ExprCondCode& expr) { | ||
| 2935 | return decomp.AsBool(decomp.Visit(decomp.ir.GetConditionCode(expr.cc))); | ||
| 2936 | } | ||
| 2937 | |||
| 2938 | Id operator()(const ExprVar& expr) { | ||
| 2939 | return decomp.OpLoad(decomp.t_bool, decomp.flow_variables.at(expr.var_index)); | ||
| 2940 | } | ||
| 2941 | |||
| 2942 | Id operator()(const ExprBoolean& expr) { | ||
| 2943 | return expr.value ? decomp.v_true : decomp.v_false; | ||
| 2944 | } | ||
| 2945 | |||
| 2946 | Id operator()(const ExprGprEqual& expr) { | ||
| 2947 | const Id target = decomp.Constant(decomp.t_uint, expr.value); | ||
| 2948 | Id gpr = decomp.OpLoad(decomp.t_float, decomp.registers.at(expr.gpr)); | ||
| 2949 | gpr = decomp.OpBitcast(decomp.t_uint, gpr); | ||
| 2950 | return decomp.OpIEqual(decomp.t_bool, gpr, target); | ||
| 2951 | } | ||
| 2952 | |||
| 2953 | Id Visit(const Expr& node) { | ||
| 2954 | return std::visit(*this, *node); | ||
| 2955 | } | ||
| 2956 | |||
| 2957 | private: | ||
| 2958 | SPIRVDecompiler& decomp; | ||
| 2959 | }; | ||
| 2960 | |||
| 2961 | class ASTDecompiler { | ||
| 2962 | public: | ||
| 2963 | explicit ASTDecompiler(SPIRVDecompiler& decomp_) : decomp{decomp_} {} | ||
| 2964 | |||
| 2965 | void operator()(const ASTProgram& ast) { | ||
| 2966 | ASTNode current = ast.nodes.GetFirst(); | ||
| 2967 | while (current) { | ||
| 2968 | Visit(current); | ||
| 2969 | current = current->GetNext(); | ||
| 2970 | } | ||
| 2971 | } | ||
| 2972 | |||
| 2973 | void operator()(const ASTIfThen& ast) { | ||
| 2974 | ExprDecompiler expr_parser{decomp}; | ||
| 2975 | const Id condition = expr_parser.Visit(ast.condition); | ||
| 2976 | const Id then_label = decomp.OpLabel(); | ||
| 2977 | const Id endif_label = decomp.OpLabel(); | ||
| 2978 | decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); | ||
| 2979 | decomp.OpBranchConditional(condition, then_label, endif_label); | ||
| 2980 | decomp.AddLabel(then_label); | ||
| 2981 | ASTNode current = ast.nodes.GetFirst(); | ||
| 2982 | while (current) { | ||
| 2983 | Visit(current); | ||
| 2984 | current = current->GetNext(); | ||
| 2985 | } | ||
| 2986 | decomp.OpBranch(endif_label); | ||
| 2987 | decomp.AddLabel(endif_label); | ||
| 2988 | } | ||
| 2989 | |||
| 2990 | void operator()([[maybe_unused]] const ASTIfElse& ast) { | ||
| 2991 | UNREACHABLE(); | ||
| 2992 | } | ||
| 2993 | |||
| 2994 | void operator()([[maybe_unused]] const ASTBlockEncoded& ast) { | ||
| 2995 | UNREACHABLE(); | ||
| 2996 | } | ||
| 2997 | |||
| 2998 | void operator()(const ASTBlockDecoded& ast) { | ||
| 2999 | decomp.VisitBasicBlock(ast.nodes); | ||
| 3000 | } | ||
| 3001 | |||
| 3002 | void operator()(const ASTVarSet& ast) { | ||
| 3003 | ExprDecompiler expr_parser{decomp}; | ||
| 3004 | const Id condition = expr_parser.Visit(ast.condition); | ||
| 3005 | decomp.OpStore(decomp.flow_variables.at(ast.index), condition); | ||
| 3006 | } | ||
| 3007 | |||
| 3008 | void operator()([[maybe_unused]] const ASTLabel& ast) { | ||
| 3009 | // Do nothing | ||
| 3010 | } | ||
| 3011 | |||
| 3012 | void operator()([[maybe_unused]] const ASTGoto& ast) { | ||
| 3013 | UNREACHABLE(); | ||
| 3014 | } | ||
| 3015 | |||
| 3016 | void operator()(const ASTDoWhile& ast) { | ||
| 3017 | const Id loop_label = decomp.OpLabel(); | ||
| 3018 | const Id endloop_label = decomp.OpLabel(); | ||
| 3019 | const Id loop_start_block = decomp.OpLabel(); | ||
| 3020 | const Id loop_continue_block = decomp.OpLabel(); | ||
| 3021 | current_loop_exit = endloop_label; | ||
| 3022 | decomp.OpBranch(loop_label); | ||
| 3023 | decomp.AddLabel(loop_label); | ||
| 3024 | decomp.OpLoopMerge(endloop_label, loop_continue_block, spv::LoopControlMask::MaskNone); | ||
| 3025 | decomp.OpBranch(loop_start_block); | ||
| 3026 | decomp.AddLabel(loop_start_block); | ||
| 3027 | ASTNode current = ast.nodes.GetFirst(); | ||
| 3028 | while (current) { | ||
| 3029 | Visit(current); | ||
| 3030 | current = current->GetNext(); | ||
| 3031 | } | ||
| 3032 | decomp.OpBranch(loop_continue_block); | ||
| 3033 | decomp.AddLabel(loop_continue_block); | ||
| 3034 | ExprDecompiler expr_parser{decomp}; | ||
| 3035 | const Id condition = expr_parser.Visit(ast.condition); | ||
| 3036 | decomp.OpBranchConditional(condition, loop_label, endloop_label); | ||
| 3037 | decomp.AddLabel(endloop_label); | ||
| 3038 | } | ||
| 3039 | |||
| 3040 | void operator()(const ASTReturn& ast) { | ||
| 3041 | if (!VideoCommon::Shader::ExprIsTrue(ast.condition)) { | ||
| 3042 | ExprDecompiler expr_parser{decomp}; | ||
| 3043 | const Id condition = expr_parser.Visit(ast.condition); | ||
| 3044 | const Id then_label = decomp.OpLabel(); | ||
| 3045 | const Id endif_label = decomp.OpLabel(); | ||
| 3046 | decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); | ||
| 3047 | decomp.OpBranchConditional(condition, then_label, endif_label); | ||
| 3048 | decomp.AddLabel(then_label); | ||
| 3049 | if (ast.kills) { | ||
| 3050 | decomp.OpKill(); | ||
| 3051 | } else { | ||
| 3052 | decomp.PreExit(); | ||
| 3053 | decomp.OpReturn(); | ||
| 3054 | } | ||
| 3055 | decomp.AddLabel(endif_label); | ||
| 3056 | } else { | ||
| 3057 | const Id next_block = decomp.OpLabel(); | ||
| 3058 | decomp.OpBranch(next_block); | ||
| 3059 | decomp.AddLabel(next_block); | ||
| 3060 | if (ast.kills) { | ||
| 3061 | decomp.OpKill(); | ||
| 3062 | } else { | ||
| 3063 | decomp.PreExit(); | ||
| 3064 | decomp.OpReturn(); | ||
| 3065 | } | ||
| 3066 | decomp.AddLabel(decomp.OpLabel()); | ||
| 3067 | } | ||
| 3068 | } | ||
| 3069 | |||
| 3070 | void operator()(const ASTBreak& ast) { | ||
| 3071 | if (!VideoCommon::Shader::ExprIsTrue(ast.condition)) { | ||
| 3072 | ExprDecompiler expr_parser{decomp}; | ||
| 3073 | const Id condition = expr_parser.Visit(ast.condition); | ||
| 3074 | const Id then_label = decomp.OpLabel(); | ||
| 3075 | const Id endif_label = decomp.OpLabel(); | ||
| 3076 | decomp.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone); | ||
| 3077 | decomp.OpBranchConditional(condition, then_label, endif_label); | ||
| 3078 | decomp.AddLabel(then_label); | ||
| 3079 | decomp.OpBranch(current_loop_exit); | ||
| 3080 | decomp.AddLabel(endif_label); | ||
| 3081 | } else { | ||
| 3082 | const Id next_block = decomp.OpLabel(); | ||
| 3083 | decomp.OpBranch(next_block); | ||
| 3084 | decomp.AddLabel(next_block); | ||
| 3085 | decomp.OpBranch(current_loop_exit); | ||
| 3086 | decomp.AddLabel(decomp.OpLabel()); | ||
| 3087 | } | ||
| 3088 | } | ||
| 3089 | |||
| 3090 | void Visit(const ASTNode& node) { | ||
| 3091 | std::visit(*this, *node->GetInnerData()); | ||
| 3092 | } | ||
| 3093 | |||
| 3094 | private: | ||
| 3095 | SPIRVDecompiler& decomp; | ||
| 3096 | Id current_loop_exit{}; | ||
| 3097 | }; | ||
| 3098 | |||
| 3099 | void SPIRVDecompiler::DecompileAST() { | ||
| 3100 | const u32 num_flow_variables = ir.GetASTNumVariables(); | ||
| 3101 | for (u32 i = 0; i < num_flow_variables; i++) { | ||
| 3102 | const Id id = OpVariable(t_prv_bool, spv::StorageClass::Private, v_false); | ||
| 3103 | Name(id, fmt::format("flow_var_{}", i)); | ||
| 3104 | flow_variables.emplace(i, AddGlobalVariable(id)); | ||
| 3105 | } | ||
| 3106 | |||
| 3107 | DefinePrologue(); | ||
| 3108 | |||
| 3109 | const ASTNode program = ir.GetASTProgram(); | ||
| 3110 | ASTDecompiler decompiler{*this}; | ||
| 3111 | decompiler.Visit(program); | ||
| 3112 | |||
| 3113 | const Id next_block = OpLabel(); | ||
| 3114 | OpBranch(next_block); | ||
| 3115 | AddLabel(next_block); | ||
| 3116 | } | ||
| 3117 | |||
| 3118 | } // Anonymous namespace | ||
| 3119 | |||
| 3120 | ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) { | ||
| 3121 | ShaderEntries entries; | ||
| 3122 | for (const auto& cbuf : ir.GetConstantBuffers()) { | ||
| 3123 | entries.const_buffers.emplace_back(cbuf.second, cbuf.first); | ||
| 3124 | } | ||
| 3125 | for (const auto& [base, usage] : ir.GetGlobalMemory()) { | ||
| 3126 | entries.global_buffers.emplace_back(GlobalBufferEntry{ | ||
| 3127 | .cbuf_index = base.cbuf_index, | ||
| 3128 | .cbuf_offset = base.cbuf_offset, | ||
| 3129 | .is_written = usage.is_written, | ||
| 3130 | }); | ||
| 3131 | } | ||
| 3132 | for (const auto& sampler : ir.GetSamplers()) { | ||
| 3133 | if (sampler.is_buffer) { | ||
| 3134 | entries.uniform_texels.emplace_back(sampler); | ||
| 3135 | } else { | ||
| 3136 | entries.samplers.emplace_back(sampler); | ||
| 3137 | } | ||
| 3138 | } | ||
| 3139 | for (const auto& image : ir.GetImages()) { | ||
| 3140 | if (image.type == Tegra::Shader::ImageType::TextureBuffer) { | ||
| 3141 | entries.storage_texels.emplace_back(image); | ||
| 3142 | } else { | ||
| 3143 | entries.images.emplace_back(image); | ||
| 3144 | } | ||
| 3145 | } | ||
| 3146 | for (const auto& attribute : ir.GetInputAttributes()) { | ||
| 3147 | if (IsGenericAttribute(attribute)) { | ||
| 3148 | entries.attributes.insert(GetGenericAttributeLocation(attribute)); | ||
| 3149 | } | ||
| 3150 | } | ||
| 3151 | for (const auto& buffer : entries.const_buffers) { | ||
| 3152 | entries.enabled_uniform_buffers |= 1U << buffer.GetIndex(); | ||
| 3153 | } | ||
| 3154 | entries.clip_distances = ir.GetClipDistances(); | ||
| 3155 | entries.shader_length = ir.GetLength(); | ||
| 3156 | entries.uses_warps = ir.UsesWarps(); | ||
| 3157 | return entries; | ||
| 3158 | } | ||
| 3159 | |||
| 3160 | std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, | ||
| 3161 | ShaderType stage, const VideoCommon::Shader::Registry& registry, | ||
| 3162 | const Specialization& specialization) { | ||
| 3163 | return SPIRVDecompiler(device, ir, stage, registry, specialization).Assemble(); | ||
| 3164 | } | ||
| 3165 | |||
| 3166 | } // namespace Vulkan | ||
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h deleted file mode 100644 index 5d94132a5..000000000 --- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h +++ /dev/null | |||
| @@ -1,99 +0,0 @@ | |||
| 1 | // Copyright 2019 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #pragma once | ||
| 6 | |||
| 7 | #include <array> | ||
| 8 | #include <set> | ||
| 9 | #include <vector> | ||
| 10 | |||
| 11 | #include "common/common_types.h" | ||
| 12 | #include "video_core/engines/maxwell_3d.h" | ||
| 13 | #include "video_core/engines/shader_type.h" | ||
| 14 | #include "video_core/shader/registry.h" | ||
| 15 | #include "video_core/shader/shader_ir.h" | ||
| 16 | |||
| 17 | namespace Vulkan { | ||
| 18 | |||
| 19 | class Device; | ||
| 20 | |||
| 21 | using Maxwell = Tegra::Engines::Maxwell3D::Regs; | ||
| 22 | using UniformTexelEntry = VideoCommon::Shader::SamplerEntry; | ||
| 23 | using SamplerEntry = VideoCommon::Shader::SamplerEntry; | ||
| 24 | using StorageTexelEntry = VideoCommon::Shader::ImageEntry; | ||
| 25 | using ImageEntry = VideoCommon::Shader::ImageEntry; | ||
| 26 | |||
| 27 | constexpr u32 DESCRIPTOR_SET = 0; | ||
| 28 | |||
| 29 | class ConstBufferEntry : public VideoCommon::Shader::ConstBuffer { | ||
| 30 | public: | ||
| 31 | explicit constexpr ConstBufferEntry(const ConstBuffer& entry_, u32 index_) | ||
| 32 | : ConstBuffer{entry_}, index{index_} {} | ||
| 33 | |||
| 34 | constexpr u32 GetIndex() const { | ||
| 35 | return index; | ||
| 36 | } | ||
| 37 | |||
| 38 | private: | ||
| 39 | u32 index{}; | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct GlobalBufferEntry { | ||
| 43 | u32 cbuf_index{}; | ||
| 44 | u32 cbuf_offset{}; | ||
| 45 | bool is_written{}; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct ShaderEntries { | ||
| 49 | u32 NumBindings() const { | ||
| 50 | return static_cast<u32>(const_buffers.size() + global_buffers.size() + | ||
| 51 | uniform_texels.size() + samplers.size() + storage_texels.size() + | ||
| 52 | images.size()); | ||
| 53 | } | ||
| 54 | |||
| 55 | std::vector<ConstBufferEntry> const_buffers; | ||
| 56 | std::vector<GlobalBufferEntry> global_buffers; | ||
| 57 | std::vector<UniformTexelEntry> uniform_texels; | ||
| 58 | std::vector<SamplerEntry> samplers; | ||
| 59 | std::vector<StorageTexelEntry> storage_texels; | ||
| 60 | std::vector<ImageEntry> images; | ||
| 61 | std::set<u32> attributes; | ||
| 62 | std::array<bool, Maxwell::NumClipDistances> clip_distances{}; | ||
| 63 | std::size_t shader_length{}; | ||
| 64 | u32 enabled_uniform_buffers{}; | ||
| 65 | bool uses_warps{}; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct Specialization final { | ||
| 69 | u32 base_binding{}; | ||
| 70 | |||
| 71 | // Compute specific | ||
| 72 | std::array<u32, 3> workgroup_size{}; | ||
| 73 | u32 shared_memory_size{}; | ||
| 74 | |||
| 75 | // Graphics specific | ||
| 76 | std::optional<float> point_size; | ||
| 77 | std::bitset<Maxwell::NumVertexAttributes> enabled_attributes; | ||
| 78 | std::array<Maxwell::VertexAttribute::Type, Maxwell::NumVertexAttributes> attribute_types{}; | ||
| 79 | bool ndc_minus_one_to_one{}; | ||
| 80 | bool early_fragment_tests{}; | ||
| 81 | float alpha_test_ref{}; | ||
| 82 | Maxwell::ComparisonOp alpha_test_func{}; | ||
| 83 | }; | ||
| 84 | // Old gcc versions don't consider this trivially copyable. | ||
| 85 | // static_assert(std::is_trivially_copyable_v<Specialization>); | ||
| 86 | |||
| 87 | struct SPIRVShader { | ||
| 88 | std::vector<u32> code; | ||
| 89 | ShaderEntries entries; | ||
| 90 | }; | ||
| 91 | |||
| 92 | ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir); | ||
| 93 | |||
| 94 | std::vector<u32> Decompile(const Device& device, const VideoCommon::Shader::ShaderIR& ir, | ||
| 95 | Tegra::Engines::ShaderType stage, | ||
| 96 | const VideoCommon::Shader::Registry& registry, | ||
| 97 | const Specialization& specialization); | ||
| 98 | |||
| 99 | } // namespace Vulkan | ||