author    lat9nq 2021-07-11 22:10:38 -0400
committer ameerj 2021-07-22 21:51:40 -0400
commit    49946cf780c317b4c5ccabb52ec433eba01c1970
tree      628060b15b133cf3a1aaf716fba3517fc5c983f0 /src/shader_recompiler
parent    main: Update Shader Cache menu options
shader_recompiler, video_core: Resolve clang errors
Silences the following warnings-turned-errors:

-Wsign-conversion
-Wunused-private-field
-Wbraced-scalar-init
-Wunused-variable

And some other errors.
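Note: most of the -Wsign-conversion fixes in this diff follow one pattern: a conversion that changes signedness is made explicit with static_cast instead of being left implicit. A minimal, hypothetical C++ sketch of the pattern (not code from this commit; the function name is illustrative):

    #include <cstdint>

    // Before (warns): implicit conversion changes signedness [-Wsign-conversion]
    // std::int32_t AsSigned(std::uint32_t raw) { return raw; }

    // After: the signedness change is spelled out, so clang no longer flags it.
    std::int32_t AsSigned(std::uint32_t raw) {
        return static_cast<std::int32_t>(raw);
    }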
Diffstat (limited to 'src/shader_recompiler')
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_context.h | 2
-rw-r--r--  src/shader_recompiler/backend/glasm/reg_alloc.h | 3
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp | 2
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_image.cpp | 15
-rw-r--r--  src/shader_recompiler/frontend/ir/opcodes.h | 3
-rw-r--r--  src/shader_recompiler/frontend/maxwell/control_flow.h | 1
-rw-r--r--  src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp | 9
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp | 12
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp | 4
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp | 12
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/surface_atomic_operations.cpp | 3
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate/impl/surface_load_store.cpp | 8
-rw-r--r--  src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp | 6
13 files changed, 39 insertions, 41 deletions
diff --git a/src/shader_recompiler/backend/glasm/emit_context.h b/src/shader_recompiler/backend/glasm/emit_context.h
index 1da51a996..8433e5c00 100644
--- a/src/shader_recompiler/backend/glasm/emit_context.h
+++ b/src/shader_recompiler/backend/glasm/emit_context.h
@@ -59,7 +59,7 @@ public:
     }
 
     std::string code;
-    RegAlloc reg_alloc{*this};
+    RegAlloc reg_alloc{};
     const Info& info;
     const Profile& profile;
     const RuntimeInfo& runtime_info;
diff --git a/src/shader_recompiler/backend/glasm/reg_alloc.h b/src/shader_recompiler/backend/glasm/reg_alloc.h
index 5a703daf2..82aec66c6 100644
--- a/src/shader_recompiler/backend/glasm/reg_alloc.h
+++ b/src/shader_recompiler/backend/glasm/reg_alloc.h
@@ -86,7 +86,7 @@ struct ScalarF64 : Value {};
 
 class RegAlloc {
 public:
-    RegAlloc(EmitContext& ctx_) : ctx{ctx_} {}
+    RegAlloc() = default;
 
     Register Define(IR::Inst& inst);
 
@@ -142,7 +142,6 @@ private:
 
     void Free(Id id);
 
-    EmitContext& ctx;
     size_t num_used_registers{};
     size_t num_used_long_registers{};
     std::bitset<NUM_REGS> register_use{};
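Note: the two hunks above are one instance of the -Wunused-private-field fixes: RegAlloc stored an EmitContext reference that nothing read, so the member and the constructor parameter feeding it are removed. A rough sketch of the pattern (the class and member names below are illustrative, not the real declarations):

    struct Context {};

    // Before (warns): private field 'ctx' is not used [-Wunused-private-field]
    class AllocatorBefore {
    public:
        explicit AllocatorBefore(Context& ctx_) : ctx{ctx_} {}

    private:
        Context& ctx;
    };

    // After: the unused member and its constructor parameter are gone.
    class AllocatorAfter {
    public:
        AllocatorAfter() = default;
    };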
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
index b11be5bd7..2edcf592e 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
@@ -22,7 +22,7 @@ void Compare(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string
 }
 
 bool IsPrecise(const IR::Inst& inst) {
-    return {inst.Flags<IR::FpControl>().no_contraction};
+    return inst.Flags<IR::FpControl>().no_contraction;
 }
 } // Anonymous namespace
 
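Note: this hunk is the -Wbraced-scalar-init case for return statements: wrapping a scalar in braces ('return {x};') makes clang warn about braces around a scalar initializer, and the fix is to return the expression directly. A small illustrative sketch (the helper below is hypothetical):

    // Before (warns): braces around scalar initializer [-Wbraced-scalar-init]
    // bool HasFlag(unsigned flags, unsigned mask) { return {(flags & mask) != 0}; }

    // After: the bool expression is returned as-is.
    bool HasFlag(unsigned flags, unsigned mask) {
        return (flags & mask) != 0;
    }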
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
index 647804814..3588f052b 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp
@@ -109,7 +109,7 @@ private:
             return;
         }
         if (offset.IsImmediate()) {
-            Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(offset.U32()));
+            Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(static_cast<s32>(offset.U32())));
             return;
         }
         IR::Inst* const inst{offset.InstRecursive()};
@@ -117,16 +117,21 @@ private:
         switch (inst->GetOpcode()) {
         case IR::Opcode::CompositeConstructU32x2:
             Add(spv::ImageOperandsMask::ConstOffset,
-                ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32()));
+                ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                           static_cast<s32>(inst->Arg(1).U32())));
             return;
         case IR::Opcode::CompositeConstructU32x3:
             Add(spv::ImageOperandsMask::ConstOffset,
-                ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32(), inst->Arg(2).U32()));
+                ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                           static_cast<s32>(inst->Arg(1).U32()),
+                           static_cast<s32>(inst->Arg(2).U32())));
             return;
         case IR::Opcode::CompositeConstructU32x4:
             Add(spv::ImageOperandsMask::ConstOffset,
-                ctx.SConst(inst->Arg(0).U32(), inst->Arg(1).U32(), inst->Arg(2).U32(),
-                           inst->Arg(3).U32()));
+                ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
+                           static_cast<s32>(inst->Arg(1).U32()),
+                           static_cast<s32>(inst->Arg(2).U32()),
+                           static_cast<s32>(inst->Arg(3).U32())));
             return;
         default:
             break;
diff --git a/src/shader_recompiler/frontend/ir/opcodes.h b/src/shader_recompiler/frontend/ir/opcodes.h
index 56b001902..9ab108292 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.h
+++ b/src/shader_recompiler/frontend/ir/opcodes.h
@@ -67,7 +67,8 @@ constexpr OpcodeMeta META_TABLE[]{
 };
 constexpr size_t CalculateNumArgsOf(Opcode op) {
     const auto& arg_types{META_TABLE[static_cast<size_t>(op)].arg_types};
-    return std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void));
+    return static_cast<size_t>(
+        std::distance(arg_types.begin(), std::ranges::find(arg_types, Type::Void)));
 }
 
 constexpr u8 NUM_ARGS[]{
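Note: std::distance returns a signed std::ptrdiff_t, so returning it from a function declared to return size_t trips -Wsign-conversion; the cast above makes the sign change explicit. A simplified, hypothetical sketch of the same idea:

    #include <algorithm>
    #include <array>
    #include <cstddef>

    // Counts entries before the first zero; mirrors the CalculateNumArgsOf pattern above.
    constexpr std::size_t CountUntilZero(const std::array<int, 4>& values) {
        // std::distance yields a signed std::ptrdiff_t, so the conversion to size_t is spelled out.
        return static_cast<std::size_t>(
            std::distance(values.begin(), std::find(values.begin(), values.end(), 0)));
    }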
diff --git a/src/shader_recompiler/frontend/maxwell/control_flow.h b/src/shader_recompiler/frontend/maxwell/control_flow.h
index 0e515c3b6..a6bd3e196 100644
--- a/src/shader_recompiler/frontend/maxwell/control_flow.h
+++ b/src/shader_recompiler/frontend/maxwell/control_flow.h
@@ -161,7 +161,6 @@ private:
     Environment& env;
     ObjectPool<Block>& block_pool;
     boost::container::small_vector<Function, 1> functions;
-    FunctionId current_function_id{0};
     Location program_start;
     bool exits_to_dispatcher{};
     Block* dispatch_block{};
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index 06fde0017..221454b99 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -313,9 +313,7 @@ bool NeedsLift(Node goto_stmt, Node label_stmt) noexcept {
 
 class GotoPass {
 public:
-    explicit GotoPass(Flow::CFG& cfg, ObjectPool<IR::Inst>& inst_pool_,
-                      ObjectPool<IR::Block>& block_pool_, ObjectPool<Statement>& stmt_pool)
-        : inst_pool{inst_pool_}, block_pool{block_pool_}, pool{stmt_pool} {
+    explicit GotoPass(Flow::CFG& cfg, ObjectPool<Statement>& stmt_pool) : pool{stmt_pool} {
         std::vector gotos{BuildTree(cfg)};
         for (const Node& goto_stmt : gotos | std::views::reverse) {
             RemoveGoto(goto_stmt);
@@ -616,8 +614,6 @@ private:
         return parent_tree.insert(std::next(loop), *new_goto);
     }
 
-    ObjectPool<IR::Inst>& inst_pool;
-    ObjectPool<IR::Block>& block_pool;
     ObjectPool<Statement>& pool;
     Statement root_stmt{FunctionTag{}};
 };
@@ -864,7 +860,6 @@ private:
     ObjectPool<IR::Block>& block_pool;
     Environment& env;
     IR::AbstractSyntaxList& syntax_list;
-    u32 loop_id{};
 
 // TODO: C++20 Remove this when all compilers support constexpr std::vector
 #if __cpp_lib_constexpr_vector >= 201907
@@ -878,7 +873,7 @@ private:
 IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool,
                                 Environment& env, Flow::CFG& cfg) {
     ObjectPool<Statement> stmt_pool{64};
-    GotoPass goto_pass{cfg, inst_pool, block_pool, stmt_pool};
+    GotoPass goto_pass{cfg, stmt_pool};
     Statement& root{goto_pass.RootStatement()};
     IR::AbstractSyntaxList syntax_list;
     TranslatePass{inst_pool, block_pool, stmt_pool, env, root, syntax_list};
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp
index 66f39e44e..d9f999e05 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp
@@ -59,14 +59,14 @@ IR::U32U64 ApplyIntegerAtomOp(IR::IREmitter& ir, const IR::U32U64& offset, const
 IR::Value ApplyFpAtomOp(IR::IREmitter& ir, const IR::U64& offset, const IR::Value& op_b, AtomOp op,
                         AtomSize size) {
     static constexpr IR::FpControl f16_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::RN},
-        .fmz_mode{IR::FmzMode::DontCare},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::RN,
+        .fmz_mode = IR::FmzMode::DontCare,
     };
     static constexpr IR::FpControl f32_control{
-        .no_contraction{false},
-        .rounding{IR::FpRounding::RN},
-        .fmz_mode{IR::FmzMode::FTZ},
+        .no_contraction = false,
+        .rounding = IR::FpRounding::RN,
+        .fmz_mode = IR::FmzMode::FTZ,
     };
     switch (op) {
     case AtomOp::ADD:
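Note: switching designated initializers from '.field{value}' to '.field = value' is what silences -Wbraced-scalar-init for scalar members; the initialized values are unchanged. A reduced sketch with a stand-in struct (not the real IR::FpControl definition):

    struct FpControlLike {
        bool no_contraction;
        int rounding;
    };

    // Before (warns per scalar member): braces around scalar initializer [-Wbraced-scalar-init]
    // constexpr FpControlLike control{.no_contraction{false}, .rounding{0}};

    // After: plain assignment form inside the designated initializer.
    constexpr FpControlLike control{
        .no_contraction = false,
        .rounding = 0,
    };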
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
index e0e157275..0b8119ddd 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
@@ -104,7 +104,9 @@ void I2F(TranslatorVisitor& v, u64 insn, IR::U32U64 src) {
         .rounding = CastFpRounding(i2f.fp_rounding),
         .fmz_mode = IR::FmzMode::DontCare,
     };
-    auto value{v.ir.ConvertIToF(dst_bitsize, conversion_src_bitsize, is_signed, src, fp_control)};
+    auto value{v.ir.ConvertIToF(static_cast<size_t>(dst_bitsize),
+                                static_cast<size_t>(conversion_src_bitsize), is_signed, src,
+                                fp_control)};
     if (i2f.neg != 0) {
         if (i2f.abs != 0 || !is_signed) {
             // We know the value is positive
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
index 7d7dcc3cb..924fb7a40 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/load_store_attribute.cpp
@@ -80,10 +80,10 @@ void TranslatorVisitor::ALD(u64 insn) {
     for (u32 element = 0; element < num_elements; ++element) {
         if (ald.patch != 0) {
             const IR::Patch patch{offset / 4 + element};
-            F(ald.dest_reg + element, ir.GetPatch(patch));
+            F(ald.dest_reg + static_cast<int>(element), ir.GetPatch(patch));
         } else {
             const IR::Attribute attr{offset / 4 + element};
-            F(ald.dest_reg + element, ir.GetAttribute(attr, vertex));
+            F(ald.dest_reg + static_cast<int>(element), ir.GetAttribute(attr, vertex));
         }
     }
     return;
@@ -92,7 +92,7 @@ void TranslatorVisitor::ALD(u64 insn) {
         throw NotImplementedException("Indirect patch read");
     }
     HandleIndexed(*this, ald.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
-        F(ald.dest_reg + element, ir.GetAttributeIndexed(final_offset, vertex));
+        F(ald.dest_reg + static_cast<int>(element), ir.GetAttributeIndexed(final_offset, vertex));
     });
 }
 
@@ -121,10 +121,10 @@ void TranslatorVisitor::AST(u64 insn) {
     for (u32 element = 0; element < num_elements; ++element) {
         if (ast.patch != 0) {
             const IR::Patch patch{offset / 4 + element};
-            ir.SetPatch(patch, F(ast.src_reg + element));
+            ir.SetPatch(patch, F(ast.src_reg + static_cast<int>(element)));
         } else {
             const IR::Attribute attr{offset / 4 + element};
-            ir.SetAttribute(attr, F(ast.src_reg + element), vertex);
+            ir.SetAttribute(attr, F(ast.src_reg + static_cast<int>(element)), vertex);
         }
     }
     return;
@@ -133,7 +133,7 @@ void TranslatorVisitor::AST(u64 insn) {
         throw NotImplementedException("Indexed tessellation patch store");
     }
     HandleIndexed(*this, ast.index_reg, num_elements, [&](u32 element, IR::U32 final_offset) {
-        ir.SetAttributeIndexed(final_offset, F(ast.src_reg + element), vertex);
+        ir.SetAttributeIndexed(final_offset, F(ast.src_reg + static_cast<int>(element)), vertex);
     });
 }
 
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/surface_atomic_operations.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/surface_atomic_operations.cpp
index 44144f154..63b588ad4 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/surface_atomic_operations.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/surface_atomic_operations.cpp
@@ -69,9 +69,6 @@ TextureType GetType(Type type) {
 }
 
 IR::Value MakeCoords(TranslatorVisitor& v, IR::Reg reg, Type type) {
-    const auto array{[&](int index) {
-        return v.ir.BitFieldExtract(v.X(reg + index), v.ir.Imm32(0), v.ir.Imm32(16));
-    }};
     switch (type) {
     case Type::_1D:
     case Type::BUFFER_1D:
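Note: the deleted 'array' lambda in MakeCoords was assigned but never called, which is exactly what -Wunused-variable flags; the fix is to delete the dead local rather than keep it around. A trivial illustrative sketch:

    int Twice(int reg) {
        // Before (warns): unused variable 'helper' [-Wunused-variable]
        // const auto helper{[&](int index) { return reg + index; }};
        return reg * 2;
    }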
diff --git a/src/shader_recompiler/frontend/maxwell/translate/impl/surface_load_store.cpp b/src/shader_recompiler/frontend/maxwell/translate/impl/surface_load_store.cpp
index 7dc793ad7..681220a8d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate/impl/surface_load_store.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate/impl/surface_load_store.cpp
@@ -160,10 +160,10 @@ unsigned SwizzleMask(u64 swizzle) {
 IR::Value MakeColor(IR::IREmitter& ir, IR::Reg reg, int num_regs) {
     std::array<IR::U32, 4> colors;
     for (int i = 0; i < num_regs; ++i) {
-        colors[i] = ir.GetReg(reg + i);
+        colors[static_cast<size_t>(i)] = ir.GetReg(reg + i);
     }
     for (int i = num_regs; i < 4; ++i) {
-        colors[i] = ir.Imm32(0);
+        colors[static_cast<size_t>(i)] = ir.Imm32(0);
     }
     return ir.CompositeConstruct(colors[0], colors[1], colors[2], colors[3]);
 }
@@ -211,12 +211,12 @@ void TranslatorVisitor::SULD(u64 insn) {
     if (is_typed) {
         const int num_regs{SizeInRegs(suld.size)};
         for (int i = 0; i < num_regs; ++i) {
-            X(dest_reg + i, IR::U32{ir.CompositeExtract(result, i)});
+            X(dest_reg + i, IR::U32{ir.CompositeExtract(result, static_cast<size_t>(i))});
         }
     } else {
         const unsigned mask{SwizzleMask(suld.swizzle)};
         const int bits{std::popcount(mask)};
-        if (!IR::IsAligned(dest_reg, bits == 3 ? 4 : bits)) {
+        if (!IR::IsAligned(dest_reg, bits == 3 ? 4 : static_cast<size_t>(bits))) {
             throw NotImplementedException("Unaligned destination register");
         }
         for (unsigned component = 0; component < 4; ++component) {
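Note: indexing a std::array with an int loop counter is another -Wsign-conversion case, since operator[] takes a size_t; casting the index at the use site, as in MakeColor above, keeps the loop variable signed. A minimal hypothetical sketch:

    #include <array>
    #include <cstddef>

    void FillFirst(std::array<int, 4>& values, int count) {
        for (int i = 0; i < count; ++i) {
            // values[i] would warn; the index conversion is made explicit instead.
            values[static_cast<std::size_t>(i)] = i;
        }
    }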
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index 70449eeca..f9de17b25 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -314,8 +314,8 @@ std::optional<StorageBufferAddr> Track(const IR::Value& value, const Bias* bias)
         return std::nullopt;
     }
     const StorageBufferAddr storage_buffer{
-        .index{index.U32()},
-        .offset{offset.U32()},
+        .index = index.U32(),
+        .offset = offset.U32(),
     };
     if (!Common::IsAligned(storage_buffer.offset, 16)) {
         // The SSBO pointer has to be aligned
@@ -484,7 +484,7 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program) {
             .cbuf_index = storage_buffer.index,
             .cbuf_offset = storage_buffer.offset,
             .count = 1,
-            .is_written{info.writes.contains(storage_buffer)},
+            .is_written = info.writes.contains(storage_buffer),
         });
     }
     for (const StorageInst& storage_inst : info.to_replace) {