author     Billy Laws    2023-01-05 22:10:21 +0000
committer  Billy Laws    2023-01-05 22:18:10 +0000
commit     58fec43768c837c63453e87df8f337a2d139324a
tree       161a20fc84cdc3c3086a470ca7afd09fe9f56169 /src
parent     externals: Update sirit
Run clang-format
Diffstat (limited to 'src')
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp      | 11
-rw-r--r--  src/shader_recompiler/backend/spirv/spirv_emit_context.cpp              |  5
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate_program.cpp            | 32
-rw-r--r--  src/shader_recompiler/host_translate_info.h                             |  5
-rw-r--r--  src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp   |  3
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h                              |  3
6 files changed, 35 insertions, 24 deletions
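
The reformatting in the hunks below is consistent with a clang-format style along these lines. This is a sketch inferred from the visible changes (wrap column, pointer and reference placement, blank-line collapsing), not a copy of the project's actual .clang-format at the repository root:

# Inferred sketch only; the authoritative settings are the project's own .clang-format.
ColumnLimit: 100         # lines longer than 100 columns (ctx.OpLoad, ir.SetAttribute, ...) are wrapped
IndentWidth: 4           # four-space indentation
PointerAlignment: Left   # "IR::Block *const block" -> "IR::Block* const block", "IR::Inst &inst" -> "IR::Inst& inst"
MaxEmptyLinesToKeep: 1   # the doubled blank line in GenerateGeometryPassthrough collapses to one

A typical tree-wide application of such a style is clang-format -i over the touched sources; the exact invocation used for this commit is not recorded here.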
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 1590debc4..0cd87a48f 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -321,11 +321,12 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     case IR::Attribute::PositionY:
     case IR::Attribute::PositionZ:
     case IR::Attribute::PositionW:
-        return ctx.OpLoad(ctx.F32[1], ctx.need_input_position_indirect ?
-                          AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
-                                      ctx.u32_zero_value, ctx.Const(element))
-                          : AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
-                                        ctx.Const(element)));
+        return ctx.OpLoad(
+            ctx.F32[1],
+            ctx.need_input_position_indirect
+                ? AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position, ctx.u32_zero_value,
+                              ctx.Const(element))
+                : AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position, ctx.Const(element)));
     case IR::Attribute::InstanceId:
         if (ctx.profile.support_vertex_instance_id) {
             return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id));
diff --git a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
index f874622b8..a0c155fdb 100644
--- a/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/spirv_emit_context.cpp
@@ -729,7 +729,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
                         else
                             return OpAccessChain(input_f32, input_position, u32_zero_value,
                                                  masked_index);
-                    } else {
+                    } else {
                         if (is_array)
                             return OpAccessChain(input_f32, input_position, vertex, masked_index);
                         else
@@ -1390,7 +1390,8 @@ void EmitContext::DefineInputs(const IR::Program& program) {
                            static_cast<unsigned>(spv::BuiltIn::Position));
             Decorate(input_position_struct, spv::Decoration::Block);
         } else {
-            const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord : spv::BuiltIn::Position};
+            const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::FragCoord
+                                                    : spv::BuiltIn::Position};
             input_position = DefineInput(*this, F32[4], true, built_in);
 
             if (profile.support_geometry_shader_passthrough) {
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
index 4a0ccceb7..a3b99e24d 100644
--- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
@@ -172,7 +172,10 @@ std::map<IR::Attribute, IR::Attribute> GenerateLegacyToGenericMappings(
     return mapping;
 }
 
-void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, const Shader::VaryingState &passthrough_mask, bool passthrough_position, std::optional<IR::Attribute> passthrough_layer_attr) {
+void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program,
+                             const Shader::VaryingState& passthrough_mask,
+                             bool passthrough_position,
+                             std::optional<IR::Attribute> passthrough_layer_attr) {
     for (u32 i = 0; i < program.output_vertices; i++) {
         // Assign generics from input
         for (u32 j = 0; j < 32; j++) {
@@ -198,7 +201,8 @@ void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, cons
 
         if (passthrough_layer_attr) {
             // Assign layer
-            ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(*passthrough_layer_attr), ir.Imm32(0));
+            ir.SetAttribute(IR::Attribute::Layer, ir.GetAttribute(*passthrough_layer_attr),
+                            ir.Imm32(0));
         }
 
         // Emit vertex
@@ -209,21 +213,23 @@ void EmitGeometryPassthrough(IR::IREmitter& ir, const IR::Program& program, cons
 
 u32 GetOutputTopologyVertices(OutputTopology output_topology) {
     switch (output_topology) {
-        case OutputTopology::PointList:
-            return 1;
-        case OutputTopology::LineStrip:
-            return 2;
-        default:
-            return 3;
+    case OutputTopology::PointList:
+        return 1;
+    case OutputTopology::LineStrip:
+        return 2;
+    default:
+        return 3;
     }
 }
 
 void LowerGeometryPassthrough(const IR::Program& program, const HostTranslateInfo& host_info) {
-    for (IR::Block *const block : program.blocks) {
-        for (IR::Inst &inst : block->Instructions()) {
+    for (IR::Block* const block : program.blocks) {
+        for (IR::Inst& inst : block->Instructions()) {
             if (inst.GetOpcode() == IR::Opcode::Epilogue) {
                 IR::IREmitter ir{*block, IR::Block::InstructionList::s_iterator_to(inst)};
-                EmitGeometryPassthrough(ir, program, program.info.passthrough, program.info.passthrough.AnyComponent(IR::Attribute::PositionX), {});
+                EmitGeometryPassthrough(
+                    ir, program, program.info.passthrough,
+                    program.info.passthrough.AnyComponent(IR::Attribute::PositionX), {});
             }
         }
     }
@@ -407,7 +413,6 @@ IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
     program.output_topology = output_topology;
     program.output_vertices = GetOutputTopologyVertices(output_topology);
 
-
     program.is_geometry_passthrough = false;
     program.info.loads.mask = source_program.info.stores.mask;
     program.info.stores.mask = source_program.info.stores.mask;
@@ -420,7 +425,8 @@ IR::Program GenerateGeometryPassthrough(ObjectPool<IR::Inst>& inst_pool,
     node.data.block = current_block;
 
     IR::IREmitter ir{*current_block};
-    EmitGeometryPassthrough(ir, program, program.info.stores, true, source_program.info.emulated_layer);
+    EmitGeometryPassthrough(ir, program, program.info.stores, true,
+                            source_program.info.emulated_layer);
 
     IR::Block* return_block{block_pool.Create(inst_pool)};
     IR::IREmitter{*return_block}.Epilogue();
diff --git a/src/shader_recompiler/host_translate_info.h b/src/shader_recompiler/host_translate_info.h
index dc402ee47..55fc48768 100644
--- a/src/shader_recompiler/host_translate_info.h
+++ b/src/shader_recompiler/host_translate_info.h
@@ -15,8 +15,9 @@ struct HostTranslateInfo {
     bool needs_demote_reorder{}; ///< True when the device needs DemoteToHelperInvocation reordered
     bool support_snorm_render_buffer{};  ///< True when the device supports SNORM render buffers
     bool support_viewport_index_layer{}; ///< True when the device supports gl_Layer in VS
-    u32 min_ssbo_alignment{}; ///< Minimum alignment supported by the device for SSBOs
-    bool support_geometry_shader_passthrough{}; ///< True when the device supports geometry passthrough shaders
+    u32 min_ssbo_alignment{};            ///< Minimum alignment supported by the device for SSBOs
+    bool support_geometry_shader_passthrough{}; ///< True when the device supports geometry
+                                                ///< passthrough shaders
 };
 
 } // namespace Shader
diff --git a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
index f8d20af3c..9101722ba 100644
--- a/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
+++ b/src/shader_recompiler/ir_opt/global_memory_to_storage_buffer_pass.cpp
@@ -538,7 +538,8 @@ void GlobalMemoryToStorageBufferPass(IR::Program& program, const HostTranslateIn
         const IR::U32 index{IR::Value{static_cast<u32>(info.set.index_of(it))}};
         IR::Block* const block{storage_inst.block};
         IR::Inst* const inst{storage_inst.inst};
-        const IR::U32 offset{StorageOffset(*block, *inst, storage_buffer, host_info.min_ssbo_alignment)};
+        const IR::U32 offset{
+            StorageOffset(*block, *inst, storage_buffer, host_info.min_ssbo_alignment)};
         Replace(*block, *inst, index, offset);
     }
 }
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index e6414df78..627917ab6 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -1941,7 +1941,8 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
     const u32 alignment = runtime.GetStorageBufferAlignment();
 
     const GPUVAddr aligned_gpu_addr = Common::AlignDown(gpu_addr, alignment);
-    const u32 aligned_size = Common::AlignUp(static_cast<u32>(gpu_addr - aligned_gpu_addr) + size, alignment);
+    const u32 aligned_size =
+        Common::AlignUp(static_cast<u32>(gpu_addr - aligned_gpu_addr) + size, alignment);
 
     const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(aligned_gpu_addr);
     if (!cpu_addr || size == 0) {