-rw-r--r--  src/shader_recompiler/CMakeLists.txt | 1
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_context.cpp | 15
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.cpp | 6
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp | 6
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_context.cpp | 58
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl.cpp | 2
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp | 2
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_special.cpp | 4
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.cpp | 97
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_context.h | 2
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv.cpp | 19
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp | 2
-rw-r--r--  src/shader_recompiler/environment.h | 5
-rw-r--r--  src/shader_recompiler/frontend/ir/attribute.h | 6
-rw-r--r--  src/shader_recompiler/frontend/ir/program.h | 1
-rw-r--r--  src/shader_recompiler/frontend/maxwell/translate_program.cpp | 18
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp | 202
-rw-r--r--  src/shader_recompiler/profile.h | 1
-rw-r--r--  src/shader_recompiler/program_header.h | 62
-rw-r--r--  src/shader_recompiler/runtime_info.h | 3
-rw-r--r--  src/shader_recompiler/shader_info.h | 37
-rw-r--r--  src/shader_recompiler/varying_state.h | 69
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 7
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 7
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 16
-rw-r--r--  src/video_core/shader_environment.cpp | 10
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp | 6
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.h | 6
29 files changed, 345 insertions, 331 deletions
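
Note: varying_state.h is added by this change but its contents are not part of this excerpt. Judging only from the call sites in the hunks below (the public mask member, operator[], Set, Generic, AnyComponent and ClipDistances, plus Legacy and FixedFunctionTexture used by the GLSL backend), the new helper is roughly of the following shape; the bodies here are a sketch inferred from usage, not the actual file:

    #include <bitset>
    #include <cstddef>

    #include "shader_recompiler/frontend/ir/attribute.h"

    namespace Shader {
    // Sketch of a VaryingState-style helper; member and method names come from
    // the call sites in this diff, the bodies are assumptions.
    struct VaryingState {
        std::bitset<256> mask{};

        bool operator[](IR::Attribute attribute) const noexcept {
            return mask[static_cast<size_t>(attribute)];
        }
        void Set(IR::Attribute attribute, bool value) noexcept {
            mask[static_cast<size_t>(attribute)] = value;
        }
        bool AnyComponent(IR::Attribute base) const noexcept {
            // True when any of the four consecutive components (X..W) is used
            return (*this)[base + 0] || (*this)[base + 1] || (*this)[base + 2] || (*this)[base + 3];
        }
        bool Generic(size_t index) const noexcept {
            return AnyComponent(IR::Attribute::Generic0X + index * 4);
        }
        bool ClipDistances() const noexcept {
            return AnyComponent(IR::Attribute::ClipDistance0) ||
                   AnyComponent(IR::Attribute::ClipDistance4);
        }
    };
    } // namespace Shader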
diff --git a/src/shader_recompiler/CMakeLists.txt b/src/shader_recompiler/CMakeLists.txt
index 3b5708cb9..b5b7e5e83 100644
--- a/src/shader_recompiler/CMakeLists.txt
+++ b/src/shader_recompiler/CMakeLists.txt
@@ -229,6 +229,7 @@ add_library(shader_recompiler STATIC
     program_header.h
     runtime_info.h
     shader_info.h
+    varying_state.h
 )
 
 target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit)
diff --git a/src/shader_recompiler/backend/glasm/emit_context.cpp b/src/shader_recompiler/backend/glasm/emit_context.cpp
index 21e14867c..80dad9ff3 100644
--- a/src/shader_recompiler/backend/glasm/emit_context.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_context.cpp
@@ -83,14 +83,13 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
         break;
     }
     const std::string_view attr_stage{stage == Stage::Fragment ? "fragment" : "vertex"};
-    for (size_t index = 0; index < info.input_generics.size(); ++index) {
-        const auto& generic{info.input_generics[index]};
-        if (generic.used) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+        if (info.loads.Generic(index)) {
             Add("{}ATTRIB in_attr{}[]={{{}.attrib[{}..{}]}};",
-                InterpDecorator(generic.interpolation), index, attr_stage, index, index);
+                InterpDecorator(info.interpolation[index]), index, attr_stage, index, index);
         }
     }
-    if (IsInputArray(stage) && info.loads_position) {
+    if (IsInputArray(stage) && info.loads.AnyComponent(IR::Attribute::PositionX)) {
         Add("ATTRIB vertex_position=vertex.position;");
     }
     if (info.uses_invocation_id) {
@@ -102,7 +101,7 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
     if (info.stores_tess_level_inner) {
         Add("OUTPUT result_patch_tessinner[]={{result.patch.tessinner[0..1]}};");
     }
-    if (info.stores_clip_distance) {
+    if (info.stores.ClipDistances()) {
         Add("OUTPUT result_clip[]={{result.clip[0..7]}};");
     }
     for (size_t index = 0; index < info.uses_patches.size(); ++index) {
@@ -124,8 +123,8 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
124 Add("OUTPUT frag_color{}=result.color[{}];", index, index); 123 Add("OUTPUT frag_color{}=result.color[{}];", index, index);
125 } 124 }
126 } 125 }
127 for (size_t index = 0; index < info.stores_generics.size(); ++index) { 126 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
128 if (info.stores_generics[index]) { 127 if (info.stores.Generic(index)) {
129 Add("OUTPUT out_attr{}[]={{result.attrib[{}..{}]}};", index, index, index); 128 Add("OUTPUT out_attr{}[]={{result.attrib[{}..{}]}};", index, index, index);
130 } 129 }
131 } 130 }
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index 79314f130..2b96977b3 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -296,8 +296,10 @@ void SetupOptions(const IR::Program& program, const Profile& profile,
     if (info.uses_sparse_residency) {
         header += "OPTION EXT_sparse_texture2;";
     }
-    if (((info.stores_viewport_index || info.stores_layer) && stage != Stage::Geometry) ||
-        info.stores_viewport_mask) {
+    const bool stores_viewport_layer{info.stores[IR::Attribute::ViewportIndex] ||
+                                     info.stores[IR::Attribute::Layer]};
+    if ((stage != Stage::Geometry && stores_viewport_layer) ||
+        info.stores[IR::Attribute::ViewportMask]) {
         if (profile.support_viewport_index_layer_non_geometry) {
             header += "OPTION NV_viewport_array2;";
         }
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index bc195d248..02c9dc6d7 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -261,7 +261,7 @@ void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset,
261 fmt::format("{}.z", value), fmt::format("{}.w", value)}; 261 fmt::format("{}.z", value), fmt::format("{}.w", value)};
262 read(compare_index, values); 262 read(compare_index, values);
263 }}; 263 }};
264 if (ctx.info.loads_position) { 264 if (ctx.info.loads.AnyComponent(IR::Attribute::PositionX)) {
265 const u32 index{static_cast<u32>(IR::Attribute::PositionX)}; 265 const u32 index{static_cast<u32>(IR::Attribute::PositionX)};
266 if (IsInputArray(ctx.stage)) { 266 if (IsInputArray(ctx.stage)) {
267 read_swizzled(index, fmt::format("vertex_position{}", VertexIndex(ctx, vertex))); 267 read_swizzled(index, fmt::format("vertex_position{}", VertexIndex(ctx, vertex)));
@@ -269,8 +269,8 @@ void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset,
269 read_swizzled(index, fmt::format("{}.position", ctx.attrib_name)); 269 read_swizzled(index, fmt::format("{}.position", ctx.attrib_name));
270 } 270 }
271 } 271 }
272 for (u32 index = 0; index < ctx.info.input_generics.size(); ++index) { 272 for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
273 if (!ctx.info.input_generics[index].used) { 273 if (!ctx.info.loads.Generic(index)) {
274 continue; 274 continue;
275 } 275 }
276 read_swizzled(index, fmt::format("in_attr{}{}[0]", index, VertexIndex(ctx, vertex))); 276 read_swizzled(index, fmt::format("in_attr{}{}[0]", index, VertexIndex(ctx, vertex)));
diff --git a/src/shader_recompiler/backend/glsl/emit_context.cpp b/src/shader_recompiler/backend/glsl/emit_context.cpp
index 14c009535..0d7f7bc3b 100644
--- a/src/shader_recompiler/backend/glsl/emit_context.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_context.cpp
@@ -212,22 +212,22 @@ std::string_view OutputPrimitive(OutputTopology topology) {
 }
 
 void SetupLegacyOutPerVertex(EmitContext& ctx, std::string& header) {
-    if (!ctx.info.stores_legacy_varyings) {
+    if (!ctx.info.stores.Legacy()) {
         return;
     }
-    if (ctx.info.stores_fixed_fnc_textures) {
+    if (ctx.info.stores.FixedFunctionTexture()) {
         header += "vec4 gl_TexCoord[8];";
     }
-    if (ctx.info.stores_color_front_diffuse) {
+    if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
         header += "vec4 gl_FrontColor;";
     }
-    if (ctx.info.stores_color_front_specular) {
+    if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontSpecularR)) {
         header += "vec4 gl_FrontSecondaryColor;";
     }
-    if (ctx.info.stores_color_back_diffuse) {
+    if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackDiffuseR)) {
         header += "vec4 gl_BackColor;";
     }
-    if (ctx.info.stores_color_back_specular) {
+    if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackSpecularR)) {
         header += "vec4 gl_BackSecondaryColor;";
     }
 }
@@ -237,32 +237,32 @@ void SetupOutPerVertex(EmitContext& ctx, std::string& header) {
         return;
     }
     header += "out gl_PerVertex{vec4 gl_Position;";
-    if (ctx.info.stores_point_size) {
+    if (ctx.info.stores[IR::Attribute::PointSize]) {
         header += "float gl_PointSize;";
     }
-    if (ctx.info.stores_clip_distance) {
+    if (ctx.info.stores.ClipDistances()) {
         header += "float gl_ClipDistance[];";
     }
-    if (ctx.info.stores_viewport_index && ctx.profile.support_viewport_index_layer_non_geometry &&
-        ctx.stage != Stage::Geometry) {
+    if (ctx.info.stores[IR::Attribute::ViewportIndex] &&
+        ctx.profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
         header += "int gl_ViewportIndex;";
     }
     SetupLegacyOutPerVertex(ctx, header);
     header += "};";
-    if (ctx.info.stores_viewport_index && ctx.stage == Stage::Geometry) {
+    if (ctx.info.stores[IR::Attribute::ViewportIndex] && ctx.stage == Stage::Geometry) {
         header += "out int gl_ViewportIndex;";
     }
 }
 
 void SetupLegacyInPerFragment(EmitContext& ctx, std::string& header) {
-    if (!ctx.info.loads_legacy_varyings) {
+    if (!ctx.info.loads.Legacy()) {
         return;
     }
     header += "in gl_PerFragment{";
-    if (ctx.info.loads_fixed_fnc_textures) {
+    if (ctx.info.loads.FixedFunctionTexture()) {
         header += "vec4 gl_TexCoord[8];";
     }
-    if (ctx.info.loads_color_front_diffuse) {
+    if (ctx.info.loads.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
         header += "vec4 gl_Color;";
     }
     header += "};";
@@ -325,14 +325,13 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
     SetupOutPerVertex(*this, header);
     SetupLegacyInPerFragment(*this, header);
 
-    for (size_t index = 0; index < info.input_generics.size(); ++index) {
-        const auto& generic{info.input_generics[index]};
-        if (!generic.used || !runtime_info.previous_stage_stores_generic[index]) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+        if (!info.loads.Generic(index) || !runtime_info.previous_stage_stores.Generic(index)) {
             continue;
         }
-        header +=
-            fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
-                        InterpDecorator(generic.interpolation), index, InputArrayDecorator(stage));
+        header += fmt::format("layout(location={}){}in vec4 in_attr{}{};", index,
+                              InterpDecorator(info.interpolation[index]), index,
+                              InputArrayDecorator(stage));
     }
     for (size_t index = 0; index < info.uses_patches.size(); ++index) {
         if (!info.uses_patches[index]) {
@@ -349,11 +348,10 @@ EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile
349 header += fmt::format("layout(location={})out vec4 frag_color{};", index, index); 348 header += fmt::format("layout(location={})out vec4 frag_color{};", index, index);
350 } 349 }
351 } 350 }
352 for (size_t index = 0; index < info.stores_generics.size(); ++index) { 351 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
353 if (!info.stores_generics[index]) { 352 if (info.stores.Generic(index)) {
354 continue; 353 DefineGenericOutput(index, program.invocations);
355 } 354 }
356 DefineGenericOutput(index, program.invocations);
357 } 355 }
358 DefineConstantBuffers(bindings); 356 DefineConstantBuffers(bindings);
359 DefineStorageBuffers(bindings); 357 DefineStorageBuffers(bindings);
@@ -398,14 +396,14 @@ void EmitContext::SetupExtensions() {
398 header += "#extension GL_NV_shader_thread_shuffle : enable\n"; 396 header += "#extension GL_NV_shader_thread_shuffle : enable\n";
399 } 397 }
400 } 398 }
401 if ((info.stores_viewport_index || info.stores_layer) && 399 if ((info.stores[IR::Attribute::ViewportIndex] || info.stores[IR::Attribute::Layer]) &&
402 profile.support_viewport_index_layer_non_geometry && stage != Stage::Geometry) { 400 profile.support_viewport_index_layer_non_geometry && stage != Stage::Geometry) {
403 header += "#extension GL_ARB_shader_viewport_layer_array : enable\n"; 401 header += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
404 } 402 }
405 if (info.uses_sparse_residency && profile.support_gl_sparse_textures) { 403 if (info.uses_sparse_residency && profile.support_gl_sparse_textures) {
406 header += "#extension GL_ARB_sparse_texture2 : enable\n"; 404 header += "#extension GL_ARB_sparse_texture2 : enable\n";
407 } 405 }
408 if (info.stores_viewport_mask && profile.support_viewport_mask) { 406 if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
409 header += "#extension GL_NV_viewport_array2 : enable\n"; 407 header += "#extension GL_NV_viewport_array2 : enable\n";
410 } 408 }
411 if (info.uses_typeless_image_reads) { 409 if (info.uses_typeless_image_reads) {
@@ -535,20 +533,20 @@ void EmitContext::DefineHelperFunctions() {
535 fmt::format("float IndexedAttrLoad(int offset{}){{int base_index=offset>>2;uint " 533 fmt::format("float IndexedAttrLoad(int offset{}){{int base_index=offset>>2;uint "
536 "masked_index=uint(base_index)&3u;switch(base_index>>2){{", 534 "masked_index=uint(base_index)&3u;switch(base_index>>2){{",
537 vertex_arg)}; 535 vertex_arg)};
538 if (info.loads_position) { 536 if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
539 const auto position_idx{is_array ? "gl_in[vertex]." : ""}; 537 const auto position_idx{is_array ? "gl_in[vertex]." : ""};
540 func += fmt::format("case {}:return {}{}[masked_index];", 538 func += fmt::format("case {}:return {}{}[masked_index];",
541 static_cast<u32>(IR::Attribute::PositionX) >> 2, position_idx, 539 static_cast<u32>(IR::Attribute::PositionX) >> 2, position_idx,
542 position_name); 540 position_name);
543 } 541 }
544 const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2; 542 const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
545 for (u32 i = 0; i < info.input_generics.size(); ++i) { 543 for (u32 index = 0; index < IR::NUM_GENERICS; ++index) {
546 if (!info.input_generics[i].used) { 544 if (!info.loads.Generic(index)) {
547 continue; 545 continue;
548 } 546 }
549 const auto vertex_idx{is_array ? "[vertex]" : ""}; 547 const auto vertex_idx{is_array ? "[vertex]" : ""};
550 func += fmt::format("case {}:return in_attr{}{}[masked_index];", 548 func += fmt::format("case {}:return in_attr{}{}[masked_index];",
551 base_attribute_value + i, i, vertex_idx); 549 base_attribute_value + index, index, vertex_idx);
552 } 550 }
553 func += "default: return 0.0;}}"; 551 func += "default: return 0.0;}}";
554 header += func; 552 header += func;
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
index 32c4f1da2..8deaf5760 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
@@ -171,7 +171,7 @@ void EmitCode(EmitContext& ctx, const IR::Program& program) {
 }
 
 std::string GlslVersionSpecifier(const EmitContext& ctx) {
-    if (ctx.uses_y_direction || ctx.info.stores_legacy_varyings || ctx.info.loads_legacy_varyings) {
+    if (ctx.uses_y_direction || ctx.info.stores.Legacy() || ctx.info.loads.Legacy()) {
         return " compatibility";
     }
     return "";
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 3d2ba2eee..16e2a8502 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -179,7 +179,7 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
     const char swizzle{"xyzw"[element]};
     if (IR::IsGeneric(attr)) {
         const u32 index{IR::GenericAttributeIndex(attr)};
-        if (!ctx.runtime_info.previous_stage_stores_generic[index]) {
+        if (!ctx.runtime_info.previous_stage_stores.Generic(index)) {
             ctx.AddF32("{}=0.f;", inst, attr);
             return;
         }
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
index 6420aaa21..298881c7b 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
@@ -20,8 +20,8 @@ void InitializeOutputVaryings(EmitContext& ctx) {
     if (ctx.stage == Stage::VertexB || ctx.stage == Stage::Geometry) {
         ctx.Add("gl_Position=vec4(0,0,0,1);");
     }
-    for (size_t index = 0; index < ctx.info.stores_generics.size(); ++index) {
-        if (!ctx.info.stores_generics[index]) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+        if (!ctx.info.stores.Generic(index)) {
             continue;
         }
         const auto& info_array{ctx.output_generics.at(index)};
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 4c6501129..af4fb0c69 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -557,7 +557,7 @@ void EmitContext::DefineCommonConstants() {
 }
 
 void EmitContext::DefineInterfaces(const IR::Program& program) {
-    DefineInputs(program.info);
+    DefineInputs(program);
     DefineOutputs(program);
 }
 
@@ -693,16 +693,16 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
         std::vector<Sirit::Literal> literals;
         std::vector<Id> labels;
-        if (info.loads_position) {
+        if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
             literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
             labels.push_back(OpLabel());
         }
         const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
-        for (u32 i = 0; i < info.input_generics.size(); ++i) {
-            if (!info.input_generics[i].used) {
+        for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
+            if (!info.loads.Generic(index)) {
                 continue;
             }
-            literals.push_back(base_attribute_value + i);
+            literals.push_back(base_attribute_value + index);
             labels.push_back(OpLabel());
         }
         OpSelectionMerge(end_block, spv::SelectionControlMask::MaskNone);
@@ -710,7 +710,7 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         AddLabel(default_label);
         OpReturnValue(Const(0.0f));
         size_t label_index{0};
-        if (info.loads_position) {
+        if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
             AddLabel(labels[label_index]);
             const Id pointer{is_array
                                  ? OpAccessChain(input_f32, input_position, vertex, masked_index)
@@ -719,18 +719,18 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
             OpReturnValue(result);
             ++label_index;
         }
-        for (size_t i = 0; i < info.input_generics.size(); i++) {
-            if (!info.input_generics[i].used) {
+        for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+            if (!info.loads.Generic(index)) {
                 continue;
             }
             AddLabel(labels[label_index]);
-            const auto type{AttrTypes(*this, static_cast<u32>(i))};
+            const auto type{AttrTypes(*this, static_cast<u32>(index))};
             if (!type) {
                 OpReturnValue(Const(0.0f));
                 ++label_index;
                 continue;
             }
-            const Id generic_id{input_generics.at(i)};
+            const Id generic_id{input_generics.at(index)};
             const Id pointer{is_array
                                  ? OpAccessChain(type->pointer, generic_id, vertex, masked_index)
                                  : OpAccessChain(type->pointer, generic_id, masked_index)};
@@ -758,19 +758,19 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         const Id compare_index{OpShiftRightArithmetic(U32[1], base_index, Const(2U))};
         std::vector<Sirit::Literal> literals;
         std::vector<Id> labels;
-        if (info.stores_position) {
+        if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
             literals.push_back(static_cast<u32>(IR::Attribute::PositionX) >> 2);
             labels.push_back(OpLabel());
         }
         const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
-        for (size_t i = 0; i < info.stores_generics.size(); i++) {
-            if (!info.stores_generics[i]) {
+        for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+            if (!info.stores.Generic(index)) {
                 continue;
             }
-            literals.push_back(base_attribute_value + static_cast<u32>(i));
+            literals.push_back(base_attribute_value + static_cast<u32>(index));
             labels.push_back(OpLabel());
         }
-        if (info.stores_clip_distance) {
+        if (info.stores.ClipDistances()) {
             literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance0) >> 2);
             labels.push_back(OpLabel());
             literals.push_back(static_cast<u32>(IR::Attribute::ClipDistance4) >> 2);
@@ -781,28 +781,28 @@ void EmitContext::DefineAttributeMemAccess(const Info& info) {
         AddLabel(default_label);
         OpReturn();
         size_t label_index{0};
-        if (info.stores_position) {
+        if (info.stores.AnyComponent(IR::Attribute::PositionX)) {
             AddLabel(labels[label_index]);
             const Id pointer{OpAccessChain(output_f32, output_position, masked_index)};
             OpStore(pointer, store_value);
             OpReturn();
             ++label_index;
         }
-        for (size_t i = 0; i < info.stores_generics.size(); ++i) {
-            if (!info.stores_generics[i]) {
+        for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+            if (!info.stores.Generic(index)) {
                 continue;
             }
-            if (output_generics[i][0].num_components != 4) {
+            if (output_generics[index][0].num_components != 4) {
                 throw NotImplementedException("Physical stores and transform feedbacks");
             }
             AddLabel(labels[label_index]);
-            const Id generic_id{output_generics[i][0].id};
+            const Id generic_id{output_generics[index][0].id};
             const Id pointer{OpAccessChain(output_f32, generic_id, masked_index)};
             OpStore(pointer, store_value);
             OpReturn();
             ++label_index;
         }
-        if (info.stores_clip_distance) {
+        if (info.stores.ClipDistances()) {
             AddLabel(labels[label_index]);
             const Id pointer{OpAccessChain(output_f32, clip_distances, masked_index)};
             OpStore(pointer, store_value);
@@ -1146,7 +1146,10 @@ void EmitContext::DefineImages(const Info& info, u32& binding) {
     }
 }
 
-void EmitContext::DefineInputs(const Info& info) {
+void EmitContext::DefineInputs(const IR::Program& program) {
+    const Info& info{program.info};
+    const VaryingState loads{info.loads.mask | info.passthrough.mask};
+
     if (info.uses_workgroup_id) {
         workgroup_id = DefineInput(*this, U32[3], false, spv::BuiltIn::WorkgroupId);
     }
@@ -1183,15 +1186,20 @@ void EmitContext::DefineInputs(const Info& info) {
         fswzadd_lut_b =
             ConstantComposite(F32[4], f32_minus_one, f32_minus_one, f32_one, f32_minus_one);
     }
-    if (info.loads_primitive_id) {
+    if (loads[IR::Attribute::PrimitiveId]) {
         primitive_id = DefineInput(*this, U32[1], false, spv::BuiltIn::PrimitiveId);
     }
-    if (info.loads_position) {
+    if (loads.AnyComponent(IR::Attribute::PositionX)) {
         const bool is_fragment{stage != Stage::Fragment};
         const spv::BuiltIn built_in{is_fragment ? spv::BuiltIn::Position : spv::BuiltIn::FragCoord};
         input_position = DefineInput(*this, F32[4], true, built_in);
+        if (profile.support_geometry_shader_passthrough) {
+            if (info.passthrough.AnyComponent(IR::Attribute::PositionX)) {
+                Decorate(input_position, spv::Decoration::PassthroughNV);
+            }
+        }
     }
-    if (info.loads_instance_id) {
+    if (loads[IR::Attribute::InstanceId]) {
         if (profile.support_vertex_instance_id) {
             instance_id = DefineInput(*this, U32[1], true, spv::BuiltIn::InstanceId);
         } else {
@@ -1199,7 +1207,7 @@ void EmitContext::DefineInputs(const Info& info) {
             base_instance = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseInstance);
         }
     }
-    if (info.loads_vertex_id) {
+    if (loads[IR::Attribute::VertexId]) {
         if (profile.support_vertex_instance_id) {
             vertex_id = DefineInput(*this, U32[1], true, spv::BuiltIn::VertexId);
         } else {
@@ -1207,24 +1215,24 @@ void EmitContext::DefineInputs(const Info& info) {
             base_vertex = DefineInput(*this, U32[1], true, spv::BuiltIn::BaseVertex);
         }
     }
-    if (info.loads_front_face) {
+    if (loads[IR::Attribute::FrontFace]) {
         front_face = DefineInput(*this, U1, true, spv::BuiltIn::FrontFacing);
     }
-    if (info.loads_point_coord) {
+    if (loads[IR::Attribute::PointSpriteS] || loads[IR::Attribute::PointSpriteT]) {
         point_coord = DefineInput(*this, F32[2], true, spv::BuiltIn::PointCoord);
     }
-    if (info.loads_tess_coord) {
+    if (loads[IR::Attribute::TessellationEvaluationPointU] ||
+        loads[IR::Attribute::TessellationEvaluationPointV]) {
         tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
     }
-    for (size_t index = 0; index < info.input_generics.size(); ++index) {
-        if (!runtime_info.previous_stage_stores_generic[index]) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+        const AttributeType input_type{runtime_info.generic_input_types[index]};
+        if (!runtime_info.previous_stage_stores.Generic(index)) {
             continue;
         }
-        const InputVarying generic{info.input_generics[index]};
-        if (!generic.used) {
+        if (!loads.Generic(index)) {
             continue;
         }
-        const AttributeType input_type{runtime_info.generic_input_types[index]};
         if (input_type == AttributeType::Disabled) {
             continue;
         }
@@ -1234,10 +1242,13 @@ void EmitContext::DefineInputs(const Info& info) {
1234 Name(id, fmt::format("in_attr{}", index)); 1242 Name(id, fmt::format("in_attr{}", index));
1235 input_generics[index] = id; 1243 input_generics[index] = id;
1236 1244
1245 if (info.passthrough.Generic(index) && profile.support_geometry_shader_passthrough) {
1246 Decorate(id, spv::Decoration::PassthroughNV);
1247 }
1237 if (stage != Stage::Fragment) { 1248 if (stage != Stage::Fragment) {
1238 continue; 1249 continue;
1239 } 1250 }
1240 switch (generic.interpolation) { 1251 switch (info.interpolation[index]) {
1241 case Interpolation::Smooth: 1252 case Interpolation::Smooth:
1242 // Default 1253 // Default
1243 // Decorate(id, spv::Decoration::Smooth); 1254 // Decorate(id, spv::Decoration::Smooth);
@@ -1266,42 +1277,42 @@ void EmitContext::DefineInputs(const Info& info) {
 void EmitContext::DefineOutputs(const IR::Program& program) {
     const Info& info{program.info};
     const std::optional<u32> invocations{program.invocations};
-    if (info.stores_position || stage == Stage::VertexB) {
+    if (info.stores.AnyComponent(IR::Attribute::PositionX) || stage == Stage::VertexB) {
         output_position = DefineOutput(*this, F32[4], invocations, spv::BuiltIn::Position);
     }
-    if (info.stores_point_size || runtime_info.fixed_state_point_size) {
+    if (info.stores[IR::Attribute::PointSize] || runtime_info.fixed_state_point_size) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing PointSize in fragment stage");
         }
         output_point_size = DefineOutput(*this, F32[1], invocations, spv::BuiltIn::PointSize);
     }
-    if (info.stores_clip_distance) {
+    if (info.stores.ClipDistances()) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing ClipDistance in fragment stage");
         }
         const Id type{TypeArray(F32[1], Const(8U))};
         clip_distances = DefineOutput(*this, type, invocations, spv::BuiltIn::ClipDistance);
     }
-    if (info.stores_layer &&
+    if (info.stores[IR::Attribute::Layer] &&
         (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing Layer in fragment stage");
         }
         layer = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::Layer);
     }
-    if (info.stores_viewport_index &&
+    if (info.stores[IR::Attribute::ViewportIndex] &&
         (profile.support_viewport_index_layer_non_geometry || stage == Stage::Geometry)) {
         if (stage == Stage::Fragment) {
             throw NotImplementedException("Storing ViewportIndex in fragment stage");
         }
         viewport_index = DefineOutput(*this, U32[1], invocations, spv::BuiltIn::ViewportIndex);
     }
-    if (info.stores_viewport_mask && profile.support_viewport_mask) {
+    if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
         viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt,
                                      spv::BuiltIn::ViewportMaskNV);
     }
-    for (size_t index = 0; index < info.stores_generics.size(); ++index) {
-        if (info.stores_generics[index]) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
+        if (info.stores.Generic(index)) {
             DefineGenericOutput(*this, index, invocations);
         }
     }
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index 527685fb8..e277bc358 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -300,7 +300,7 @@ private:
     void DefineAttributeMemAccess(const Info& info);
     void DefineGlobalMemoryFunctions(const Info& info);
 
-    void DefineInputs(const Info& info);
+    void DefineInputs(const IR::Program& program);
     void DefineOutputs(const IR::Program& program);
 };
 
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv.cpp b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
index 278c262f8..ddb86d070 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv.cpp
@@ -281,11 +281,19 @@ void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
             ctx.AddExecutionMode(main, spv::ExecutionMode::OutputTriangleStrip);
             break;
         }
-        if (program.info.stores_point_size) {
+        if (program.info.stores[IR::Attribute::PointSize]) {
             ctx.AddCapability(spv::Capability::GeometryPointSize);
         }
         ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.output_vertices);
         ctx.AddExecutionMode(main, spv::ExecutionMode::Invocations, program.invocations);
+        if (program.is_geometry_passthrough) {
+            if (ctx.profile.support_geometry_shader_passthrough) {
+                ctx.AddExtension("SPV_NV_geometry_shader_passthrough");
+                ctx.AddCapability(spv::Capability::GeometryShaderPassthroughNV);
+            } else {
+                LOG_WARNING(Shader_SPIRV, "Geometry shader passthrough used with no support");
+            }
+        }
         break;
     case Stage::Fragment:
         execution_model = spv::ExecutionModel::Fragment;
@@ -377,20 +385,21 @@ void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ct
377 ctx.AddExtension("SPV_EXT_demote_to_helper_invocation"); 385 ctx.AddExtension("SPV_EXT_demote_to_helper_invocation");
378 ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT); 386 ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
379 } 387 }
380 if (info.stores_viewport_index) { 388 if (info.stores[IR::Attribute::ViewportIndex]) {
381 ctx.AddCapability(spv::Capability::MultiViewport); 389 ctx.AddCapability(spv::Capability::MultiViewport);
382 } 390 }
383 if (info.stores_viewport_mask && profile.support_viewport_mask) { 391 if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
384 ctx.AddExtension("SPV_NV_viewport_array2"); 392 ctx.AddExtension("SPV_NV_viewport_array2");
385 ctx.AddCapability(spv::Capability::ShaderViewportMaskNV); 393 ctx.AddCapability(spv::Capability::ShaderViewportMaskNV);
386 } 394 }
387 if (info.stores_layer || info.stores_viewport_index) { 395 if (info.stores[IR::Attribute::Layer] || info.stores[IR::Attribute::ViewportIndex]) {
388 if (profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) { 396 if (profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
389 ctx.AddExtension("SPV_EXT_shader_viewport_index_layer"); 397 ctx.AddExtension("SPV_EXT_shader_viewport_index_layer");
390 ctx.AddCapability(spv::Capability::ShaderViewportIndexLayerEXT); 398 ctx.AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
391 } 399 }
392 } 400 }
393 if (!profile.support_vertex_instance_id && (info.loads_instance_id || info.loads_vertex_id)) { 401 if (!profile.support_vertex_instance_id &&
402 (info.loads[IR::Attribute::InstanceId] || info.loads[IR::Attribute::VertexId])) {
394 ctx.AddExtension("SPV_KHR_shader_draw_parameters"); 403 ctx.AddExtension("SPV_KHR_shader_draw_parameters");
395 ctx.AddCapability(spv::Capability::DrawParameters); 404 ctx.AddCapability(spv::Capability::DrawParameters);
396 } 405 }
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 85bd72389..77fbb2b2f 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -298,7 +298,7 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     if (IR::IsGeneric(attr)) {
         const u32 index{IR::GenericAttributeIndex(attr)};
         const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
-        if (!type || !ctx.runtime_info.previous_stage_stores_generic[index]) {
+        if (!type || !ctx.runtime_info.previous_stage_stores.Generic(index)) {
             // Attribute is disabled
             return ctx.Const(0.0f);
         }
diff --git a/src/shader_recompiler/environment.h b/src/shader_recompiler/environment.h
index 090bc1c08..8369d0d84 100644
--- a/src/shader_recompiler/environment.h
+++ b/src/shader_recompiler/environment.h
@@ -31,6 +31,10 @@ public:
         return sph;
     }
 
+    [[nodiscard]] const std::array<u32, 8>& GpPassthroughMask() const noexcept {
+        return gp_passthrough_mask;
+    }
+
     [[nodiscard]] Stage ShaderStage() const noexcept {
         return stage;
     }
@@ -41,6 +45,7 @@ public:
 
 protected:
     ProgramHeader sph{};
+    std::array<u32, 8> gp_passthrough_mask{};
     Stage stage{};
     u32 start_address{};
 };
diff --git a/src/shader_recompiler/frontend/ir/attribute.h b/src/shader_recompiler/frontend/ir/attribute.h
index 8bf2ddf30..ca1199494 100644
--- a/src/shader_recompiler/frontend/ir/attribute.h
+++ b/src/shader_recompiler/frontend/ir/attribute.h
@@ -222,6 +222,8 @@ enum class Attribute : u64 {
     FrontFace = 255,
 };
 
+constexpr size_t NUM_GENERICS = 32;
+
 [[nodiscard]] bool IsGeneric(Attribute attribute) noexcept;
 
 [[nodiscard]] u32 GenericAttributeIndex(Attribute attribute);
@@ -230,6 +232,10 @@ enum class Attribute : u64 {
 
 [[nodiscard]] std::string NameOf(Attribute attribute);
 
+[[nodiscard]] constexpr IR::Attribute operator+(IR::Attribute attribute, size_t value) noexcept {
+    return static_cast<IR::Attribute>(static_cast<size_t>(attribute) + value);
+}
+
 } // namespace Shader::IR
 
 template <>
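
The operator+ added above is what lets later hunks address attribute components arithmetically. A minimal usage sketch (index and c are hypothetical loop variables, not code from this change):

    // Component c (0 = X .. 3 = W) of generic attribute `index`; valid because the
    // Generic0X..Generic31W enumerators are contiguous in the Attribute enum.
    const IR::Attribute attr{IR::Attribute::Generic0X + index * 4 + c};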
diff --git a/src/shader_recompiler/frontend/ir/program.h b/src/shader_recompiler/frontend/ir/program.h
index 9ede5b48d..ebcaa8bc2 100644
--- a/src/shader_recompiler/frontend/ir/program.h
+++ b/src/shader_recompiler/frontend/ir/program.h
@@ -27,6 +27,7 @@ struct Program {
     u32 invocations{};
     u32 local_memory_size{};
     u32 shared_memory_size{};
+    bool is_geometry_passthrough{};
 };
 
 [[nodiscard]] std::string DumpProgram(const Program& program);
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
index a8b727f1a..6b4b0ce5b 100644
--- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
@@ -46,7 +46,7 @@ void CollectInterpolationInfo(Environment& env, IR::Program& program) {
         return;
     }
     const ProgramHeader& sph{env.SPH()};
-    for (size_t index = 0; index < program.info.input_generics.size(); ++index) {
+    for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
         std::optional<PixelImap> imap;
         for (const PixelImap value : sph.ps.GenericInputMap(static_cast<u32>(index))) {
             if (value == PixelImap::Unused) {
@@ -60,7 +60,7 @@ void CollectInterpolationInfo(Environment& env, IR::Program& program) {
         if (!imap) {
             continue;
         }
-        program.info.input_generics[index].interpolation = [&] {
+        program.info.interpolation[index] = [&] {
             switch (*imap) {
             case PixelImap::Unused:
             case PixelImap::Perspective:
@@ -140,6 +140,11 @@ IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Blo
         program.output_topology = sph.common3.output_topology;
         program.output_vertices = sph.common4.max_output_vertices;
         program.invocations = sph.common2.threads_per_input_primitive;
+        program.is_geometry_passthrough = sph.common0.geometry_passthrough != 0;
+        if (program.is_geometry_passthrough) {
+            const auto mask{env.GpPassthroughMask()};
+            program.info.passthrough.mask |= ~Common::BitCast<std::bitset<256>>(mask);
+        }
         break;
     }
     case Stage::Compute:
@@ -194,12 +199,9 @@ IR::Program MergeDualVertexPrograms(IR::Program& vertex_a, IR::Program& vertex_b
     result.stage = Stage::VertexB;
     result.info = vertex_a.info;
     result.local_memory_size = std::max(vertex_a.local_memory_size, vertex_b.local_memory_size);
-    for (size_t index = 0; index < 32; ++index) {
-        result.info.input_generics[index].used |= vertex_b.info.input_generics[index].used;
-        if (vertex_b.info.stores_generics[index]) {
-            result.info.stores_generics[index] = true;
-        }
-    }
+    result.info.loads.mask |= vertex_b.info.loads.mask;
+    result.info.stores.mask |= vertex_b.info.stores.mask;
+
     Optimization::JoinTextureInfo(result.info, vertex_b.info);
     Optimization::JoinStorageInfo(result.info, vertex_b.info);
     Optimization::DeadCodeEliminationPass(result);
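
For context on the passthrough mask merge above: GpPassthroughMask() returns eight 32-bit words, which is exactly one bit per attribute slot (8 x 32 = 256, the size of the bitset). A sketch of the same operation with the steps spelled out; the reading of the complement, namely that set SPH bits mark attributes excluded from passthrough, is an inference from the `~` and not stated anywhere in this diff:

    const std::array<u32, 8> words{env.GpPassthroughMask()};
    // Reinterpret the eight words as one 256-bit set, one bit per IR::Attribute slot.
    const auto sph_bits{Common::BitCast<std::bitset<256>>(words)};
    // Merge the complement: attributes whose SPH bit is clear are treated as passed through.
    program.info.passthrough.mask |= ~sph_bits;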
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index a82472152..5e32ac784 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -29,130 +29,6 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
     });
 }
 
-void GetAttribute(Info& info, IR::Attribute attr) {
-    if (IR::IsGeneric(attr)) {
-        info.input_generics.at(IR::GenericAttributeIndex(attr)).used = true;
-        return;
-    }
-    if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        info.loads_fixed_fnc_textures = true;
-        info.loads_legacy_varyings = true;
-        return;
-    }
-    switch (attr) {
-    case IR::Attribute::PrimitiveId:
-        info.loads_primitive_id = true;
-        break;
-    case IR::Attribute::PositionX:
-    case IR::Attribute::PositionY:
-    case IR::Attribute::PositionZ:
-    case IR::Attribute::PositionW:
-        info.loads_position = true;
-        break;
-    case IR::Attribute::ColorFrontDiffuseR:
-    case IR::Attribute::ColorFrontDiffuseG:
-    case IR::Attribute::ColorFrontDiffuseB:
-    case IR::Attribute::ColorFrontDiffuseA:
-        info.loads_color_front_diffuse = true;
-        info.loads_legacy_varyings = true;
-        break;
-    case IR::Attribute::PointSpriteS:
-    case IR::Attribute::PointSpriteT:
-        info.loads_point_coord = true;
-        break;
-    case IR::Attribute::TessellationEvaluationPointU:
-    case IR::Attribute::TessellationEvaluationPointV:
-        info.loads_tess_coord = true;
-        break;
-    case IR::Attribute::InstanceId:
-        info.loads_instance_id = true;
-        break;
-    case IR::Attribute::VertexId:
-        info.loads_vertex_id = true;
-        break;
-    case IR::Attribute::FrontFace:
-        info.loads_front_face = true;
-        break;
-    default:
-        throw NotImplementedException("Get attribute {}", attr);
-    }
-}
-
-void SetAttribute(Info& info, IR::Attribute attr) {
-    if (IR::IsGeneric(attr)) {
-        info.stores_generics[IR::GenericAttributeIndex(attr)] = true;
-        return;
-    }
-    if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
-        info.stores_fixed_fnc_textures = true;
-        info.stores_legacy_varyings = true;
-        return;
-    }
-    switch (attr) {
-    case IR::Attribute::Layer:
-        info.stores_layer = true;
-        break;
-    case IR::Attribute::ViewportIndex:
-        info.stores_viewport_index = true;
-        break;
-    case IR::Attribute::PointSize:
-        info.stores_point_size = true;
-        break;
-    case IR::Attribute::PositionX:
-    case IR::Attribute::PositionY:
-    case IR::Attribute::PositionZ:
-    case IR::Attribute::PositionW:
-        info.stores_position = true;
-        break;
-    case IR::Attribute::ColorFrontDiffuseR:
-    case IR::Attribute::ColorFrontDiffuseG:
-    case IR::Attribute::ColorFrontDiffuseB:
-    case IR::Attribute::ColorFrontDiffuseA:
-        info.stores_color_front_diffuse = true;
-        info.stores_legacy_varyings = true;
-        break;
-    case IR::Attribute::ColorFrontSpecularR:
-    case IR::Attribute::ColorFrontSpecularG:
-    case IR::Attribute::ColorFrontSpecularB:
-    case IR::Attribute::ColorFrontSpecularA:
-        info.stores_color_front_specular = true;
-        info.stores_legacy_varyings = true;
-        break;
-    case IR::Attribute::ColorBackDiffuseR:
-    case IR::Attribute::ColorBackDiffuseG:
-    case IR::Attribute::ColorBackDiffuseB:
-    case IR::Attribute::ColorBackDiffuseA:
-        info.stores_color_back_diffuse = true;
-        info.stores_legacy_varyings = true;
-        break;
-    case IR::Attribute::ColorBackSpecularR:
-    case IR::Attribute::ColorBackSpecularG:
-    case IR::Attribute::ColorBackSpecularB:
-    case IR::Attribute::ColorBackSpecularA:
-        info.stores_color_back_specular = true;
-        info.stores_legacy_varyings = true;
-        break;
-    case IR::Attribute::ClipDistance0:
-    case IR::Attribute::ClipDistance1:
-    case IR::Attribute::ClipDistance2:
-    case IR::Attribute::ClipDistance3:
-    case IR::Attribute::ClipDistance4:
-    case IR::Attribute::ClipDistance5:
-    case IR::Attribute::ClipDistance6:
-    case IR::Attribute::ClipDistance7:
-        info.stores_clip_distance = true;
-        break;
-    case IR::Attribute::FogCoordinate:
-        info.stores_fog_coordinate = true;
-        break;
-    case IR::Attribute::ViewportMask:
-        info.stores_viewport_mask = true;
-        break;
-    default:
-        throw NotImplementedException("Set attribute {}", attr);
-    }
-}
-
 void GetPatch(Info& info, IR::Patch patch) {
     if (!IR::IsGeneric(patch)) {
         throw NotImplementedException("Reading non-generic patch {}", patch);
@@ -511,10 +387,10 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         info.uses_demote_to_helper_invocation = true;
         break;
     case IR::Opcode::GetAttribute:
-        GetAttribute(info, inst.Arg(0).Attribute());
+        info.loads.mask[static_cast<size_t>(inst.Arg(0).Attribute())] = true;
         break;
     case IR::Opcode::SetAttribute:
-        SetAttribute(info, inst.Arg(0).Attribute());
+        info.stores.mask[static_cast<size_t>(inst.Arg(0).Attribute())] = true;
         break;
     case IR::Opcode::GetPatch:
         GetPatch(info, inst.Arg(0).Patch());
@@ -943,26 +819,78 @@ void GatherInfoFromHeader(Environment& env, Info& info) {
943 if (!info.loads_indexed_attributes) { 819 if (!info.loads_indexed_attributes) {
944 return; 820 return;
945 } 821 }
946 for (size_t i = 0; i < info.input_generics.size(); i++) { 822 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
947 info.input_generics[i].used |= header.ps.IsGenericVectorActive(i); 823 const size_t offset{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
824 const auto vector{header.ps.imap_generic_vector[index]};
825 info.loads.mask[offset + 0] = vector.x != PixelImap::Unused;
826 info.loads.mask[offset + 1] = vector.y != PixelImap::Unused;
827 info.loads.mask[offset + 2] = vector.z != PixelImap::Unused;
828 info.loads.mask[offset + 3] = vector.w != PixelImap::Unused;
948 } 829 }
949 info.loads_position |= header.ps.imap_systemb.position != 0;
950 return; 830 return;
951 } 831 }
952 if (info.loads_indexed_attributes) { 832 if (info.loads_indexed_attributes) {
953 for (size_t i = 0; i < info.input_generics.size(); i++) { 833 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
954 info.input_generics[i].used |= header.vtg.IsInputGenericVectorActive(i); 834 const IR::Attribute attribute{IR::Attribute::Generic0X + index * 4};
835 const auto mask = header.vtg.InputGeneric(index);
836 for (size_t i = 0; i < 4; ++i) {
837 info.loads.Set(attribute + i, mask[i]);
838 }
839 }
840 for (size_t index = 0; index < 8; ++index) {
841 const u16 mask{header.vtg.clip_distances};
842 info.loads.Set(IR::Attribute::ClipDistance0 + index, ((mask >> index) & 1) != 0);
955 } 843 }
956 info.loads_position |= header.vtg.imap_systemb.position != 0; 844 info.loads.Set(IR::Attribute::PrimitiveId, header.vtg.imap_systemb.primitive_array_id != 0);
845 info.loads.Set(IR::Attribute::Layer, header.vtg.imap_systemb.rt_array_index != 0);
846 info.loads.Set(IR::Attribute::ViewportIndex, header.vtg.imap_systemb.viewport_index != 0);
847 info.loads.Set(IR::Attribute::PointSize, header.vtg.imap_systemb.point_size != 0);
848 info.loads.Set(IR::Attribute::PositionX, header.vtg.imap_systemb.position_x != 0);
849 info.loads.Set(IR::Attribute::PositionY, header.vtg.imap_systemb.position_y != 0);
850 info.loads.Set(IR::Attribute::PositionZ, header.vtg.imap_systemb.position_z != 0);
851 info.loads.Set(IR::Attribute::PositionW, header.vtg.imap_systemb.position_w != 0);
852 info.loads.Set(IR::Attribute::PointSpriteS, header.vtg.point_sprite_s != 0);
853 info.loads.Set(IR::Attribute::PointSpriteT, header.vtg.point_sprite_t != 0);
854 info.loads.Set(IR::Attribute::FogCoordinate, header.vtg.fog_coordinate != 0);
855 info.loads.Set(IR::Attribute::TessellationEvaluationPointU,
856 header.vtg.tessellation_eval_point_u != 0);
857 info.loads.Set(IR::Attribute::TessellationEvaluationPointV,
858 header.vtg.tessellation_eval_point_v != 0);
859 info.loads.Set(IR::Attribute::InstanceId, header.vtg.instance_id != 0);
860 info.loads.Set(IR::Attribute::VertexId, header.vtg.vertex_id != 0);
861 // TODO: Legacy varyings
957 } 862 }
958 if (info.stores_indexed_attributes) { 863 if (info.stores_indexed_attributes) {
959 for (size_t i = 0; i < info.stores_generics.size(); i++) { 864 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
960 if (header.vtg.IsOutputGenericVectorActive(i)) { 865 const IR::Attribute attribute{IR::Attribute::Generic0X + index * 4};
961 info.stores_generics[i] = true; 866 const auto mask{header.vtg.OutputGeneric(index)};
867 for (size_t i = 0; i < 4; ++i) {
868 info.stores.Set(attribute + i, mask[i]);
962 } 869 }
963 } 870 }
964 info.stores_clip_distance |= header.vtg.omap_systemc.clip_distances != 0; 871 for (size_t index = 0; index < 8; ++index) {
965 info.stores_position |= header.vtg.omap_systemb.position != 0; 872 const u16 mask{header.vtg.omap_systemc.clip_distances};
873 info.stores.Set(IR::Attribute::ClipDistance0 + index, ((mask >> index) & 1) != 0);
874 }
875 info.stores.Set(IR::Attribute::PrimitiveId,
876 header.vtg.omap_systemb.primitive_array_id != 0);
877 info.stores.Set(IR::Attribute::Layer, header.vtg.omap_systemb.rt_array_index != 0);
878 info.stores.Set(IR::Attribute::ViewportIndex, header.vtg.omap_systemb.viewport_index != 0);
879 info.stores.Set(IR::Attribute::PointSize, header.vtg.omap_systemb.point_size != 0);
880 info.stores.Set(IR::Attribute::PositionX, header.vtg.omap_systemb.position_x != 0);
881 info.stores.Set(IR::Attribute::PositionY, header.vtg.omap_systemb.position_y != 0);
882 info.stores.Set(IR::Attribute::PositionZ, header.vtg.omap_systemb.position_z != 0);
883 info.stores.Set(IR::Attribute::PositionW, header.vtg.omap_systemb.position_w != 0);
884 info.stores.Set(IR::Attribute::PointSpriteS, header.vtg.omap_systemc.point_sprite_s != 0);
885 info.stores.Set(IR::Attribute::PointSpriteT, header.vtg.omap_systemc.point_sprite_t != 0);
886 info.stores.Set(IR::Attribute::FogCoordinate, header.vtg.omap_systemc.fog_coordinate != 0);
887 info.stores.Set(IR::Attribute::TessellationEvaluationPointU,
888 header.vtg.omap_systemc.tessellation_eval_point_u != 0);
889 info.stores.Set(IR::Attribute::TessellationEvaluationPointV,
890 header.vtg.omap_systemc.tessellation_eval_point_v != 0);
891 info.stores.Set(IR::Attribute::InstanceId, header.vtg.omap_systemc.instance_id != 0);
892 info.stores.Set(IR::Attribute::VertexId, header.vtg.omap_systemc.vertex_id != 0);
893 // TODO: Legacy varyings
966 } 894 }
967} 895}
968} // Anonymous namespace 896} // Anonymous namespace
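Note on the clip-distance expansion in the hunk above: the SPH stores the enabled clip distances as an 8-bit mask, and the new loop turns each bit into one ClipDistance attribute bit. A minimal standalone sketch of that decode (the helper name and the example mask are illustrative, not part of the change):

    #include <bitset>
    #include <cstdint>

    // Expand an SPH clip-distance mask into per-attribute bits, as the loop above does.
    // Example: mask = 0b0000'0101 marks only ClipDistance0 and ClipDistance2.
    void ExpandClipDistances(std::bitset<256>& mask_bits, std::size_t clip_distance0, std::uint16_t mask) {
        for (std::size_t index = 0; index < 8; ++index) {
            mask_bits[clip_distance0 + index] = ((mask >> index) & 1) != 0;
        }
    }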
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index d46be1638..ee1887b56 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -34,6 +34,7 @@ struct Profile {
34 bool support_demote_to_helper_invocation{}; 34 bool support_demote_to_helper_invocation{};
35 bool support_int64_atomics{}; 35 bool support_int64_atomics{};
36 bool support_derivative_control{}; 36 bool support_derivative_control{};
37 bool support_geometry_shader_passthrough{};
37 bool support_gl_nv_gpu_shader_5{}; 38 bool support_gl_nv_gpu_shader_5{};
38 bool support_gl_amd_gpu_shader_half_float{}; 39 bool support_gl_amd_gpu_shader_half_float{};
39 bool support_gl_texture_shadow_lod{}; 40 bool support_gl_texture_shadow_lod{};
diff --git a/src/shader_recompiler/program_header.h b/src/shader_recompiler/program_header.h
index 6933750aa..bd6c2bfb5 100644
--- a/src/shader_recompiler/program_header.h
+++ b/src/shader_recompiler/program_header.h
@@ -37,7 +37,9 @@ struct ProgramHeader {
37 BitField<15, 1, u32> kills_pixels; 37 BitField<15, 1, u32> kills_pixels;
38 BitField<16, 1, u32> does_global_store; 38 BitField<16, 1, u32> does_global_store;
39 BitField<17, 4, u32> sass_version; 39 BitField<17, 4, u32> sass_version;
40 BitField<21, 5, u32> reserved; 40 BitField<21, 2, u32> reserved1;
41 BitField<24, 1, u32> geometry_passthrough;
42 BitField<25, 1, u32> reserved2;
41 BitField<26, 1, u32> does_load_or_store; 43 BitField<26, 1, u32> does_load_or_store;
42 BitField<27, 1, u32> does_fp64; 44 BitField<27, 1, u32> does_fp64;
43 BitField<28, 4, u32> stream_out_mask; 45 BitField<28, 4, u32> stream_out_mask;
@@ -79,24 +81,10 @@ struct ProgramHeader {
79 BitField<5, 1, u8> position_y; 81 BitField<5, 1, u8> position_y;
80 BitField<6, 1, u8> position_z; 82 BitField<6, 1, u8> position_z;
81 BitField<7, 1, u8> position_w; 83 BitField<7, 1, u8> position_w;
82 BitField<0, 4, u8> first;
83 BitField<4, 4, u8> position;
84 u8 raw; 84 u8 raw;
85 } imap_systemb; 85 } imap_systemb;
86 86
87 union { 87 std::array<u8, 16> imap_generic_vector;
88 BitField<0, 1, u8> x;
89 BitField<1, 1, u8> y;
90 BitField<2, 1, u8> z;
91 BitField<3, 1, u8> w;
92 BitField<4, 1, u8> x2;
93 BitField<5, 1, u8> y2;
94 BitField<6, 1, u8> z2;
95 BitField<7, 1, u8> w2;
96 BitField<0, 4, u8> first;
97 BitField<4, 4, u8> second;
98 u8 raw;
99 } imap_generic_vector[16];
100 88
101 INSERT_PADDING_BYTES_NOINIT(2); // ImapColor 89 INSERT_PADDING_BYTES_NOINIT(2); // ImapColor
102 union { 90 union {
@@ -122,24 +110,10 @@ struct ProgramHeader {
122 BitField<5, 1, u8> position_y; 110 BitField<5, 1, u8> position_y;
123 BitField<6, 1, u8> position_z; 111 BitField<6, 1, u8> position_z;
124 BitField<7, 1, u8> position_w; 112 BitField<7, 1, u8> position_w;
125 BitField<0, 4, u8> first;
126 BitField<4, 4, u8> position;
127 u8 raw; 113 u8 raw;
128 } omap_systemb; 114 } omap_systemb;
129 115
130 union { 116 std::array<u8, 16> omap_generic_vector;
131 BitField<0, 1, u8> x;
132 BitField<1, 1, u8> y;
133 BitField<2, 1, u8> z;
134 BitField<3, 1, u8> w;
135 BitField<4, 1, u8> x2;
136 BitField<5, 1, u8> y2;
137 BitField<6, 1, u8> z2;
138 BitField<7, 1, u8> w2;
139 BitField<0, 4, u8> first;
140 BitField<4, 4, u8> second;
141 u8 raw;
142 } omap_generic_vector[16];
143 117
144 INSERT_PADDING_BYTES_NOINIT(2); // OmapColor 118 INSERT_PADDING_BYTES_NOINIT(2); // OmapColor
145 119
@@ -157,18 +131,24 @@ struct ProgramHeader {
157 INSERT_PADDING_BYTES_NOINIT(5); // OmapFixedFncTexture[10] 131 INSERT_PADDING_BYTES_NOINIT(5); // OmapFixedFncTexture[10]
158 INSERT_PADDING_BYTES_NOINIT(1); // OmapReserved 132 INSERT_PADDING_BYTES_NOINIT(1); // OmapReserved
159 133
160 [[nodiscard]] bool IsInputGenericVectorActive(size_t index) const { 134 [[nodiscard]] std::array<bool, 4> InputGeneric(size_t index) const noexcept {
161 if ((index & 1) == 0) { 135 const int data{imap_generic_vector[index >> 1] >> ((index % 2) * 4)};
162 return imap_generic_vector[index >> 1].first != 0; 136 return {
163 } 137 (data & 1) != 0,
164 return imap_generic_vector[index >> 1].second != 0; 138 (data & 2) != 0,
139 (data & 4) != 0,
140 (data & 8) != 0,
141 };
165 } 142 }
166 143
167 [[nodiscard]] bool IsOutputGenericVectorActive(size_t index) const { 144 [[nodiscard]] std::array<bool, 4> OutputGeneric(size_t index) const noexcept {
168 if ((index & 1) == 0) { 145 const int data{omap_generic_vector[index >> 1] >> ((index % 2) * 4)};
169 return omap_generic_vector[index >> 1].first != 0; 146 return {
170 } 147 (data & 1) != 0,
171 return omap_generic_vector[index >> 1].second != 0; 148 (data & 2) != 0,
149 (data & 4) != 0,
150 (data & 8) != 0,
151 };
172 } 152 }
173 } vtg; 153 } vtg;
174 154
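For clarity, the new InputGeneric/OutputGeneric accessors decode a nibble-packed table: each byte of the 16-entry array carries two generic attributes (even index in the low nibble, odd index in the high nibble), with bits 0..3 standing for x, y, z, w. A standalone sketch of the same decode, assuming nothing beyond what the hunk shows (the example byte value in the comment is made up):

    #include <array>
    #include <cstdint>

    // Mirrors ProgramHeader::vtg::InputGeneric: two attributes per byte,
    // low nibble = even index, high nibble = odd index; bits 0..3 are x, y, z, w.
    std::array<bool, 4> DecodeGenericVector(const std::array<std::uint8_t, 16>& packed, std::size_t index) {
        const int nibble = packed[index >> 1] >> ((index % 2) * 4);
        return {(nibble & 1) != 0, (nibble & 2) != 0, (nibble & 4) != 0, (nibble & 8) != 0};
    }

    // Example: packed[2] == 0xA0 means generic 5 (odd, so the high nibble 0xA) uses y and w only.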
diff --git a/src/shader_recompiler/runtime_info.h b/src/shader_recompiler/runtime_info.h
index 63fe2afaf..f3f83a258 100644
--- a/src/shader_recompiler/runtime_info.h
+++ b/src/shader_recompiler/runtime_info.h
@@ -10,6 +10,7 @@
10#include <vector> 10#include <vector>
11 11
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "shader_recompiler/varying_state.h"
13 14
14namespace Shader { 15namespace Shader {
15 16
@@ -60,7 +61,7 @@ struct TransformFeedbackVarying {
60 61
61struct RuntimeInfo { 62struct RuntimeInfo {
62 std::array<AttributeType, 32> generic_input_types{}; 63 std::array<AttributeType, 32> generic_input_types{};
63 std::bitset<32> previous_stage_stores_generic{}; 64 VaryingState previous_stage_stores;
64 65
65 bool convert_depth_mode{}; 66 bool convert_depth_mode{};
66 bool force_early_z{}; 67 bool force_early_z{};
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index a20e15d2e..4ef4dbd40 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -9,6 +9,7 @@
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "shader_recompiler/frontend/ir/type.h" 11#include "shader_recompiler/frontend/ir/type.h"
12#include "shader_recompiler/varying_state.h"
12 13
13#include <boost/container/small_vector.hpp> 14#include <boost/container/small_vector.hpp>
14#include <boost/container/static_vector.hpp> 15#include <boost/container/static_vector.hpp>
@@ -44,11 +45,6 @@ enum class Interpolation {
44 NoPerspective, 45 NoPerspective,
45}; 46};
46 47
47struct InputVarying {
48 Interpolation interpolation{Interpolation::Smooth};
49 bool used{false};
50};
51
52struct ConstantBufferDescriptor { 48struct ConstantBufferDescriptor {
53 u32 index; 49 u32 index;
54 u32 count; 50 u32 count;
@@ -121,18 +117,10 @@ struct Info {
121 bool uses_subgroup_shuffles{}; 117 bool uses_subgroup_shuffles{};
122 std::array<bool, 30> uses_patches{}; 118 std::array<bool, 30> uses_patches{};
123 119
124 std::array<InputVarying, 32> input_generics{}; 120 std::array<Interpolation, 32> interpolation{};
125 bool loads_primitive_id{}; 121 VaryingState loads;
126 bool loads_position{}; 122 VaryingState stores;
127 bool loads_color_front_diffuse{}; 123 VaryingState passthrough;
128 bool loads_fixed_fnc_textures{};
129 bool loads_point_coord{};
130 bool loads_instance_id{};
131 bool loads_vertex_id{};
132 bool loads_front_face{};
133 bool loads_legacy_varyings{};
134
135 bool loads_tess_coord{};
136 124
137 bool loads_indexed_attributes{}; 125 bool loads_indexed_attributes{};
138 126
@@ -140,21 +128,6 @@ struct Info {
140 bool stores_sample_mask{}; 128 bool stores_sample_mask{};
141 bool stores_frag_depth{}; 129 bool stores_frag_depth{};
142 130
143 std::bitset<32> stores_generics{};
144 bool stores_layer{};
145 bool stores_viewport_index{};
146 bool stores_point_size{};
147 bool stores_position{};
148 bool stores_color_front_diffuse{};
149 bool stores_color_front_specular{};
150 bool stores_color_back_diffuse{};
151 bool stores_color_back_specular{};
152 bool stores_fixed_fnc_textures{};
153 bool stores_clip_distance{};
154 bool stores_fog_coordinate{};
155 bool stores_viewport_mask{};
156 bool stores_legacy_varyings{};
157
158 bool stores_tess_level_outer{}; 131 bool stores_tess_level_outer{};
159 bool stores_tess_level_inner{}; 132 bool stores_tess_level_inner{};
160 133
diff --git a/src/shader_recompiler/varying_state.h b/src/shader_recompiler/varying_state.h
new file mode 100644
index 000000000..9d7b24a76
--- /dev/null
+++ b/src/shader_recompiler/varying_state.h
@@ -0,0 +1,69 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <bitset>
8#include <cstddef>
9
10#include "shader_recompiler/frontend/ir/attribute.h"
11
12namespace Shader {
13
14struct VaryingState {
15 std::bitset<256> mask{};
16
17 void Set(IR::Attribute attribute, bool state = true) {
18 mask[static_cast<size_t>(attribute)] = state;
19 }
20
21 [[nodiscard]] bool operator[](IR::Attribute attribute) const noexcept {
22 return mask[static_cast<size_t>(attribute)];
23 }
24
25 [[nodiscard]] bool AnyComponent(IR::Attribute base) const noexcept {
26 return mask[static_cast<size_t>(base) + 0] || mask[static_cast<size_t>(base) + 1] ||
27 mask[static_cast<size_t>(base) + 2] || mask[static_cast<size_t>(base) + 3];
28 }
29
30 [[nodiscard]] bool AllComponents(IR::Attribute base) const noexcept {
31 return mask[static_cast<size_t>(base) + 0] && mask[static_cast<size_t>(base) + 1] &&
32 mask[static_cast<size_t>(base) + 2] && mask[static_cast<size_t>(base) + 3];
33 }
34
35 [[nodiscard]] bool IsUniform(IR::Attribute base) const noexcept {
36 return AnyComponent(base) == AllComponents(base);
37 }
38
39 [[nodiscard]] bool Generic(size_t index, size_t component) const noexcept {
40 return mask[static_cast<size_t>(IR::Attribute::Generic0X) + index * 4 + component];
41 }
42
43 [[nodiscard]] bool Generic(size_t index) const noexcept {
44 return Generic(index, 0) || Generic(index, 1) || Generic(index, 2) || Generic(index, 3);
45 }
46
47 [[nodiscard]] bool ClipDistances() const noexcept {
48 return AnyComponent(IR::Attribute::ClipDistance0) ||
49 AnyComponent(IR::Attribute::ClipDistance4);
50 }
51
52 [[nodiscard]] bool Legacy() const noexcept {
53 return AnyComponent(IR::Attribute::ColorFrontDiffuseR) ||
54 AnyComponent(IR::Attribute::ColorFrontSpecularR) ||
55 AnyComponent(IR::Attribute::ColorBackDiffuseR) ||
56 AnyComponent(IR::Attribute::ColorBackSpecularR) || FixedFunctionTexture();
57 }
58
59 [[nodiscard]] bool FixedFunctionTexture() const noexcept {
60 for (size_t index = 0; index < 10; ++index) {
61 if (AnyComponent(IR::Attribute::FixedFncTexture0S + index * 4)) {
62 return true;
63 }
64 }
65 return false;
66 }
67};
68
69} // namespace Shader
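A minimal usage sketch of the new VaryingState helper, assuming the generic attribute enumerators from frontend/ir/attribute.h (Generic2X and friends); the values are illustrative only:

    #include "shader_recompiler/varying_state.h"

    namespace IR = Shader::IR;

    void Example() {
        Shader::VaryingState stores;
        stores.Set(IR::Attribute::Generic2X); // component 0 of generic attribute 2
        stores.Set(IR::Attribute::Generic2Y); // component 1 of generic attribute 2

        [[maybe_unused]] const bool any = stores.Generic(2);                              // true
        [[maybe_unused]] const bool all = stores.AllComponents(IR::Attribute::Generic2X); // false, Z/W unset
        [[maybe_unused]] const bool uni = stores.IsUniform(IR::Attribute::Generic2X);     // false, partially written
    }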
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index da2ded671..471d5686a 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -961,7 +961,11 @@ public:
961 961
962 SamplerIndex sampler_index; 962 SamplerIndex sampler_index;
963 963
964 INSERT_PADDING_WORDS_NOINIT(0x25); 964 INSERT_PADDING_WORDS_NOINIT(0x2);
965
966 std::array<u32, 8> gp_passthrough_mask;
967
968 INSERT_PADDING_WORDS_NOINIT(0x1B);
965 969
966 u32 depth_test_enable; 970 u32 depth_test_enable;
967 971
@@ -1628,6 +1632,7 @@ ASSERT_REG_POSITION(zeta_width, 0x48a);
1628ASSERT_REG_POSITION(zeta_height, 0x48b); 1632ASSERT_REG_POSITION(zeta_height, 0x48b);
1629ASSERT_REG_POSITION(zeta_depth, 0x48c); 1633ASSERT_REG_POSITION(zeta_depth, 0x48c);
1630ASSERT_REG_POSITION(sampler_index, 0x48D); 1634ASSERT_REG_POSITION(sampler_index, 0x48D);
1635ASSERT_REG_POSITION(gp_passthrough_mask, 0x490);
1631ASSERT_REG_POSITION(depth_test_enable, 0x4B3); 1636ASSERT_REG_POSITION(depth_test_enable, 0x4B3);
1632ASSERT_REG_POSITION(independent_blend_enable, 0x4B9); 1637ASSERT_REG_POSITION(independent_blend_enable, 0x4B9);
1633ASSERT_REG_POSITION(depth_write_enabled, 0x4BA); 1638ASSERT_REG_POSITION(depth_write_enabled, 0x4BA);
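The register arithmetic in the maxwell_3d.h hunk above is consistent: sampler_index sits at 0x48D, two padding words put gp_passthrough_mask at 0x490, and its 8 words plus 0x1B padding words land depth_test_enable back at 0x4B3, so the new layout occupies exactly the old 0x25-word pad. As a quick check (illustrative only, not part of the change):

    static_assert(0x48D + 1 + 0x2 == 0x490);  // sampler_index -> gp_passthrough_mask
    static_assert(0x490 + 8 + 0x1B == 0x4B3); // gp_passthrough_mask[8] -> depth_test_enable
    static_assert(0x2 + 8 + 0x1B == 0x25);    // new fields fit the old padding exactly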
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 5af9b7745..06e39a503 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -61,10 +61,10 @@ Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineKey& key,
61 bool glasm_use_storage_buffers, bool use_assembly_shaders) { 61 bool glasm_use_storage_buffers, bool use_assembly_shaders) {
62 Shader::RuntimeInfo info; 62 Shader::RuntimeInfo info;
63 if (previous_program) { 63 if (previous_program) {
64 info.previous_stage_stores_generic = previous_program->info.stores_generics; 64 info.previous_stage_stores = previous_program->info.stores;
65 } else { 65 } else {
66 // Mark all stores as available 66 // Mark all stores as available for vertex shaders
67 info.previous_stage_stores_generic.flip(); 67 info.previous_stage_stores.mask.set();
68 } 68 }
69 switch (program.stage) { 69 switch (program.stage) {
70 case Shader::Stage::VertexB: 70 case Shader::Stage::VertexB:
@@ -187,6 +187,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
187 .support_demote_to_helper_invocation = false, 187 .support_demote_to_helper_invocation = false,
188 .support_int64_atomics = false, 188 .support_int64_atomics = false,
189 .support_derivative_control = device.HasDerivativeControl(), 189 .support_derivative_control = device.HasDerivativeControl(),
190 .support_geometry_shader_passthrough = false, // TODO
190 .support_gl_nv_gpu_shader_5 = device.HasNvGpuShader5(), 191 .support_gl_nv_gpu_shader_5 = device.HasNvGpuShader5(),
191 .support_gl_amd_gpu_shader_half_float = device.HasAmdShaderHalfFloat(), 192 .support_gl_amd_gpu_shader_half_float = device.HasAmdShaderHalfFloat(),
192 .support_gl_texture_shadow_lod = device.HasTextureShadowLod(), 193 .support_gl_texture_shadow_lod = device.HasTextureShadowLod(),
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 70e183e65..6d664ed6b 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -487,10 +487,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
487 static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors; 487 static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
488 static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes; 488 static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
489 if (key.state.dynamic_vertex_input) { 489 if (key.state.dynamic_vertex_input) {
490 const auto& input_attributes = stage_infos[0].input_generics;
491 for (size_t index = 0; index < key.state.attributes.size(); ++index) { 490 for (size_t index = 0; index < key.state.attributes.size(); ++index) {
492 const u32 type = key.state.DynamicAttributeType(index); 491 const u32 type = key.state.DynamicAttributeType(index);
493 if (!input_attributes[index].used || type == 0) { 492 if (!stage_infos[0].loads.Generic(index) || type == 0) {
494 continue; 493 continue;
495 } 494 }
496 vertex_attributes.push_back({ 495 vertex_attributes.push_back({
@@ -526,10 +525,9 @@ void GraphicsPipeline::MakePipeline(VkRenderPass render_pass) {
526 }); 525 });
527 } 526 }
528 } 527 }
529 const auto& input_attributes = stage_infos[0].input_generics;
530 for (size_t index = 0; index < key.state.attributes.size(); ++index) { 528 for (size_t index = 0; index < key.state.attributes.size(); ++index) {
531 const auto& attribute = key.state.attributes[index]; 529 const auto& attribute = key.state.attributes[index];
532 if (!attribute.enabled || !input_attributes[index].used) { 530 if (!attribute.enabled || !stage_infos[0].loads.Generic(index)) {
533 continue; 531 continue;
534 } 532 }
535 vertex_attributes.push_back({ 533 vertex_attributes.push_back({
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index ec06b124f..7aaa40ef2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -123,18 +123,21 @@ Shader::AttributeType AttributeType(const FixedPipelineState& state, size_t inde
123 return Shader::AttributeType::Disabled; 123 return Shader::AttributeType::Disabled;
124} 124}
125 125
126Shader::RuntimeInfo MakeRuntimeInfo(const GraphicsPipelineCacheKey& key, 126Shader::RuntimeInfo MakeRuntimeInfo(std::span<const Shader::IR::Program> programs,
127 const GraphicsPipelineCacheKey& key,
127 const Shader::IR::Program& program, 128 const Shader::IR::Program& program,
128 const Shader::IR::Program* previous_program) { 129 const Shader::IR::Program* previous_program) {
129 Shader::RuntimeInfo info; 130 Shader::RuntimeInfo info;
130 if (previous_program) { 131 if (previous_program) {
131 info.previous_stage_stores_generic = previous_program->info.stores_generics; 132 info.previous_stage_stores = previous_program->info.stores;
133 if (previous_program->is_geometry_passthrough) {
134 info.previous_stage_stores.mask |= previous_program->info.passthrough.mask;
135 }
132 } else { 136 } else {
133 // Mark all stores as available 137 info.previous_stage_stores.mask.set();
134 info.previous_stage_stores_generic.flip();
135 } 138 }
136 const Shader::Stage stage{program.stage}; 139 const Shader::Stage stage{program.stage};
137 const bool has_geometry{key.unique_hashes[4] != 0}; 140 const bool has_geometry{key.unique_hashes[4] != 0 && !programs[4].is_geometry_passthrough};
138 const bool gl_ndc{key.state.ndc_minus_one_to_one != 0}; 141 const bool gl_ndc{key.state.ndc_minus_one_to_one != 0};
139 const float point_size{Common::BitCast<float>(key.state.point_size)}; 142 const float point_size{Common::BitCast<float>(key.state.point_size)};
140 switch (stage) { 143 switch (stage) {
@@ -302,6 +305,7 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxw
302 .support_demote_to_helper_invocation = true, 305 .support_demote_to_helper_invocation = true,
303 .support_int64_atomics = device.IsExtShaderAtomicInt64Supported(), 306 .support_int64_atomics = device.IsExtShaderAtomicInt64Supported(),
304 .support_derivative_control = true, 307 .support_derivative_control = true,
308 .support_geometry_shader_passthrough = device.IsNvGeometryShaderPassthroughSupported(),
305 309
306 .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(), 310 .warp_size_potentially_larger_than_guest = device.IsWarpSizePotentiallyBiggerThanGuest(),
307 311
@@ -518,7 +522,7 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
518 const size_t stage_index{index - 1}; 522 const size_t stage_index{index - 1};
519 infos[stage_index] = &program.info; 523 infos[stage_index] = &program.info;
520 524
521 const Shader::RuntimeInfo runtime_info{MakeRuntimeInfo(key, program, previous_stage)}; 525 const auto runtime_info{MakeRuntimeInfo(programs, key, program, previous_stage)};
522 const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)}; 526 const std::vector<u32> code{EmitSPIRV(profile, runtime_info, program, binding)};
523 device.SaveShader(code); 527 device.SaveShader(code);
524 modules[stage_index] = BuildShader(device, code); 528 modules[stage_index] = BuildShader(device, code);
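A small sketch of the passthrough handling added to MakeRuntimeInfo above: when the previous stage is a passthrough geometry shader, the attributes it passes through count as stores visible to the next stage. The helper name below is hypothetical; the fields are the ones used in the hunk:

    #include "shader_recompiler/frontend/ir/program.h"
    #include "shader_recompiler/varying_state.h"

    // Hypothetical helper restating the previous-stage-stores logic from MakeRuntimeInfo.
    Shader::VaryingState PreviousStageStores(const Shader::IR::Program& previous) {
        Shader::VaryingState stores = previous.info.stores;
        if (previous.is_geometry_passthrough) {
            stores.mask |= previous.info.passthrough.mask;
        }
        return stores;
    }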
diff --git a/src/video_core/shader_environment.cpp b/src/video_core/shader_environment.cpp
index d463e2b56..429cab30d 100644
--- a/src/video_core/shader_environment.cpp
+++ b/src/video_core/shader_environment.cpp
@@ -22,7 +22,7 @@
22namespace VideoCommon { 22namespace VideoCommon {
23 23
24constexpr std::array<char, 8> MAGIC_NUMBER{'y', 'u', 'z', 'u', 'c', 'a', 'c', 'h'}; 24constexpr std::array<char, 8> MAGIC_NUMBER{'y', 'u', 'z', 'u', 'c', 'a', 'c', 'h'};
25constexpr u32 CACHE_VERSION = 4; 25constexpr u32 CACHE_VERSION = 5;
26 26
27constexpr size_t INST_SIZE = sizeof(u64); 27constexpr size_t INST_SIZE = sizeof(u64);
28 28
@@ -155,6 +155,10 @@ void GenericEnvironment::Serialize(std::ofstream& file) const {
155 .write(reinterpret_cast<const char*>(&shared_memory_size), sizeof(shared_memory_size)); 155 .write(reinterpret_cast<const char*>(&shared_memory_size), sizeof(shared_memory_size));
156 } else { 156 } else {
157 file.write(reinterpret_cast<const char*>(&sph), sizeof(sph)); 157 file.write(reinterpret_cast<const char*>(&sph), sizeof(sph));
158 if (stage == Shader::Stage::Geometry) {
159 file.write(reinterpret_cast<const char*>(&gp_passthrough_mask),
160 sizeof(gp_passthrough_mask));
161 }
158 } 162 }
159} 163}
160 164
@@ -202,6 +206,7 @@ GraphicsEnvironment::GraphicsEnvironment(Tegra::Engines::Maxwell3D& maxwell3d_,
202 u32 start_address_) 206 u32 start_address_)
203 : GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} { 207 : GenericEnvironment{gpu_memory_, program_base_, start_address_}, maxwell3d{&maxwell3d_} {
204 gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph)); 208 gpu_memory->ReadBlock(program_base + start_address, &sph, sizeof(sph));
209 gp_passthrough_mask = maxwell3d->regs.gp_passthrough_mask;
205 switch (program) { 210 switch (program) {
206 case Maxwell::ShaderProgram::VertexA: 211 case Maxwell::ShaderProgram::VertexA:
207 stage = Shader::Stage::VertexA; 212 stage = Shader::Stage::VertexA;
@@ -319,6 +324,9 @@ void FileEnvironment::Deserialize(std::ifstream& file) {
319 .read(reinterpret_cast<char*>(&shared_memory_size), sizeof(shared_memory_size)); 324 .read(reinterpret_cast<char*>(&shared_memory_size), sizeof(shared_memory_size));
320 } else { 325 } else {
321 file.read(reinterpret_cast<char*>(&sph), sizeof(sph)); 326 file.read(reinterpret_cast<char*>(&sph), sizeof(sph));
327 if (stage == Shader::Stage::Geometry) {
328 file.read(reinterpret_cast<char*>(&gp_passthrough_mask), sizeof(gp_passthrough_mask));
329 }
322 } 330 }
323} 331}
324 332
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 7b184d2f8..da4721e6b 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -350,6 +350,10 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
350 LOG_INFO(Render_Vulkan, "Device doesn't support viewport masks"); 350 LOG_INFO(Render_Vulkan, "Device doesn't support viewport masks");
351 } 351 }
352 352
353 if (!nv_geometry_shader_passthrough) {
354 LOG_INFO(Render_Vulkan, "Device doesn't support passthrough geometry shaders");
355 }
356
353 VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout; 357 VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
354 if (khr_uniform_buffer_standard_layout) { 358 if (khr_uniform_buffer_standard_layout) {
355 std430_layout = { 359 std430_layout = {
@@ -768,6 +772,8 @@ std::vector<const char*> Device::LoadExtensions(bool requires_surface) {
768 }; 772 };
769 test(nv_viewport_swizzle, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME, true); 773 test(nv_viewport_swizzle, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME, true);
770 test(nv_viewport_array2, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, true); 774 test(nv_viewport_array2, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME, true);
775 test(nv_geometry_shader_passthrough, VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME,
776 true);
771 test(khr_uniform_buffer_standard_layout, 777 test(khr_uniform_buffer_standard_layout,
772 VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true); 778 VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
773 test(khr_spirv_1_4, VK_KHR_SPIRV_1_4_EXTENSION_NAME, true); 779 test(khr_spirv_1_4, VK_KHR_SPIRV_1_4_EXTENSION_NAME, true);
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index a9c0a0e4d..d0adc0127 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -194,6 +194,11 @@ public:
194 return nv_viewport_array2; 194 return nv_viewport_array2;
195 } 195 }
196 196
197 /// Returns true if the device supports VK_NV_geometry_shader_passthrough.
198 bool IsNvGeometryShaderPassthroughSupported() const {
199 return nv_geometry_shader_passthrough;
200 }
201
197 /// Returns true if the device supports VK_KHR_uniform_buffer_standard_layout. 202 /// Returns true if the device supports VK_KHR_uniform_buffer_standard_layout.
198 bool IsKhrUniformBufferStandardLayoutSupported() const { 203 bool IsKhrUniformBufferStandardLayoutSupported() const {
199 return khr_uniform_buffer_standard_layout; 204 return khr_uniform_buffer_standard_layout;
@@ -363,6 +368,7 @@ private:
363 bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil. 368 bool is_blit_depth_stencil_supported{}; ///< Support for blitting from and to depth stencil.
364 bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle. 369 bool nv_viewport_swizzle{}; ///< Support for VK_NV_viewport_swizzle.
365 bool nv_viewport_array2{}; ///< Support for VK_NV_viewport_array2. 370 bool nv_viewport_array2{}; ///< Support for VK_NV_viewport_array2.
371 bool nv_geometry_shader_passthrough{}; ///< Support for VK_NV_geometry_shader_passthrough.
366 bool khr_uniform_buffer_standard_layout{}; ///< Support for scalar uniform buffer layouts. 372 bool khr_uniform_buffer_standard_layout{}; ///< Support for scalar uniform buffer layouts.
367 bool khr_spirv_1_4{}; ///< Support for VK_KHR_spirv_1_4. 373 bool khr_spirv_1_4{}; ///< Support for VK_KHR_spirv_1_4.
368 bool khr_workgroup_memory_explicit_layout{}; ///< Support for explicit workgroup layouts. 374 bool khr_workgroup_memory_explicit_layout{}; ///< Support for explicit workgroup layouts.