diff options
Diffstat (limited to 'src/shader_recompiler/backend/spirv/emit_spirv_image.cpp')
| -rw-r--r-- | src/shader_recompiler/backend/spirv/emit_spirv_image.cpp | 462 |
1 file changed, 462 insertions, 0 deletions
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp new file mode 100644 index 000000000..3588f052b --- /dev/null +++ b/src/shader_recompiler/backend/spirv/emit_spirv_image.cpp | |||
| @@ -0,0 +1,462 @@ | |||
| 1 | // Copyright 2021 yuzu Emulator Project | ||
| 2 | // Licensed under GPLv2 or any later version | ||
| 3 | // Refer to the license.txt file included. | ||
| 4 | |||
| 5 | #include <boost/container/static_vector.hpp> | ||
| 6 | |||
| 7 | #include "shader_recompiler/backend/spirv/emit_spirv.h" | ||
| 8 | #include "shader_recompiler/backend/spirv/emit_spirv_instructions.h" | ||
| 9 | #include "shader_recompiler/frontend/ir/modifiers.h" | ||
| 10 | |||
| 11 | namespace Shader::Backend::SPIRV { | ||
| 12 | namespace { | ||
// Accumulates SPIR-V image operand ids together with the matching
// ImageOperandsMask. The SPIR-V spec requires operands to appear in ascending
// mask-bit order (Bias < Lod < Grad < ConstOffset < Offset < ConstOffsets <
// Sample < MinLod); every constructor below adds operands in that order.
class ImageOperands {
public:
    // Operands for implicit/explicit sample instructions. When a LOD clamp is
    // combined with a bias, 'lod' is a two-component composite {bias, clamp}
    // whose components are extracted separately.
    explicit ImageOperands(EmitContext& ctx, bool has_bias, bool has_lod, bool has_lod_clamp,
                           Id lod, const IR::Value& offset) {
        if (has_bias) {
            const Id bias{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
            Add(spv::ImageOperandsMask::Bias, bias);
        }
        if (has_lod) {
            // NOTE(review): with has_lod_clamp set this extracts component 0 of
            // 'lod', which assumes callers pass a composite in that case -- the
            // non-fragment implicit-LOD path passes a scalar constant; confirm.
            const Id lod_value{has_lod_clamp ? ctx.OpCompositeExtract(ctx.F32[1], lod, 0) : lod};
            Add(spv::ImageOperandsMask::Lod, lod_value);
        }
        AddOffset(ctx, offset);
        if (has_lod_clamp) {
            const Id lod_clamp{has_bias ? ctx.OpCompositeExtract(ctx.F32[1], lod, 1) : lod};
            Add(spv::ImageOperandsMask::MinLod, lod_clamp);
        }
    }

    // Operands for gather instructions. A non-empty 'offset2' means per-texel
    // (PTP) offsets: both values must be fully-immediate U32x4 composites, which
    // are folded into a ConstOffsets constant array of four ivec2 offsets.
    explicit ImageOperands(EmitContext& ctx, const IR::Value& offset, const IR::Value& offset2) {
        if (offset2.IsEmpty()) {
            if (offset.IsEmpty()) {
                return;
            }
            Add(spv::ImageOperandsMask::Offset, ctx.Def(offset));
            return;
        }
        const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
        if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
            LOG_WARNING(Shader_SPIRV, "Not all arguments in PTP are immediate, ignoring");
            return;
        }
        const IR::Opcode opcode{values[0]->GetOpcode()};
        if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
            throw LogicError("Invalid PTP arguments");
        }
        // read(a, b) fetches immediate component b of composite a.
        auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }};

        const Id offsets{ctx.ConstantComposite(
            ctx.TypeArray(ctx.U32[2], ctx.Const(4U)), ctx.Const(read(0, 0), read(0, 1)),
            ctx.Const(read(0, 2), read(0, 3)), ctx.Const(read(1, 0), read(1, 1)),
            ctx.Const(read(1, 2), read(1, 3)))};
        Add(spv::ImageOperandsMask::ConstOffsets, offsets);
    }

    // Operands for texel fetch; each id is optional (invalid ids are skipped).
    explicit ImageOperands(Id offset, Id lod, Id ms) {
        if (Sirit::ValidId(lod)) {
            Add(spv::ImageOperandsMask::Lod, lod);
        }
        if (Sirit::ValidId(offset)) {
            Add(spv::ImageOperandsMask::Offset, offset);
        }
        if (Sirit::ValidId(ms)) {
            Add(spv::ImageOperandsMask::Sample, ms);
        }
    }

    // Operands for gradient sampling. 'derivates' is a composite of interleaved
    // (x, y) derivative pairs that is split into the two vectors the Grad
    // operand requires.
    explicit ImageOperands(EmitContext& ctx, bool has_lod_clamp, Id derivates, u32 num_derivates,
                           Id offset, Id lod_clamp) {
        if (!Sirit::ValidId(derivates)) {
            throw LogicError("Derivates must be present");
        }
        boost::container::static_vector<Id, 3> deriv_x_accum;
        boost::container::static_vector<Id, 3> deriv_y_accum;
        for (u32 i = 0; i < num_derivates; ++i) {
            deriv_x_accum.push_back(ctx.OpCompositeExtract(ctx.F32[1], derivates, i * 2));
            deriv_y_accum.push_back(ctx.OpCompositeExtract(ctx.F32[1], derivates, i * 2 + 1));
        }
        const Id derivates_X{ctx.OpCompositeConstruct(
            ctx.F32[num_derivates], std::span{deriv_x_accum.data(), deriv_x_accum.size()})};
        const Id derivates_Y{ctx.OpCompositeConstruct(
            ctx.F32[num_derivates], std::span{deriv_y_accum.data(), deriv_y_accum.size()})};
        Add(spv::ImageOperandsMask::Grad, derivates_X, derivates_Y);
        if (Sirit::ValidId(offset)) {
            Add(spv::ImageOperandsMask::Offset, offset);
        }
        if (has_lod_clamp) {
            Add(spv::ImageOperandsMask::MinLod, lod_clamp);
        }
    }

    // View over the accumulated operand ids, in insertion (mask-bit) order.
    std::span<const Id> Span() const noexcept {
        return std::span{operands.data(), operands.size()};
    }

    // Mask, or std::nullopt when no operand was added, so callers can omit the
    // optional image-operands argument entirely.
    std::optional<spv::ImageOperandsMask> MaskOptional() const noexcept {
        return mask != spv::ImageOperandsMask{} ? std::make_optional(mask) : std::nullopt;
    }

    // Raw mask (possibly empty) for instructions that always pass operands.
    spv::ImageOperandsMask Mask() const noexcept {
        return mask;
    }

private:
    // Adds an offset operand: fully-immediate offsets become signed ConstOffset
    // constants; anything else falls back to a runtime Offset operand.
    void AddOffset(EmitContext& ctx, const IR::Value& offset) {
        if (offset.IsEmpty()) {
            return;
        }
        if (offset.IsImmediate()) {
            Add(spv::ImageOperandsMask::ConstOffset, ctx.SConst(static_cast<s32>(offset.U32())));
            return;
        }
        IR::Inst* const inst{offset.InstRecursive()};
        if (inst->AreAllArgsImmediates()) {
            switch (inst->GetOpcode()) {
            case IR::Opcode::CompositeConstructU32x2:
                Add(spv::ImageOperandsMask::ConstOffset,
                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
                               static_cast<s32>(inst->Arg(1).U32())));
                return;
            case IR::Opcode::CompositeConstructU32x3:
                Add(spv::ImageOperandsMask::ConstOffset,
                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
                               static_cast<s32>(inst->Arg(1).U32()),
                               static_cast<s32>(inst->Arg(2).U32())));
                return;
            case IR::Opcode::CompositeConstructU32x4:
                Add(spv::ImageOperandsMask::ConstOffset,
                    ctx.SConst(static_cast<s32>(inst->Arg(0).U32()),
                               static_cast<s32>(inst->Arg(1).U32()),
                               static_cast<s32>(inst->Arg(2).U32()),
                               static_cast<s32>(inst->Arg(3).U32())));
                return;
            default:
                break;
            }
        }
        Add(spv::ImageOperandsMask::Offset, ctx.Def(offset));
    }

    // ORs 'new_mask' into the accumulated mask and appends one operand id.
    void Add(spv::ImageOperandsMask new_mask, Id value) {
        mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
                                                   static_cast<unsigned>(new_mask));
        operands.push_back(value);
    }

    // ORs 'new_mask' and appends two operand ids (e.g. Grad's dx and dy).
    void Add(spv::ImageOperandsMask new_mask, Id value_1, Id value_2) {
        mask = static_cast<spv::ImageOperandsMask>(static_cast<unsigned>(mask) |
                                                   static_cast<unsigned>(new_mask));
        operands.push_back(value_1);
        operands.push_back(value_2);
    }

    boost::container::static_vector<Id, 4> operands; // operand ids, mask-bit order
    spv::ImageOperandsMask mask{};                   // accumulated operand mask
};
| 159 | |||
| 160 | Id Texture(EmitContext& ctx, IR::TextureInstInfo info, [[maybe_unused]] const IR::Value& index) { | ||
| 161 | const TextureDefinition& def{ctx.textures.at(info.descriptor_index)}; | ||
| 162 | if (def.count > 1) { | ||
| 163 | const Id pointer{ctx.OpAccessChain(def.pointer_type, def.id, ctx.Def(index))}; | ||
| 164 | return ctx.OpLoad(def.sampled_type, pointer); | ||
| 165 | } else { | ||
| 166 | return ctx.OpLoad(def.sampled_type, def.id); | ||
| 167 | } | ||
| 168 | } | ||
| 169 | |||
| 170 | Id TextureImage(EmitContext& ctx, IR::TextureInstInfo info, const IR::Value& index) { | ||
| 171 | if (!index.IsImmediate() || index.U32() != 0) { | ||
| 172 | throw NotImplementedException("Indirect image indexing"); | ||
| 173 | } | ||
| 174 | if (info.type == TextureType::Buffer) { | ||
| 175 | const TextureBufferDefinition& def{ctx.texture_buffers.at(info.descriptor_index)}; | ||
| 176 | if (def.count > 1) { | ||
| 177 | throw NotImplementedException("Indirect texture sample"); | ||
| 178 | } | ||
| 179 | const Id sampler_id{def.id}; | ||
| 180 | const Id id{ctx.OpLoad(ctx.sampled_texture_buffer_type, sampler_id)}; | ||
| 181 | return ctx.OpImage(ctx.image_buffer_type, id); | ||
| 182 | } else { | ||
| 183 | const TextureDefinition& def{ctx.textures.at(info.descriptor_index)}; | ||
| 184 | if (def.count > 1) { | ||
| 185 | throw NotImplementedException("Indirect texture sample"); | ||
| 186 | } | ||
| 187 | return ctx.OpImage(def.image_type, ctx.OpLoad(def.sampled_type, def.id)); | ||
| 188 | } | ||
| 189 | } | ||
| 190 | |||
| 191 | Id Image(EmitContext& ctx, const IR::Value& index, IR::TextureInstInfo info) { | ||
| 192 | if (!index.IsImmediate() || index.U32() != 0) { | ||
| 193 | throw NotImplementedException("Indirect image indexing"); | ||
| 194 | } | ||
| 195 | if (info.type == TextureType::Buffer) { | ||
| 196 | const ImageBufferDefinition def{ctx.image_buffers.at(info.descriptor_index)}; | ||
| 197 | return ctx.OpLoad(def.image_type, def.id); | ||
| 198 | } else { | ||
| 199 | const ImageDefinition def{ctx.images.at(info.descriptor_index)}; | ||
| 200 | return ctx.OpLoad(def.image_type, def.id); | ||
| 201 | } | ||
| 202 | } | ||
| 203 | |||
| 204 | Id Decorate(EmitContext& ctx, IR::Inst* inst, Id sample) { | ||
| 205 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 206 | if (info.relaxed_precision != 0) { | ||
| 207 | ctx.Decorate(sample, spv::Decoration::RelaxedPrecision); | ||
| 208 | } | ||
| 209 | return sample; | ||
| 210 | } | ||
| 211 | |||
// Dispatches an image operation through either its sparse or non-sparse
// EmitContext member function, depending on whether the instruction has an
// associated GetSparseFromOp pseudo-operation.
template <typename MethodPtrType, typename... Args>
Id Emit(MethodPtrType sparse_ptr, MethodPtrType non_sparse_ptr, EmitContext& ctx, IR::Inst* inst,
        Id result_type, Args&&... args) {
    IR::Inst* const sparse{inst->GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
    if (!sparse) {
        // No residency query requested: emit the plain operation.
        return Decorate(ctx, inst, (ctx.*non_sparse_ptr)(result_type, std::forward<Args>(args)...));
    }
    // Sparse variants return a struct {residency code, texel value}.
    const Id struct_type{ctx.TypeStruct(ctx.U32[1], result_type)};
    const Id sample{(ctx.*sparse_ptr)(struct_type, std::forward<Args>(args)...)};
    const Id resident_code{ctx.OpCompositeExtract(ctx.U32[1], sample, 0U)};
    // Define the pseudo-op as the boolean residency result, then retire it.
    sparse->SetDefinition(ctx.OpImageSparseTexelsResident(ctx.U1, resident_code));
    sparse->Invalidate();
    Decorate(ctx, inst, sample);
    // The actual texel value is the second struct member.
    return ctx.OpCompositeExtract(result_type, sample, 1U);
}
| 227 | } // Anonymous namespace | ||
| 228 | |||
// Bindless image instructions are rewritten into their indexed ("bound")
// descriptor forms by earlier IR passes; reaching the SPIR-V backend with one
// of these opcodes is a logic error.
Id EmitBindlessImageSampleImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageGather(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageGatherDref(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageFetch(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageQueryDimensions(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageQueryLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageGradient(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageRead(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBindlessImageWrite(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

// Bound image instructions are likewise resolved to concrete descriptor
// accesses before code generation; they must never reach this backend.
Id EmitBoundImageSampleImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageGather(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageGatherDref(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageFetch(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageQueryDimensions(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageQueryLod(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageGradient(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageRead(EmitContext&) {
    throw LogicError("Unreachable instruction");
}

Id EmitBoundImageWrite(EmitContext&) {
    throw LogicError("Unreachable instruction");
}
| 324 | |||
// Implicit-LOD texture sample. Implicit derivatives only exist in fragment
// shaders in SPIR-V, so other stages are lowered to an explicit LOD of zero.
Id EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                              Id bias_lc, const IR::Value& offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    if (ctx.stage == Stage::Fragment) {
        const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0,
                                     bias_lc, offset);
        return Emit(&EmitContext::OpImageSparseSampleImplicitLod,
                    &EmitContext::OpImageSampleImplicitLod, ctx, inst, ctx.F32[4],
                    Texture(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
    } else {
        // We can't use implicit lods on non-fragment stages on SPIR-V. Maxwell hardware behaves as
        // if the lod was explicitly zero. This may change on Turing with implicit compute
        // derivatives
        const Id lod{ctx.Const(0.0f)};
        // NOTE(review): if info.has_lod_clamp is set here, ImageOperands will
        // component-extract from this scalar constant -- confirm the frontend
        // never produces that combination on non-fragment stages.
        const ImageOperands operands(ctx, false, true, info.has_lod_clamp != 0, lod, offset);
        return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
                    &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
                    Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
    }
}
| 345 | |||
| 346 | Id EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, | ||
| 347 | Id lod, const IR::Value& offset) { | ||
| 348 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 349 | const ImageOperands operands(ctx, false, true, false, lod, offset); | ||
| 350 | return Emit(&EmitContext::OpImageSparseSampleExplicitLod, | ||
| 351 | &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4], | ||
| 352 | Texture(ctx, info, index), coords, operands.Mask(), operands.Span()); | ||
| 353 | } | ||
| 354 | |||
// Depth-comparison (shadow) sample with implicit LOD; 'bias_lc' packs the
// optional bias and/or LOD clamp as described by the instruction's info flags.
Id EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index,
                                  Id coords, Id dref, Id bias_lc, const IR::Value& offset) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const ImageOperands operands(ctx, info.has_bias != 0, false, info.has_lod_clamp != 0, bias_lc,
                                 offset);
    return Emit(&EmitContext::OpImageSparseSampleDrefImplicitLod,
                &EmitContext::OpImageSampleDrefImplicitLod, ctx, inst, ctx.F32[1],
                Texture(ctx, info, index), coords, dref, operands.MaskOptional(), operands.Span());
}
| 364 | |||
| 365 | Id EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, | ||
| 366 | Id coords, Id dref, Id lod, const IR::Value& offset) { | ||
| 367 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 368 | const ImageOperands operands(ctx, false, true, false, lod, offset); | ||
| 369 | return Emit(&EmitContext::OpImageSparseSampleDrefExplicitLod, | ||
| 370 | &EmitContext::OpImageSampleDrefExplicitLod, ctx, inst, ctx.F32[1], | ||
| 371 | Texture(ctx, info, index), coords, dref, operands.Mask(), operands.Span()); | ||
| 372 | } | ||
| 373 | |||
// Texture gather of the component selected by info.gather_component. A
// non-empty 'offset2' requests per-texel (PTP) offsets.
Id EmitImageGather(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                   const IR::Value& offset, const IR::Value& offset2) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const ImageOperands operands(ctx, offset, offset2);
    return Emit(&EmitContext::OpImageSparseGather, &EmitContext::OpImageGather, ctx, inst,
                ctx.F32[4], Texture(ctx, info, index), coords, ctx.Const(info.gather_component),
                operands.MaskOptional(), operands.Span());
}
| 382 | |||
| 383 | Id EmitImageGatherDref(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, | ||
| 384 | const IR::Value& offset, const IR::Value& offset2, Id dref) { | ||
| 385 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 386 | const ImageOperands operands(ctx, offset, offset2); | ||
| 387 | return Emit(&EmitContext::OpImageSparseDrefGather, &EmitContext::OpImageDrefGather, ctx, inst, | ||
| 388 | ctx.F32[4], Texture(ctx, info, index), coords, dref, operands.MaskOptional(), | ||
| 389 | operands.Span()); | ||
| 390 | } | ||
| 391 | |||
// Texel fetch (no sampler). Texture buffers carry no mip levels, so any LOD
// operand is dropped for them before building the operand list.
Id EmitImageFetch(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id offset,
                  Id lod, Id ms) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    if (info.type == TextureType::Buffer) {
        // Invalidate the id so ImageOperands skips the Lod operand entirely.
        lod = Id{};
    }
    const ImageOperands operands(offset, lod, ms);
    return Emit(&EmitContext::OpImageSparseFetch, &EmitContext::OpImageFetch, ctx, inst, ctx.F32[4],
                TextureImage(ctx, info, index), coords, operands.MaskOptional(), operands.Span());
}
| 402 | |||
// Queries image dimensions at 'lod', returning a uvec4 of
// {width, height, depth, mip count} with unused components zero-filled.
Id EmitImageQueryDimensions(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id lod) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const Id image{TextureImage(ctx, info, index)};
    const Id zero{ctx.u32_zero_value};
    // Deferred so the level query is only emitted where it is actually used.
    const auto mips{[&] { return ctx.OpImageQueryLevels(ctx.U32[1], image); }};
    switch (info.type) {
    case TextureType::Color1D:
        // 1-component size query.
        return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[1], image, lod),
                                        zero, zero, mips());
    case TextureType::ColorArray1D:
    case TextureType::Color2D:
    case TextureType::ColorCube:
        // 2-component size query (second component is layer count for arrays).
        return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[2], image, lod),
                                        zero, mips());
    case TextureType::ColorArray2D:
    case TextureType::Color3D:
    case TextureType::ColorArrayCube:
        // 3-component size query.
        return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySizeLod(ctx.U32[3], image, lod),
                                        mips());
    case TextureType::Buffer:
        // Buffers have no LODs, so the non-Lod size query is used.
        // NOTE(review): OpImageQueryLevels on a Buffer-dimension image looks
        // questionable per the SPIR-V spec -- confirm drivers accept it.
        return ctx.OpCompositeConstruct(ctx.U32[4], ctx.OpImageQuerySize(ctx.U32[1], image), zero,
                                        zero, mips());
    }
    throw LogicError("Unspecified image type {}", info.type.Value());
}
| 428 | |||
| 429 | Id EmitImageQueryLod(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) { | ||
| 430 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 431 | const Id zero{ctx.f32_zero_value}; | ||
| 432 | const Id sampler{Texture(ctx, info, index)}; | ||
| 433 | return ctx.OpCompositeConstruct(ctx.F32[4], ctx.OpImageQueryLod(ctx.F32[2], sampler, coords), | ||
| 434 | zero, zero); | ||
| 435 | } | ||
| 436 | |||
// Explicit-gradient sample: lowered to OpImageSampleExplicitLod with a Grad
// operand built from the interleaved derivative pairs in 'derivates'.
Id EmitImageGradient(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords,
                     Id derivates, Id offset, Id lod_clamp) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    const ImageOperands operands(ctx, info.has_lod_clamp != 0, derivates, info.num_derivates,
                                 offset, lod_clamp);
    return Emit(&EmitContext::OpImageSparseSampleExplicitLod,
                &EmitContext::OpImageSampleExplicitLod, ctx, inst, ctx.F32[4],
                Texture(ctx, info, index), coords, operands.Mask(), operands.Span());
}
| 446 | |||
// Storage image read returning a uvec4. Hosts without typeless image load
// support get a null constant instead of a read from a Typeless-format image.
Id EmitImageRead(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords) {
    const auto info{inst->Flags<IR::TextureInstInfo>()};
    if (info.image_format == ImageFormat::Typeless && !ctx.profile.support_typeless_image_loads) {
        LOG_WARNING(Shader_SPIRV, "Typeless image read not supported by host");
        return ctx.ConstantNull(ctx.U32[4]);
    }
    return Emit(&EmitContext::OpImageSparseRead, &EmitContext::OpImageRead, ctx, inst, ctx.U32[4],
                Image(ctx, index, info), coords, std::nullopt, std::span<const Id>{});
}
| 456 | |||
| 457 | void EmitImageWrite(EmitContext& ctx, IR::Inst* inst, const IR::Value& index, Id coords, Id color) { | ||
| 458 | const auto info{inst->Flags<IR::TextureInstInfo>()}; | ||
| 459 | ctx.OpImageWrite(Image(ctx, index, info), coords, color); | ||
| 460 | } | ||
| 461 | |||
| 462 | } // namespace Shader::Backend::SPIRV | ||