author      2021-04-22 16:17:59 -0300
committer   2021-07-22 21:51:28 -0400
commit      d10cf55353175b13bed4cf18791e080ecb7fd95b
tree        9e26b823d7b48f532914a0511a157c14806debf5 /src/video_core
parent      shader: Refactor atomic_operations_global_memory
shader: Implement indexed textures
Diffstat (limited to 'src/video_core')

 src/video_core/renderer_vulkan/pipeline_helper.h        | 50
 src/video_core/renderer_vulkan/vk_compute_pipeline.cpp  | 46
 src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 63
 3 files changed, 95 insertions(+), 64 deletions(-)
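For context on the change below: texture and image descriptors may now describe an array of textures rather than a single one, and the raw handle for array element `index` is read from the constant buffer at `cbuf_offset + (index << size_shift)`. A minimal standalone sketch of that addressing, using a hypothetical simplified `Descriptor` rather than the real `Shader::TextureDescriptor`:

```cpp
#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Hypothetical, simplified descriptor: the real Shader::TextureDescriptor
// carries more fields (secondary cbuf indices, texture type, etc.).
struct Descriptor {
    u32 cbuf_offset; // byte offset of element 0's handle in the const buffer
    u32 size_shift;  // log2 of the stride between consecutive handles
    u32 count;       // number of array elements (1 for non-indexed textures)
};

// Address of the raw 32-bit handle for array element `index`, mirroring the
// `index_offset` computation introduced by this commit.
constexpr u64 HandleAddress(u64 cbuf_base, const Descriptor& desc, u32 index) {
    const u32 index_offset = index << desc.size_shift;
    return cbuf_base + desc.cbuf_offset + index_offset;
}

// Element 3 of an array whose handles start at +0x40 with a 4-byte stride
// lands at +0x4c.
static_assert(HandleAddress(0, Descriptor{0x40, 2, 8}, 3) == 0x4c);
```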
```diff
diff --git a/src/video_core/renderer_vulkan/pipeline_helper.h b/src/video_core/renderer_vulkan/pipeline_helper.h
index aaf9a735e..dd7d2cc0c 100644
--- a/src/video_core/renderer_vulkan/pipeline_helper.h
+++ b/src/video_core/renderer_vulkan/pipeline_helper.h
@@ -85,28 +85,30 @@ public:
     }
 
     void Add(const Shader::Info& info, VkShaderStageFlags stage) {
-        Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage, info.constant_buffer_descriptors.size());
-        Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage, info.storage_buffers_descriptors.size());
-        Add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, stage, info.texture_buffer_descriptors.size());
-        Add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, stage, info.image_buffer_descriptors.size());
-        Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage, info.texture_descriptors.size());
-        Add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, stage, info.image_descriptors.size());
+        Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage, info.constant_buffer_descriptors);
+        Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage, info.storage_buffers_descriptors);
+        Add(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, stage, info.texture_buffer_descriptors);
+        Add(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, stage, info.image_buffer_descriptors);
+        Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage, info.texture_descriptors);
+        Add(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, stage, info.image_descriptors);
     }
 
 private:
-    void Add(VkDescriptorType type, VkShaderStageFlags stage, size_t num) {
+    template <typename Descriptors>
+    void Add(VkDescriptorType type, VkShaderStageFlags stage, const Descriptors& descriptors) {
+        const size_t num{descriptors.size()};
         for (size_t i = 0; i < num; ++i) {
             bindings.push_back({
                 .binding = binding,
                 .descriptorType = type,
-                .descriptorCount = 1,
+                .descriptorCount = descriptors[i].count,
                 .stageFlags = stage,
                 .pImmutableSamplers = nullptr,
             });
             entries.push_back({
                 .dstBinding = binding,
                 .dstArrayElement = 0,
-                .descriptorCount = 1,
+                .descriptorCount = descriptors[i].count,
                 .descriptorType = type,
                 .offset = offset,
                 .stride = sizeof(DescriptorUpdateEntry),
@@ -126,21 +128,29 @@ private:
 inline void PushImageDescriptors(const Shader::Info& info, const VkSampler*& samplers,
                                  const ImageId*& image_view_ids, TextureCache& texture_cache,
                                  VKUpdateDescriptorQueue& update_descriptor_queue) {
-    image_view_ids += info.texture_buffer_descriptors.size();
-    image_view_ids += info.image_buffer_descriptors.size();
+    for (const auto& desc : info.texture_buffer_descriptors) {
+        image_view_ids += desc.count;
+    }
+    for (const auto& desc : info.image_buffer_descriptors) {
+        image_view_ids += desc.count;
+    }
     for (const auto& desc : info.texture_descriptors) {
-        const VkSampler sampler{*(samplers++)};
-        ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
-        const VkImageView vk_image_view{image_view.Handle(desc.type)};
-        update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
+        for (u32 index = 0; index < desc.count; ++index) {
+            const VkSampler sampler{*(samplers++)};
+            ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
+            const VkImageView vk_image_view{image_view.Handle(desc.type)};
+            update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
+        }
     }
     for (const auto& desc : info.image_descriptors) {
-        ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
-        if (desc.is_written) {
-            texture_cache.MarkModification(image_view.image_id);
+        for (u32 index = 0; index < desc.count; ++index) {
+            ImageView& image_view{texture_cache.GetImageView(*(image_view_ids++))};
+            if (desc.is_written) {
+                texture_cache.MarkModification(image_view.image_id);
+            }
+            const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
+            update_descriptor_queue.AddImage(vk_image_view);
         }
-        const VkImageView vk_image_view{image_view.StorageView(desc.type, desc.format)};
-        update_descriptor_queue.AddImage(vk_image_view);
     }
 }
 
```
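With `descriptorCount` now taken from each descriptor's `count`, an arrayed texture occupies a single binding slot whose descriptor count equals the array length, which is how Vulkan models arrayed bindings. A self-contained sketch of that idea, using a hypothetical `SimpleDescriptor` and a free function rather than the project's actual builder class:

```cpp
#include <cstdint>
#include <vector>
#include <vulkan/vulkan.h>

// Hypothetical stand-in: only the array length matters for this sketch.
struct SimpleDescriptor {
    std::uint32_t count;
};

// One arrayed binding per descriptor: an N-element texture array keeps a
// single binding number but consumes N descriptors, matching the commit's
// `.descriptorCount = descriptors[i].count` change.
std::vector<VkDescriptorSetLayoutBinding> MakeBindings(
    const std::vector<SimpleDescriptor>& descs, VkShaderStageFlags stage) {
    std::vector<VkDescriptorSetLayoutBinding> bindings;
    std::uint32_t binding_index = 0;
    for (const SimpleDescriptor& desc : descs) {
        bindings.push_back({
            .binding = binding_index++,
            .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            .descriptorCount = desc.count, // N array elements in one slot
            .stageFlags = stage,
            .pImmutableSamplers = nullptr,
        });
    }
    return bindings;
}
```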
```diff
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 45d837ca4..6e9f66262 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -91,35 +91,41 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
     const auto& qmd{kepler_compute.launch_description};
     const auto& cbufs{qmd.const_buffer_config};
     const bool via_header_index{qmd.linked_tsc != 0};
-    const auto read_handle{[&](const auto& desc) {
+    const auto read_handle{[&](const auto& desc, u32 index) {
         ASSERT(((qmd.const_buffer_enable_mask >> desc.cbuf_index) & 1) != 0);
+        const u32 index_offset{index << desc.size_shift};
+        const u32 offset{desc.cbuf_offset + index_offset};
         const GPUVAddr addr{cbufs[desc.cbuf_index].Address() + desc.cbuf_offset};
         if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
                       std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
             if (desc.has_secondary) {
                 ASSERT(((qmd.const_buffer_enable_mask >> desc.secondary_cbuf_index) & 1) != 0);
+                const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
                 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
-                                             desc.secondary_cbuf_offset};
+                                             secondary_offset};
                 const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
                 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
-                const u32 raw{lhs_raw | rhs_raw};
-                return TextureHandle{raw, via_header_index};
+                return TextureHandle{lhs_raw | rhs_raw, via_header_index};
             }
         }
         return TextureHandle{gpu_memory.Read<u32>(addr), via_header_index};
     }};
     const auto add_image{[&](const auto& desc) {
-        const TextureHandle handle{read_handle(desc)};
-        image_view_indices.push_back(handle.image);
+        for (u32 index = 0; index < desc.count; ++index) {
+            const TextureHandle handle{read_handle(desc, index)};
+            image_view_indices.push_back(handle.image);
+        }
     }};
     std::ranges::for_each(info.texture_buffer_descriptors, add_image);
     std::ranges::for_each(info.image_buffer_descriptors, add_image);
     for (const auto& desc : info.texture_descriptors) {
-        const TextureHandle handle{read_handle(desc)};
-        image_view_indices.push_back(handle.image);
+        for (u32 index = 0; index < desc.count; ++index) {
+            const TextureHandle handle{read_handle(desc, index)};
+            image_view_indices.push_back(handle.image);
 
-        Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
-        samplers.push_back(sampler->Handle());
+            Sampler* const sampler = texture_cache.GetComputeSampler(handle.sampler);
+            samplers.push_back(sampler->Handle());
+        }
     }
     std::ranges::for_each(info.image_descriptors, add_image);
 
@@ -130,16 +136,18 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
     ImageId* texture_buffer_ids{image_view_ids.data()};
     size_t index{};
     const auto add_buffer{[&](const auto& desc) {
-        ASSERT(desc.count == 1);
-        bool is_written{false};
-        if constexpr (std::is_same_v<decltype(desc), const Shader::ImageBufferDescriptor&>) {
-            is_written = desc.is_written;
+        for (u32 index = 0; index < desc.count; ++index) {
+            bool is_written{false};
+            if constexpr (std::is_same_v<decltype(desc), const Shader::ImageBufferDescriptor&>) {
+                is_written = desc.is_written;
+            }
+            ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
+            buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(),
+                                                  image_view.BufferSize(), image_view.format,
+                                                  is_written);
+            ++texture_buffer_ids;
+            ++index;
         }
-        ImageView& image_view = texture_cache.GetImageView(*texture_buffer_ids);
-        buffer_cache.BindComputeTextureBuffer(index, image_view.GpuAddr(), image_view.BufferSize(),
-                                              image_view.format, is_written);
-        ++texture_buffer_ids;
-        ++index;
     }};
     std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
     std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
```
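Both `add_image` and `add_buffer` now iterate `desc.count` times, so each descriptor array is flattened into consecutive entries of `image_view_indices`; consumers such as `PushImageDescriptors` can then walk the result with a single moving pointer. A small sketch of that flattening, under hypothetical stand-in types:

```cpp
#include <cstdint>
#include <vector>

using u32 = std::uint32_t;

// Hypothetical stand-in for a shader descriptor; `base_index` pretends to be
// the view index of array element 0.
struct Desc {
    u32 base_index;
    u32 count;
};

// Each descriptor contributes `count` consecutive entries, so the output is
// flat: a consumer can walk it with one moving pointer, as
// PushImageDescriptors does with image_view_ids.
std::vector<u32> Flatten(const std::vector<Desc>& descs) {
    std::vector<u32> indices;
    for (const Desc& desc : descs) {
        for (u32 index = 0; index < desc.count; ++index) {
            indices.push_back(desc.base_index + index);
        }
    }
    return indices;
}
```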
```diff
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 08f00b9ce..b7688aef9 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -161,23 +161,26 @@ void GraphicsPipeline::Configure(bool is_indexed) {
         const Shader::Info& info{stage_infos[stage]};
         buffer_cache.SetEnabledUniformBuffers(stage, info.constant_buffer_mask);
         buffer_cache.UnbindGraphicsStorageBuffers(stage);
-        size_t index{};
+        size_t ssbo_index{};
         for (const auto& desc : info.storage_buffers_descriptors) {
             ASSERT(desc.count == 1);
-            buffer_cache.BindGraphicsStorageBuffer(stage, index, desc.cbuf_index, desc.cbuf_offset,
-                                                   desc.is_written);
-            ++index;
+            buffer_cache.BindGraphicsStorageBuffer(stage, ssbo_index, desc.cbuf_index,
+                                                   desc.cbuf_offset, desc.is_written);
+            ++ssbo_index;
         }
         const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
-        const auto read_handle{[&](const auto& desc) {
+        const auto read_handle{[&](const auto& desc, u32 index) {
             ASSERT(cbufs[desc.cbuf_index].enabled);
-            const GPUVAddr addr{cbufs[desc.cbuf_index].address + desc.cbuf_offset};
+            const u32 index_offset{index << desc.size_shift};
+            const u32 offset{desc.cbuf_offset + index_offset};
+            const GPUVAddr addr{cbufs[desc.cbuf_index].address + offset};
             if constexpr (std::is_same_v<decltype(desc), const Shader::TextureDescriptor&> ||
                           std::is_same_v<decltype(desc), const Shader::TextureBufferDescriptor&>) {
                 if (desc.has_secondary) {
                     ASSERT(cbufs[desc.secondary_cbuf_index].enabled);
+                    const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
                     const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
-                                                 desc.secondary_cbuf_offset};
+                                                 second_offset};
                     const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
                     const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
                     const u32 raw{lhs_raw | rhs_raw};
@@ -187,17 +190,21 @@ void GraphicsPipeline::Configure(bool is_indexed) {
             return TextureHandle{gpu_memory.Read<u32>(addr), via_header_index};
         }};
         const auto add_image{[&](const auto& desc) {
-            const TextureHandle handle{read_handle(desc)};
-            image_view_indices.push_back(handle.image);
+            for (u32 index = 0; index < desc.count; ++index) {
+                const TextureHandle handle{read_handle(desc, index)};
+                image_view_indices.push_back(handle.image);
+            }
         }};
         std::ranges::for_each(info.texture_buffer_descriptors, add_image);
         std::ranges::for_each(info.image_buffer_descriptors, add_image);
         for (const auto& desc : info.texture_descriptors) {
-            const TextureHandle handle{read_handle(desc)};
-            image_view_indices.push_back(handle.image);
+            for (u32 index = 0; index < desc.count; ++index) {
+                const TextureHandle handle{read_handle(desc, index)};
+                image_view_indices.push_back(handle.image);
 
-            Sampler* const sampler{texture_cache.GetGraphicsSampler(handle.sampler)};
-            samplers.push_back(sampler->Handle());
+                Sampler* const sampler{texture_cache.GetGraphicsSampler(handle.sampler)};
+                samplers.push_back(sampler->Handle());
+            }
         }
         std::ranges::for_each(info.image_descriptors, add_image);
     }
@@ -208,24 +215,30 @@ void GraphicsPipeline::Configure(bool is_indexed) {
     for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
         size_t index{};
         const auto add_buffer{[&](const auto& desc) {
-            ASSERT(desc.count == 1);
-            bool is_written{false};
-            if constexpr (std::is_same_v<decltype(desc), const Shader::ImageBufferDescriptor&>) {
-                is_written = desc.is_written;
+            for (u32 index = 0; index < desc.count; ++index) {
+                bool is_written{false};
+                if constexpr (std::is_same_v<decltype(desc),
+                                             const Shader::ImageBufferDescriptor&>) {
+                    is_written = desc.is_written;
+                }
+                ImageView& image_view{texture_cache.GetImageView(*texture_buffer_index)};
+                buffer_cache.BindGraphicsTextureBuffer(stage, index, image_view.GpuAddr(),
+                                                       image_view.BufferSize(), image_view.format,
+                                                       is_written);
+                ++index;
+                ++texture_buffer_index;
             }
-            ImageView& image_view{texture_cache.GetImageView(*texture_buffer_index)};
-            buffer_cache.BindGraphicsTextureBuffer(stage, index, image_view.GpuAddr(),
-                                                   image_view.BufferSize(), image_view.format,
-                                                   is_written);
-            ++index;
-            ++texture_buffer_index;
         }};
         const Shader::Info& info{stage_infos[stage]};
         buffer_cache.UnbindGraphicsTextureBuffers(stage);
         std::ranges::for_each(info.texture_buffer_descriptors, add_buffer);
         std::ranges::for_each(info.image_buffer_descriptors, add_buffer);
-        texture_buffer_index += info.texture_descriptors.size();
-        texture_buffer_index += info.image_descriptors.size();
+        for (const auto& desc : info.texture_descriptors) {
+            texture_buffer_index += desc.count;
+        }
+        for (const auto& desc : info.image_descriptors) {
+            texture_buffer_index += desc.count;
+        }
     }
     buffer_cache.UpdateGraphicsBuffers(is_indexed);
 
```
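In the graphics path, the shared `texture_buffer_index` cursor likewise has to advance by `desc.count` per texture or image descriptor; otherwise later stages would read view IDs belonging to an earlier stage's arrays. A hypothetical mirror of that skip logic:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical descriptor stand-in; only the array length matters here.
struct Desc {
    std::uint32_t count;
};

// Advance the cursor into the flattened view-ID array by whole arrays:
// skipping desc.count entries per descriptor (not one per descriptor) keeps
// every stage aligned with the flattened layout.
std::size_t SkipDescriptors(std::size_t cursor, const std::vector<Desc>& descs) {
    for (const Desc& desc : descs) {
        cursor += desc.count;
    }
    return cursor;
}
```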