Diffstat
-rw-r--r--  src/video_core/CMakeLists.txt                             |   2
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp  | 271
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h    |  90
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h       |  32
4 files changed, 395 insertions(+), 0 deletions(-)
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 61ac0f23a..caf03c2ae 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -161,6 +161,8 @@ if (ENABLE_VULKAN)
         renderer_vulkan/vk_descriptor_pool.h
         renderer_vulkan/vk_device.cpp
         renderer_vulkan/vk_device.h
+        renderer_vulkan/vk_graphics_pipeline.cpp
+        renderer_vulkan/vk_graphics_pipeline.h
         renderer_vulkan/vk_image.cpp
         renderer_vulkan/vk_image.h
         renderer_vulkan/vk_memory_manager.cpp
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
new file mode 100644
index 000000000..2e0536bf6
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -0,0 +1,271 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include <algorithm>
+#include <array>
+#include <vector>
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "common/microprofile.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/maxwell_to_vk.h"
+#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
+#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
+#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
+
+namespace Vulkan {
+
+MICROPROFILE_DECLARE(Vulkan_PipelineCache);
+
+namespace {
+
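+// Translates one Maxwell stencil face into a VkStencilOpState. Compare mask, write mask and
+// reference are left at zero because they are supplied through dynamic state at draw time.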
+vk::StencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
+    return vk::StencilOpState(MaxwellToVK::StencilOp(face.action_stencil_fail),
+                              MaxwellToVK::StencilOp(face.action_depth_pass),
+                              MaxwellToVK::StencilOp(face.action_depth_fail),
+                              MaxwellToVK::ComparisonOp(face.test_func), 0, 0, 0);
+}
+
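+// Primitive restart is only valid with strip and fan topologies in Vulkan; list and patch
+// topologies are filtered out here.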
+bool SupportsPrimitiveRestart(vk::PrimitiveTopology topology) {
+    static constexpr std::array unsupported_topologies = {
+        vk::PrimitiveTopology::ePointList,
+        vk::PrimitiveTopology::eLineList,
+        vk::PrimitiveTopology::eTriangleList,
+        vk::PrimitiveTopology::eLineListWithAdjacency,
+        vk::PrimitiveTopology::eTriangleListWithAdjacency,
+        vk::PrimitiveTopology::ePatchList};
+    return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies),
+                     topology) == std::end(unsupported_topologies);
+}
+
+} // Anonymous namespace
+
+VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& scheduler,
+                                       VKDescriptorPool& descriptor_pool,
+                                       VKUpdateDescriptorQueue& update_descriptor_queue,
+                                       VKRenderPassCache& renderpass_cache,
+                                       const GraphicsPipelineCacheKey& key,
+                                       const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+                                       const SPIRVProgram& program)
+    : device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()},
+      descriptor_set_layout{CreateDescriptorSetLayout(bindings)},
+      descriptor_allocator{descriptor_pool, *descriptor_set_layout},
+      update_descriptor_queue{update_descriptor_queue}, layout{CreatePipelineLayout()},
+      descriptor_template{CreateDescriptorUpdateTemplate(program)},
+      modules{CreateShaderModules(program)},
+      renderpass{renderpass_cache.GetRenderPass(key.renderpass_params)},
+      pipeline{CreatePipeline(key.renderpass_params, program)} {}
+
+VKGraphicsPipeline::~VKGraphicsPipeline() = default;
+
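+// Allocates a descriptor set tied to the scheduler's current fence and queues its writes
+// through the descriptor update template. Returns a null handle when the shaders bind no
+// resources.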
+vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
+    if (!descriptor_template) {
+        return {};
+    }
+    const auto set = descriptor_allocator.Commit(scheduler.GetFence());
+    update_descriptor_queue.Send(*descriptor_template, set);
+    return set;
+}
+
+UniqueDescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
+    const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const {
+    const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci(
+        {}, static_cast<u32>(bindings.size()), bindings.data());
+
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld);
+}
+
+UniquePipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
+    const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout, 0,
+                                                          nullptr);
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    return dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld);
+}
+
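+// Collects the descriptor update entries of every active shader stage into a single template
+// targeting DESCRIPTOR_SET on this pipeline's layout; bindings and offsets accumulate across
+// stages.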
+UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
+    const SPIRVProgram& program) const {
+    std::vector<vk::DescriptorUpdateTemplateEntry> template_entries;
+    u32 binding = 0;
+    u32 offset = 0;
+    for (const auto& stage : program) {
+        if (stage) {
+            FillDescriptorUpdateTemplateEntries(device, stage->entries, binding, offset,
+                                                template_entries);
+        }
+    }
+    if (template_entries.empty()) {
+        // If the shader doesn't use descriptor sets, skip template creation.
+        return UniqueDescriptorUpdateTemplate{};
+    }
+
+    const vk::DescriptorUpdateTemplateCreateInfo template_ci(
+        {}, static_cast<u32>(template_entries.size()), template_entries.data(),
+        vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout,
+        vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET);
+
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld);
+}
+
+std::vector<UniqueShaderModule> VKGraphicsPipeline::CreateShaderModules(
+    const SPIRVProgram& program) const {
+    std::vector<UniqueShaderModule> modules;
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) {
+        const auto& stage = program[i];
+        if (!stage) {
+            continue;
+        }
+        const vk::ShaderModuleCreateInfo module_ci({}, stage->code.size() * sizeof(u32),
+                                                   stage->code.data());
+        modules.emplace_back(dev.createShaderModuleUnique(module_ci, nullptr, dld));
+    }
+    return modules;
+}
+
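+// Bakes the fixed state, the SPIR-V stages and the render pass into one monolithic VkPipeline.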
+UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
+                                                  const SPIRVProgram& program) const {
+    const auto& vi = fixed_state.vertex_input;
+    const auto& ia = fixed_state.input_assembly;
+    const auto& ds = fixed_state.depth_stencil;
+    const auto& cd = fixed_state.color_blending;
+    const auto& ts = fixed_state.tessellation;
+    const auto& rs = fixed_state.rasterizer;
+
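+    // Bindings with a non-zero divisor are instanced; their divisors are passed through
+    // VK_EXT_vertex_attribute_divisor when any are present.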
+    std::vector<vk::VertexInputBindingDescription> vertex_bindings;
+    std::vector<vk::VertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
+    for (std::size_t i = 0; i < vi.num_bindings; ++i) {
+        const auto& binding = vi.bindings[i];
+        const bool instanced = binding.divisor != 0;
+        const auto rate = instanced ? vk::VertexInputRate::eInstance : vk::VertexInputRate::eVertex;
+        vertex_bindings.emplace_back(binding.index, binding.stride, rate);
+        if (instanced) {
+            vertex_binding_divisors.emplace_back(binding.index, binding.divisor);
+        }
+    }
+
+    std::vector<vk::VertexInputAttributeDescription> vertex_attributes;
+    const auto& input_attributes = program[0]->entries.attributes;
+    for (std::size_t i = 0; i < vi.num_attributes; ++i) {
+        const auto& attribute = vi.attributes[i];
+        if (input_attributes.find(attribute.index) == input_attributes.end()) {
+            // Skip attributes not used by the vertex shader.
+            continue;
+        }
+        vertex_attributes.emplace_back(attribute.index, attribute.buffer,
+                                       MaxwellToVK::VertexFormat(attribute.type, attribute.size),
+                                       attribute.offset);
+    }
+
+    vk::PipelineVertexInputStateCreateInfo vertex_input_ci(
+        {}, static_cast<u32>(vertex_bindings.size()), vertex_bindings.data(),
+        static_cast<u32>(vertex_attributes.size()), vertex_attributes.data());
+
+    const vk::PipelineVertexInputDivisorStateCreateInfoEXT vertex_input_divisor_ci(
+        static_cast<u32>(vertex_binding_divisors.size()), vertex_binding_divisors.data());
+    if (!vertex_binding_divisors.empty()) {
+        vertex_input_ci.pNext = &vertex_input_divisor_ci;
+    }
+
+    const auto primitive_topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
+    const vk::PipelineInputAssemblyStateCreateInfo input_assembly_ci(
+        {}, primitive_topology,
+        ia.primitive_restart_enable && SupportsPrimitiveRestart(primitive_topology));
+
+    const vk::PipelineTessellationStateCreateInfo tessellation_ci({}, ts.patch_control_points);
+
+    const vk::PipelineViewportStateCreateInfo viewport_ci({}, Maxwell::NumViewports, nullptr,
+                                                          Maxwell::NumViewports, nullptr);
+
+    // TODO(Rodrigo): Find out what the default register value for front face is.
+    const vk::PipelineRasterizationStateCreateInfo rasterizer_ci(
+        {}, rs.depth_clamp_enable, false, vk::PolygonMode::eFill,
+        rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : vk::CullModeFlagBits::eNone,
+        rs.cull_enable ? MaxwellToVK::FrontFace(rs.front_face) : vk::FrontFace::eCounterClockwise,
+        rs.depth_bias_enable, 0.0f, 0.0f, 0.0f, 1.0f);
+
+    const vk::PipelineMultisampleStateCreateInfo multisampling_ci(
+        {}, vk::SampleCountFlagBits::e1, false, 0.0f, nullptr, false, false);
+
+    const vk::CompareOp depth_test_compare = ds.depth_test_enable
+                                                 ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
+                                                 : vk::CompareOp::eAlways;
+
+    const vk::PipelineDepthStencilStateCreateInfo depth_stencil_ci(
+        {}, ds.depth_test_enable, ds.depth_write_enable, depth_test_compare, ds.depth_bounds_enable,
+        ds.stencil_enable, GetStencilFaceState(ds.front_stencil),
+        GetStencilFaceState(ds.back_stencil), 0.0f, 0.0f);
+
+    std::array<vk::PipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
+    const std::size_t num_attachments =
+        std::min(cd.attachments_count, renderpass_params.color_attachments.size());
+    for (std::size_t i = 0; i < num_attachments; ++i) {
+        constexpr std::array component_table{
+            vk::ColorComponentFlagBits::eR, vk::ColorComponentFlagBits::eG,
+            vk::ColorComponentFlagBits::eB, vk::ColorComponentFlagBits::eA};
+        const auto& blend = cd.attachments[i];
+
+        vk::ColorComponentFlags color_components{};
+        for (std::size_t j = 0; j < component_table.size(); ++j) {
+            if (blend.components[j]) {
+                color_components |= component_table[j];
+            }
+        }
+
+        cb_attachments[i] = vk::PipelineColorBlendAttachmentState(
+            blend.enable, MaxwellToVK::BlendFactor(blend.src_rgb_func),
+            MaxwellToVK::BlendFactor(blend.dst_rgb_func),
+            MaxwellToVK::BlendEquation(blend.rgb_equation),
+            MaxwellToVK::BlendFactor(blend.src_a_func), MaxwellToVK::BlendFactor(blend.dst_a_func),
+            MaxwellToVK::BlendEquation(blend.a_equation), color_components);
+    }
+    const vk::PipelineColorBlendStateCreateInfo color_blending_ci({}, false, vk::LogicOp::eCopy,
+                                                                  static_cast<u32>(num_attachments),
+                                                                  cb_attachments.data(), {});
+
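+    // These states are supplied dynamically at draw time, so changing them does not require
+    // building a new pipeline.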
+    constexpr std::array dynamic_states = {
+        vk::DynamicState::eViewport,         vk::DynamicState::eScissor,
+        vk::DynamicState::eDepthBias,        vk::DynamicState::eBlendConstants,
+        vk::DynamicState::eDepthBounds,      vk::DynamicState::eStencilCompareMask,
+        vk::DynamicState::eStencilWriteMask, vk::DynamicState::eStencilReference};
+    const vk::PipelineDynamicStateCreateInfo dynamic_state_ci(
+        {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data());
+
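+    // Stages that use warp intrinsics request the guest warp size as their required subgroup
+    // size (VK_EXT_subgroup_size_control) when the device can honour it for that stage.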
+    vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
+    subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
+
+    std::vector<vk::PipelineShaderStageCreateInfo> shader_stages;
+    std::size_t module_index = 0;
+    for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
+        if (!program[stage]) {
+            continue;
+        }
+        const auto stage_enum = static_cast<Tegra::Engines::ShaderType>(stage);
+        const auto vk_stage = MaxwellToVK::ShaderStage(stage_enum);
+        auto& stage_ci = shader_stages.emplace_back(vk::PipelineShaderStageCreateFlags{}, vk_stage,
+                                                    *modules[module_index++], "main", nullptr);
+        if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(vk_stage)) {
+            stage_ci.pNext = &subgroup_size_ci;
+        }
+    }
+
+    const vk::GraphicsPipelineCreateInfo create_info(
+        {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input_ci,
+        &input_assembly_ci, &tessellation_ci, &viewport_ci, &rasterizer_ci, &multisampling_ci,
+        &depth_stencil_ci, &color_blending_ci, &dynamic_state_ci, *layout, renderpass, 0, {}, 0);
+
+    const auto dev = device.GetLogical();
+    const auto& dld = device.GetDispatchLoader();
+    return dev.createGraphicsPipelineUnique(nullptr, create_info, nullptr, dld);
+}
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
new file mode 100644
index 000000000..4f5e4ea2d
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -0,0 +1,90 @@
+// Copyright 2019 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <array>
+#include <memory>
+#include <optional>
+#include <unordered_map>
+#include <vector>
+
+#include "video_core/engines/maxwell_3d.h"
+#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
+#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
+#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
+
+namespace Vulkan {
+
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
+struct GraphicsPipelineCacheKey;
+
+class VKDescriptorPool;
+class VKDevice;
+class VKRenderPassCache;
+class VKScheduler;
+class VKUpdateDescriptorQueue;
+
+using SPIRVProgram = std::array<std::optional<SPIRVShader>, Maxwell::MaxShaderStage>;
+
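+// Owns a compiled graphics pipeline together with everything needed to bind it: the descriptor
+// set layout, pipeline layout, descriptor update template and shader modules.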
+class VKGraphicsPipeline final {
+public:
+    explicit VKGraphicsPipeline(const VKDevice& device, VKScheduler& scheduler,
+                                VKDescriptorPool& descriptor_pool,
+                                VKUpdateDescriptorQueue& update_descriptor_queue,
+                                VKRenderPassCache& renderpass_cache,
+                                const GraphicsPipelineCacheKey& key,
+                                const std::vector<vk::DescriptorSetLayoutBinding>& bindings,
+                                const SPIRVProgram& program);
+    ~VKGraphicsPipeline();
+
+    vk::DescriptorSet CommitDescriptorSet();
+
+    vk::Pipeline GetHandle() const {
+        return *pipeline;
+    }
+
+    vk::PipelineLayout GetLayout() const {
+        return *layout;
+    }
+
+    vk::RenderPass GetRenderPass() const {
+        return renderpass;
+    }
+
+private:
+    UniqueDescriptorSetLayout CreateDescriptorSetLayout(
+        const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const;
+
+    UniquePipelineLayout CreatePipelineLayout() const;
+
+    UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate(
+        const SPIRVProgram& program) const;
+
+    std::vector<UniqueShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
+
+    UniquePipeline CreatePipeline(const RenderPassParams& renderpass_params,
+                                  const SPIRVProgram& program) const;
+
+    const VKDevice& device;
+    VKScheduler& scheduler;
+    const FixedPipelineState fixed_state;
+    const u64 hash;
+
+    UniqueDescriptorSetLayout descriptor_set_layout;
+    DescriptorAllocator descriptor_allocator;
+    VKUpdateDescriptorQueue& update_descriptor_queue;
+    UniquePipelineLayout layout;
+    UniqueDescriptorUpdateTemplate descriptor_template;
+    std::vector<UniqueShaderModule> modules;
+
+    vk::RenderPass renderpass;
+    UniquePipeline pipeline;
+};
+
+} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 33b1a1d23..e49ed135d 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -8,9 +8,12 @@
 #include <cstddef>
 #include <vector>
 
+#include <boost/functional/hash.hpp>
+
 #include "common/common_types.h"
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_shader_decompiler.h"
 #include "video_core/shader/shader_ir.h"
 
@@ -18,6 +21,28 @@ namespace Vulkan {
 
 class VKDevice;
 
+using Maxwell = Tegra::Engines::Maxwell3D::Regs;
+
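+// Identifies a graphics pipeline by its fixed (non-dynamic) state, the GPU addresses of its
+// shader stages and its render pass parameters. Hashable so it can serve as a cache map key.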
+struct GraphicsPipelineCacheKey {
+    FixedPipelineState fixed_state;
+    std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
+    RenderPassParams renderpass_params;
+
+    std::size_t Hash() const noexcept {
+        std::size_t hash = fixed_state.Hash();
+        for (const auto& shader : shaders) {
+            boost::hash_combine(hash, shader);
+        }
+        boost::hash_combine(hash, renderpass_params.Hash());
+        return hash;
+    }
+
+    bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
+        return std::tie(fixed_state, shaders, renderpass_params) ==
+               std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params);
+    }
+};
+
 struct ComputePipelineCacheKey {
     GPUVAddr shader{};
     u32 shared_memory_size{};
@@ -42,6 +67,13 @@ struct ComputePipelineCacheKey {
 namespace std {
 
 template <>
+struct hash<Vulkan::GraphicsPipelineCacheKey> {
+    std::size_t operator()(const Vulkan::GraphicsPipelineCacheKey& k) const noexcept {
+        return k.Hash();
+    }
+};
+
+template <>
 struct hash<Vulkan::ComputePipelineCacheKey> {
     std::size_t operator()(const Vulkan::ComputePipelineCacheKey& k) const noexcept {
         return k.Hash();