author     Rodrigo Locatti    2020-04-13 17:08:04 -0300
committer  GitHub             2020-04-13 17:08:04 -0300
commit     7e4a132a77fd1c2d48b1410ac1f1da81869daa90 (patch)
tree       79f2f04b5d7233eb3b57f86af3281f6ef5f30282
parent     Merge pull request #3651 from ReinUsesLisp/line-widths (diff)
parent     renderer_vulkan: Drop Vulkan-Hpp (diff)
download   yuzu-7e4a132a77fd1c2d48b1410ac1f1da81869daa90.tar.gz
           yuzu-7e4a132a77fd1c2d48b1410ac1f1da81869daa90.tar.xz
           yuzu-7e4a132a77fd1c2d48b1410ac1f1da81869daa90.zip
Merge pull request #3636 from ReinUsesLisp/drop-vk-hpp
renderer_vulkan: Drop Vulkan-Hpp
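The change is mechanical: every Vulkan-Hpp type (vk::Filter, vk::Format, ...) and scoped enumerant (vk::Filter::eLinear) in the renderer becomes its raw Vulkan C counterpart (VkFilter, VK_FILTER_LINEAR), and vulkan.hpp's dispatcher is replaced by the project's own wrapper.h (included further down in this diff). As an illustration only, not code from this commit, the same translation applied to a small sampler-creation helper looks like this; the helper name and the omitted error handling are invented for the example:

    #include <vulkan/vulkan.h>

    // Hypothetical helper, illustration only: the Vulkan-Hpp call
    //   device.createSamplerUnique(ci);
    // becomes an explicit C call against a plain VkDevice.
    VkSampler MakeLinearSampler(VkDevice device) {
        VkSamplerCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; // the C API needs sType set by hand
        ci.magFilter = VK_FILTER_LINEAR;                  // was vk::Filter::eLinear
        ci.minFilter = VK_FILTER_LINEAR;
        ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT; // was vk::SamplerAddressMode::eRepeat
        ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
        ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
        VkSampler sampler = VK_NULL_HANDLE;
        vkCreateSampler(device, &ci, nullptr, &sampler);  // error handling omitted in this sketch
        return sampler;
    }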
Diffstat
m---------  externals/Vulkan-Headers                                  |    0
-rw-r--r--  src/video_core/CMakeLists.txt                             |    1
-rw-r--r--  src/video_core/renderer_vulkan/declarations.h             |   60
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp          |  523
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.h            |   37
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp        |  157
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h          |   10
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp         |  623
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h           |   34
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp        |  149
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h          |   12
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp        |  241
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h          |   29
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp    |  129
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h      |   30
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp     |   92
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h       |   19
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp              |  570
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.h                |   78
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp   |  378
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h     |   38
-rw-r--r--  src/video_core/renderer_vulkan/vk_image.cpp               |   61
-rw-r--r--  src/video_core/renderer_vulkan/vk_image.h                 |   40
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp      |  104
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.h        |   34
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp      |   93
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h        |    6
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp         |   56
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h           |   14
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp          |  285
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h            |   15
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.cpp    |  120
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.h      |    8
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp    |   87
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.h      |   10
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp       |   76
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.h         |    8
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp           |   87
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h             |   29
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp   |    2
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.cpp         |   20
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.h           |    4
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp |   25
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h   |    4
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp       |   64
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.h         |   18
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp           |  155
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.h             |   32
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp       |  362
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h         |   64
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.cpp   |   18
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h     |   42
52 files changed, 2881 insertions, 2272 deletions
diff --git a/externals/Vulkan-Headers b/externals/Vulkan-Headers
-Subproject d42d0747ee1b7a6726fb8948444b4993f9dcd2e
+Subproject 0e78ffd1dcfc3e9f14a966b9660dbc59bd967c5
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index f7febd6a2..258d58eba 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -156,7 +156,6 @@ add_library(video_core STATIC
 
 if (ENABLE_VULKAN)
     target_sources(video_core PRIVATE
-        renderer_vulkan/declarations.h
         renderer_vulkan/fixed_pipeline_state.cpp
         renderer_vulkan/fixed_pipeline_state.h
         renderer_vulkan/maxwell_to_vk.cpp
diff --git a/src/video_core/renderer_vulkan/declarations.h b/src/video_core/renderer_vulkan/declarations.h
deleted file mode 100644
index 89a035ca4..000000000
--- a/src/video_core/renderer_vulkan/declarations.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2019 yuzu Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-namespace vk {
-class DispatchLoaderDynamic;
-}
-
-namespace Vulkan {
-constexpr vk::DispatchLoaderDynamic* dont_use_me_dld = nullptr;
-}
-
-#define VULKAN_HPP_DEFAULT_DISPATCHER (*::Vulkan::dont_use_me_dld)
-#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0
-#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
-#include <vulkan/vulkan.hpp>
-
-namespace Vulkan {
-
-// vulkan.hpp unique handlers use DispatchLoaderStatic
-template <typename T>
-using UniqueHandle = vk::UniqueHandle<T, vk::DispatchLoaderDynamic>;
-
-using UniqueAccelerationStructureNV = UniqueHandle<vk::AccelerationStructureNV>;
-using UniqueBuffer = UniqueHandle<vk::Buffer>;
-using UniqueBufferView = UniqueHandle<vk::BufferView>;
-using UniqueCommandBuffer = UniqueHandle<vk::CommandBuffer>;
-using UniqueCommandPool = UniqueHandle<vk::CommandPool>;
-using UniqueDescriptorPool = UniqueHandle<vk::DescriptorPool>;
-using UniqueDescriptorSet = UniqueHandle<vk::DescriptorSet>;
-using UniqueDescriptorSetLayout = UniqueHandle<vk::DescriptorSetLayout>;
-using UniqueDescriptorUpdateTemplate = UniqueHandle<vk::DescriptorUpdateTemplate>;
-using UniqueDevice = UniqueHandle<vk::Device>;
-using UniqueDeviceMemory = UniqueHandle<vk::DeviceMemory>;
-using UniqueEvent = UniqueHandle<vk::Event>;
-using UniqueFence = UniqueHandle<vk::Fence>;
-using UniqueFramebuffer = UniqueHandle<vk::Framebuffer>;
-using UniqueImage = UniqueHandle<vk::Image>;
-using UniqueImageView = UniqueHandle<vk::ImageView>;
-using UniqueInstance = UniqueHandle<vk::Instance>;
-using UniqueIndirectCommandsLayoutNVX = UniqueHandle<vk::IndirectCommandsLayoutNVX>;
-using UniqueObjectTableNVX = UniqueHandle<vk::ObjectTableNVX>;
-using UniquePipeline = UniqueHandle<vk::Pipeline>;
-using UniquePipelineCache = UniqueHandle<vk::PipelineCache>;
-using UniquePipelineLayout = UniqueHandle<vk::PipelineLayout>;
-using UniqueQueryPool = UniqueHandle<vk::QueryPool>;
-using UniqueRenderPass = UniqueHandle<vk::RenderPass>;
-using UniqueSampler = UniqueHandle<vk::Sampler>;
-using UniqueSamplerYcbcrConversion = UniqueHandle<vk::SamplerYcbcrConversion>;
-using UniqueSemaphore = UniqueHandle<vk::Semaphore>;
-using UniqueShaderModule = UniqueHandle<vk::ShaderModule>;
-using UniqueSurfaceKHR = UniqueHandle<vk::SurfaceKHR>;
-using UniqueSwapchainKHR = UniqueHandle<vk::SwapchainKHR>;
-using UniqueValidationCacheEXT = UniqueHandle<vk::ValidationCacheEXT>;
-using UniqueDebugReportCallbackEXT = UniqueHandle<vk::DebugReportCallbackEXT>;
-using UniqueDebugUtilsMessengerEXT = UniqueHandle<vk::DebugUtilsMessengerEXT>;
-
-} // namespace Vulkan
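The deleted header existed to force vulkan.hpp's unique handles onto a dynamic dispatcher. After the drop, equivalent lifetime management has to come from hand-rolled RAII types around raw Vk handles. A minimal sketch of that idea follows; the names (DeviceDispatch, UniqueSemaphore) are illustrative and do not reproduce yuzu's actual wrapper.h API:

    #include <utility>
    #include <vulkan/vulkan.h>

    // Hypothetical dispatch table; the real one loads entry points via
    // vkGetDeviceProcAddr instead of linking the loader statically.
    struct DeviceDispatch {
        PFN_vkDestroySemaphore vkDestroySemaphore = nullptr;
    };

    // RAII owner for a raw handle: remembers the device and the
    // dynamically loaded destroy function, moves but never copies.
    class UniqueSemaphore {
    public:
        UniqueSemaphore() = default;
        UniqueSemaphore(VkSemaphore handle, VkDevice device, const DeviceDispatch& dld)
            : handle{handle}, device{device}, dld{&dld} {}
        ~UniqueSemaphore() {
            if (handle != VK_NULL_HANDLE) {
                dld->vkDestroySemaphore(device, handle, nullptr);
            }
        }
        UniqueSemaphore(const UniqueSemaphore&) = delete;
        UniqueSemaphore(UniqueSemaphore&& rhs) noexcept
            : handle{std::exchange(rhs.handle, VK_NULL_HANDLE)}, device{rhs.device},
              dld{rhs.dld} {}
        VkSemaphore operator*() const { return handle; }

    private:
        VkSemaphore handle = VK_NULL_HANDLE;
        VkDevice device = VK_NULL_HANDLE;
        const DeviceDispatch* dld = nullptr;
    };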
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index 7480cb7c3..143478863 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -2,13 +2,15 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <iterator>
+
 #include "common/assert.h"
 #include "common/common_types.h"
 #include "common/logging/log.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/renderer_vulkan/vk_device.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/surface.h"
 
 namespace Vulkan::MaxwellToVK {
@@ -17,88 +19,89 @@ using Maxwell = Tegra::Engines::Maxwell3D::Regs;
 
 namespace Sampler {
 
-vk::Filter Filter(Tegra::Texture::TextureFilter filter) {
+VkFilter Filter(Tegra::Texture::TextureFilter filter) {
     switch (filter) {
     case Tegra::Texture::TextureFilter::Linear:
-        return vk::Filter::eLinear;
+        return VK_FILTER_LINEAR;
     case Tegra::Texture::TextureFilter::Nearest:
-        return vk::Filter::eNearest;
+        return VK_FILTER_NEAREST;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler filter={}", static_cast<u32>(filter));
     return {};
 }
 
-vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
+VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter) {
     switch (mipmap_filter) {
     case Tegra::Texture::TextureMipmapFilter::None:
         // TODO(Rodrigo): None seems to be mapped to OpenGL's mag and min filters without mipmapping
         // (e.g. GL_NEAREST and GL_LINEAR). Vulkan doesn't have such a thing, find out if we have to
         // use an image view with a single mipmap level to emulate this.
-        return vk::SamplerMipmapMode::eLinear;
+        return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+        ;
     case Tegra::Texture::TextureMipmapFilter::Linear:
-        return vk::SamplerMipmapMode::eLinear;
+        return VK_SAMPLER_MIPMAP_MODE_LINEAR;
     case Tegra::Texture::TextureMipmapFilter::Nearest:
-        return vk::SamplerMipmapMode::eNearest;
+        return VK_SAMPLER_MIPMAP_MODE_NEAREST;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler mipmap mode={}", static_cast<u32>(mipmap_filter));
     return {};
 }
 
-vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
-                                Tegra::Texture::TextureFilter filter) {
+VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
+                              Tegra::Texture::TextureFilter filter) {
     switch (wrap_mode) {
     case Tegra::Texture::WrapMode::Wrap:
-        return vk::SamplerAddressMode::eRepeat;
+        return VK_SAMPLER_ADDRESS_MODE_REPEAT;
     case Tegra::Texture::WrapMode::Mirror:
-        return vk::SamplerAddressMode::eMirroredRepeat;
+        return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
    case Tegra::Texture::WrapMode::ClampToEdge:
-        return vk::SamplerAddressMode::eClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    case Tegra::Texture::WrapMode::Border:
-        return vk::SamplerAddressMode::eClampToBorder;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
    case Tegra::Texture::WrapMode::Clamp:
-        if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) {
+        if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
             // Nvidia's Vulkan driver defaults to GL_CLAMP on invalid enumerations, we can hack this
             // by sending an invalid enumeration.
-            return static_cast<vk::SamplerAddressMode>(0xcafe);
+            return static_cast<VkSamplerAddressMode>(0xcafe);
         }
         // TODO(Rodrigo): Emulate GL_CLAMP properly on other vendors
         switch (filter) {
         case Tegra::Texture::TextureFilter::Nearest:
-            return vk::SamplerAddressMode::eClampToEdge;
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
         case Tegra::Texture::TextureFilter::Linear:
-            return vk::SamplerAddressMode::eClampToBorder;
+            return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
         }
         UNREACHABLE();
-        return vk::SamplerAddressMode::eClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    case Tegra::Texture::WrapMode::MirrorOnceClampToEdge:
-        return vk::SamplerAddressMode::eMirrorClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    case Tegra::Texture::WrapMode::MirrorOnceBorder:
         UNIMPLEMENTED();
-        return vk::SamplerAddressMode::eMirrorClampToEdge;
+        return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    default:
         UNIMPLEMENTED_MSG("Unimplemented wrap mode={}", static_cast<u32>(wrap_mode));
         return {};
     }
 }
 
-vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
+VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func) {
     switch (depth_compare_func) {
     case Tegra::Texture::DepthCompareFunc::Never:
-        return vk::CompareOp::eNever;
+        return VK_COMPARE_OP_NEVER;
     case Tegra::Texture::DepthCompareFunc::Less:
-        return vk::CompareOp::eLess;
+        return VK_COMPARE_OP_LESS;
     case Tegra::Texture::DepthCompareFunc::LessEqual:
-        return vk::CompareOp::eLessOrEqual;
+        return VK_COMPARE_OP_LESS_OR_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Equal:
-        return vk::CompareOp::eEqual;
+        return VK_COMPARE_OP_EQUAL;
     case Tegra::Texture::DepthCompareFunc::NotEqual:
-        return vk::CompareOp::eNotEqual;
+        return VK_COMPARE_OP_NOT_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Greater:
-        return vk::CompareOp::eGreater;
+        return VK_COMPARE_OP_GREATER;
     case Tegra::Texture::DepthCompareFunc::GreaterEqual:
-        return vk::CompareOp::eGreaterOrEqual;
+        return VK_COMPARE_OP_GREATER_OR_EQUAL;
     case Tegra::Texture::DepthCompareFunc::Always:
-        return vk::CompareOp::eAlways;
+        return VK_COMPARE_OP_ALWAYS;
     }
     UNIMPLEMENTED_MSG("Unimplemented sampler depth compare function={}",
                       static_cast<u32>(depth_compare_func));
@@ -112,92 +115,92 @@ namespace {
 enum : u32 { Attachable = 1, Storage = 2 };
 
 struct FormatTuple {
-    vk::Format format; ///< Vulkan format
-    int usage;         ///< Describes image format usage
+    VkFormat format;   ///< Vulkan format
+    int usage = 0;     ///< Describes image format usage
 } constexpr tex_format_tuples[] = {
-    {vk::Format::eA8B8G8R8UnormPack32, Attachable | Storage},    // ABGR8U
-    {vk::Format::eA8B8G8R8SnormPack32, Attachable | Storage},    // ABGR8S
-    {vk::Format::eA8B8G8R8UintPack32, Attachable | Storage},     // ABGR8UI
-    {vk::Format::eB5G6R5UnormPack16, {}},                        // B5G6R5U
-    {vk::Format::eA2B10G10R10UnormPack32, Attachable | Storage}, // A2B10G10R10U
-    {vk::Format::eA1R5G5B5UnormPack16, Attachable},              // A1B5G5R5U (flipped with swizzle)
-    {vk::Format::eR8Unorm, Attachable | Storage},                // R8U
-    {vk::Format::eR8Uint, Attachable | Storage},                 // R8UI
-    {vk::Format::eR16G16B16A16Sfloat, Attachable | Storage},     // RGBA16F
-    {vk::Format::eR16G16B16A16Unorm, Attachable | Storage},      // RGBA16U
-    {vk::Format::eR16G16B16A16Snorm, Attachable | Storage},      // RGBA16S
-    {vk::Format::eR16G16B16A16Uint, Attachable | Storage},       // RGBA16UI
-    {vk::Format::eB10G11R11UfloatPack32, Attachable | Storage},  // R11FG11FB10F
-    {vk::Format::eR32G32B32A32Uint, Attachable | Storage},       // RGBA32UI
-    {vk::Format::eBc1RgbaUnormBlock, {}},                        // DXT1
-    {vk::Format::eBc2UnormBlock, {}},                            // DXT23
-    {vk::Format::eBc3UnormBlock, {}},                            // DXT45
-    {vk::Format::eBc4UnormBlock, {}},                            // DXN1
-    {vk::Format::eBc5UnormBlock, {}},                            // DXN2UNORM
-    {vk::Format::eBc5SnormBlock, {}},                            // DXN2SNORM
-    {vk::Format::eBc7UnormBlock, {}},                            // BC7U
-    {vk::Format::eBc6HUfloatBlock, {}},                          // BC6H_UF16
-    {vk::Format::eBc6HSfloatBlock, {}},                          // BC6H_SF16
-    {vk::Format::eAstc4x4UnormBlock, {}},                        // ASTC_2D_4X4
-    {vk::Format::eB8G8R8A8Unorm, {}},                            // BGRA8
-    {vk::Format::eR32G32B32A32Sfloat, Attachable | Storage},     // RGBA32F
-    {vk::Format::eR32G32Sfloat, Attachable | Storage},           // RG32F
-    {vk::Format::eR32Sfloat, Attachable | Storage},              // R32F
-    {vk::Format::eR16Sfloat, Attachable | Storage},              // R16F
-    {vk::Format::eR16Unorm, Attachable | Storage},               // R16U
-    {vk::Format::eUndefined, {}},                                // R16S
-    {vk::Format::eUndefined, {}},                                // R16UI
-    {vk::Format::eUndefined, {}},                                // R16I
-    {vk::Format::eR16G16Unorm, Attachable | Storage},            // RG16
-    {vk::Format::eR16G16Sfloat, Attachable | Storage},           // RG16F
-    {vk::Format::eUndefined, {}},                                // RG16UI
-    {vk::Format::eUndefined, {}},                                // RG16I
-    {vk::Format::eR16G16Snorm, Attachable | Storage},            // RG16S
-    {vk::Format::eUndefined, {}},                                // RGB32F
-    {vk::Format::eR8G8B8A8Srgb, Attachable},                     // RGBA8_SRGB
-    {vk::Format::eR8G8Unorm, Attachable | Storage},              // RG8U
-    {vk::Format::eR8G8Snorm, Attachable | Storage},              // RG8S
-    {vk::Format::eR32G32Uint, Attachable | Storage},             // RG32UI
-    {vk::Format::eUndefined, {}},                                // RGBX16F
-    {vk::Format::eR32Uint, Attachable | Storage},                // R32UI
-    {vk::Format::eR32Sint, Attachable | Storage},                // R32I
-    {vk::Format::eAstc8x8UnormBlock, {}},                        // ASTC_2D_8X8
-    {vk::Format::eUndefined, {}},                                // ASTC_2D_8X5
-    {vk::Format::eUndefined, {}},                                // ASTC_2D_5X4
-    {vk::Format::eUndefined, {}},                                // BGRA8_SRGB
-    {vk::Format::eBc1RgbaSrgbBlock, {}},                         // DXT1_SRGB
-    {vk::Format::eBc2SrgbBlock, {}},                             // DXT23_SRGB
-    {vk::Format::eBc3SrgbBlock, {}},                             // DXT45_SRGB
-    {vk::Format::eBc7SrgbBlock, {}},                             // BC7U_SRGB
-    {vk::Format::eR4G4B4A4UnormPack16, Attachable},              // R4G4B4A4U
-    {vk::Format::eAstc4x4SrgbBlock, {}},                         // ASTC_2D_4X4_SRGB
-    {vk::Format::eAstc8x8SrgbBlock, {}},                         // ASTC_2D_8X8_SRGB
-    {vk::Format::eAstc8x5SrgbBlock, {}},                         // ASTC_2D_8X5_SRGB
-    {vk::Format::eAstc5x4SrgbBlock, {}},                         // ASTC_2D_5X4_SRGB
-    {vk::Format::eAstc5x5UnormBlock, {}},                        // ASTC_2D_5X5
-    {vk::Format::eAstc5x5SrgbBlock, {}},                         // ASTC_2D_5X5_SRGB
-    {vk::Format::eAstc10x8UnormBlock, {}},                       // ASTC_2D_10X8
-    {vk::Format::eAstc10x8SrgbBlock, {}},                        // ASTC_2D_10X8_SRGB
-    {vk::Format::eAstc6x6UnormBlock, {}},                        // ASTC_2D_6X6
-    {vk::Format::eAstc6x6SrgbBlock, {}},                         // ASTC_2D_6X6_SRGB
-    {vk::Format::eAstc10x10UnormBlock, {}},                      // ASTC_2D_10X10
-    {vk::Format::eAstc10x10SrgbBlock, {}},                       // ASTC_2D_10X10_SRGB
-    {vk::Format::eAstc12x12UnormBlock, {}},                      // ASTC_2D_12X12
-    {vk::Format::eAstc12x12SrgbBlock, {}},                       // ASTC_2D_12X12_SRGB
-    {vk::Format::eAstc8x6UnormBlock, {}},                        // ASTC_2D_8X6
-    {vk::Format::eAstc8x6SrgbBlock, {}},                         // ASTC_2D_8X6_SRGB
-    {vk::Format::eAstc6x5UnormBlock, {}},                        // ASTC_2D_6X5
-    {vk::Format::eAstc6x5SrgbBlock, {}},                         // ASTC_2D_6X5_SRGB
-    {vk::Format::eE5B9G9R9UfloatPack32, {}},                     // E5B9G9R9F
+    {VK_FORMAT_A8B8G8R8_UNORM_PACK32, Attachable | Storage},    // ABGR8U
+    {VK_FORMAT_A8B8G8R8_SNORM_PACK32, Attachable | Storage},    // ABGR8S
+    {VK_FORMAT_A8B8G8R8_UINT_PACK32, Attachable | Storage},     // ABGR8UI
+    {VK_FORMAT_B5G6R5_UNORM_PACK16},                            // B5G6R5U
+    {VK_FORMAT_A2B10G10R10_UNORM_PACK32, Attachable | Storage}, // A2B10G10R10U
+    {VK_FORMAT_A1R5G5B5_UNORM_PACK16, Attachable},              // A1B5G5R5U (flipped with swizzle)
+    {VK_FORMAT_R8_UNORM, Attachable | Storage},                 // R8U
+    {VK_FORMAT_R8_UINT, Attachable | Storage},                  // R8UI
+    {VK_FORMAT_R16G16B16A16_SFLOAT, Attachable | Storage},      // RGBA16F
+    {VK_FORMAT_R16G16B16A16_UNORM, Attachable | Storage},       // RGBA16U
+    {VK_FORMAT_R16G16B16A16_SNORM, Attachable | Storage},       // RGBA16S
+    {VK_FORMAT_R16G16B16A16_UINT, Attachable | Storage},        // RGBA16UI
+    {VK_FORMAT_B10G11R11_UFLOAT_PACK32, Attachable | Storage},  // R11FG11FB10F
+    {VK_FORMAT_R32G32B32A32_UINT, Attachable | Storage},        // RGBA32UI
+    {VK_FORMAT_BC1_RGBA_UNORM_BLOCK},                           // DXT1
+    {VK_FORMAT_BC2_UNORM_BLOCK},                                // DXT23
+    {VK_FORMAT_BC3_UNORM_BLOCK},                                // DXT45
+    {VK_FORMAT_BC4_UNORM_BLOCK},                                // DXN1
+    {VK_FORMAT_BC5_UNORM_BLOCK},                                // DXN2UNORM
+    {VK_FORMAT_BC5_SNORM_BLOCK},                                // DXN2SNORM
+    {VK_FORMAT_BC7_UNORM_BLOCK},                                // BC7U
+    {VK_FORMAT_BC6H_UFLOAT_BLOCK},                              // BC6H_UF16
+    {VK_FORMAT_BC6H_SFLOAT_BLOCK},                              // BC6H_SF16
+    {VK_FORMAT_ASTC_4x4_UNORM_BLOCK},                           // ASTC_2D_4X4
+    {VK_FORMAT_B8G8R8A8_UNORM},                                 // BGRA8
+    {VK_FORMAT_R32G32B32A32_SFLOAT, Attachable | Storage},      // RGBA32F
+    {VK_FORMAT_R32G32_SFLOAT, Attachable | Storage},            // RG32F
+    {VK_FORMAT_R32_SFLOAT, Attachable | Storage},               // R32F
+    {VK_FORMAT_R16_SFLOAT, Attachable | Storage},               // R16F
+    {VK_FORMAT_R16_UNORM, Attachable | Storage},                // R16U
+    {VK_FORMAT_UNDEFINED},                                      // R16S
+    {VK_FORMAT_UNDEFINED},                                      // R16UI
+    {VK_FORMAT_UNDEFINED},                                      // R16I
+    {VK_FORMAT_R16G16_UNORM, Attachable | Storage},             // RG16
+    {VK_FORMAT_R16G16_SFLOAT, Attachable | Storage},            // RG16F
+    {VK_FORMAT_UNDEFINED},                                      // RG16UI
+    {VK_FORMAT_UNDEFINED},                                      // RG16I
+    {VK_FORMAT_R16G16_SNORM, Attachable | Storage},             // RG16S
+    {VK_FORMAT_UNDEFINED},                                      // RGB32F
+    {VK_FORMAT_R8G8B8A8_SRGB, Attachable},                      // RGBA8_SRGB
+    {VK_FORMAT_R8G8_UNORM, Attachable | Storage},               // RG8U
+    {VK_FORMAT_R8G8_SNORM, Attachable | Storage},               // RG8S
+    {VK_FORMAT_R32G32_UINT, Attachable | Storage},              // RG32UI
+    {VK_FORMAT_UNDEFINED},                                      // RGBX16F
+    {VK_FORMAT_R32_UINT, Attachable | Storage},                 // R32UI
+    {VK_FORMAT_R32_SINT, Attachable | Storage},                 // R32I
+    {VK_FORMAT_ASTC_8x8_UNORM_BLOCK},                           // ASTC_2D_8X8
+    {VK_FORMAT_UNDEFINED},                                      // ASTC_2D_8X5
+    {VK_FORMAT_UNDEFINED},                                      // ASTC_2D_5X4
+    {VK_FORMAT_UNDEFINED},                                      // BGRA8_SRGB
+    {VK_FORMAT_BC1_RGBA_SRGB_BLOCK},                            // DXT1_SRGB
+    {VK_FORMAT_BC2_SRGB_BLOCK},                                 // DXT23_SRGB
+    {VK_FORMAT_BC3_SRGB_BLOCK},                                 // DXT45_SRGB
+    {VK_FORMAT_BC7_SRGB_BLOCK},                                 // BC7U_SRGB
+    {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable},              // R4G4B4A4U
+    {VK_FORMAT_ASTC_4x4_SRGB_BLOCK},                            // ASTC_2D_4X4_SRGB
+    {VK_FORMAT_ASTC_8x8_SRGB_BLOCK},                            // ASTC_2D_8X8_SRGB
+    {VK_FORMAT_ASTC_8x5_SRGB_BLOCK},                            // ASTC_2D_8X5_SRGB
+    {VK_FORMAT_ASTC_5x4_SRGB_BLOCK},                            // ASTC_2D_5X4_SRGB
+    {VK_FORMAT_ASTC_5x5_UNORM_BLOCK},                           // ASTC_2D_5X5
+    {VK_FORMAT_ASTC_5x5_SRGB_BLOCK},                            // ASTC_2D_5X5_SRGB
+    {VK_FORMAT_ASTC_10x8_UNORM_BLOCK},                          // ASTC_2D_10X8
+    {VK_FORMAT_ASTC_10x8_SRGB_BLOCK},                           // ASTC_2D_10X8_SRGB
+    {VK_FORMAT_ASTC_6x6_UNORM_BLOCK},                           // ASTC_2D_6X6
+    {VK_FORMAT_ASTC_6x6_SRGB_BLOCK},                            // ASTC_2D_6X6_SRGB
+    {VK_FORMAT_ASTC_10x10_UNORM_BLOCK},                         // ASTC_2D_10X10
+    {VK_FORMAT_ASTC_10x10_SRGB_BLOCK},                          // ASTC_2D_10X10_SRGB
+    {VK_FORMAT_ASTC_12x12_UNORM_BLOCK},                         // ASTC_2D_12X12
+    {VK_FORMAT_ASTC_12x12_SRGB_BLOCK},                          // ASTC_2D_12X12_SRGB
+    {VK_FORMAT_ASTC_8x6_UNORM_BLOCK},                           // ASTC_2D_8X6
+    {VK_FORMAT_ASTC_8x6_SRGB_BLOCK},                            // ASTC_2D_8X6_SRGB
+    {VK_FORMAT_ASTC_6x5_UNORM_BLOCK},                           // ASTC_2D_6X5
+    {VK_FORMAT_ASTC_6x5_SRGB_BLOCK},                            // ASTC_2D_6X5_SRGB
+    {VK_FORMAT_E5B9G9R9_UFLOAT_PACK32},                         // E5B9G9R9F
 
     // Depth formats
-    {vk::Format::eD32Sfloat, Attachable}, // Z32F
-    {vk::Format::eD16Unorm, Attachable},  // Z16
+    {VK_FORMAT_D32_SFLOAT, Attachable}, // Z32F
+    {VK_FORMAT_D16_UNORM, Attachable},  // Z16
 
     // DepthStencil formats
-    {vk::Format::eD24UnormS8Uint, Attachable},  // Z24S8
-    {vk::Format::eD24UnormS8Uint, Attachable},  // S8Z24 (emulated)
-    {vk::Format::eD32SfloatS8Uint, Attachable}, // Z32FS8
+    {VK_FORMAT_D24_UNORM_S8_UINT, Attachable},  // Z24S8
+    {VK_FORMAT_D24_UNORM_S8_UINT, Attachable},  // S8Z24 (emulated)
+    {VK_FORMAT_D32_SFLOAT_S8_UINT, Attachable}, // Z32FS8
 };
 static_assert(std::size(tex_format_tuples) == VideoCore::Surface::MaxPixelFormat);
@@ -212,106 +215,106 @@ FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFo
     ASSERT(static_cast<std::size_t>(pixel_format) < std::size(tex_format_tuples));
 
     auto tuple = tex_format_tuples[static_cast<std::size_t>(pixel_format)];
-    if (tuple.format == vk::Format::eUndefined) {
+    if (tuple.format == VK_FORMAT_UNDEFINED) {
         UNIMPLEMENTED_MSG("Unimplemented texture format with pixel format={}",
                           static_cast<u32>(pixel_format));
-        return {vk::Format::eA8B8G8R8UnormPack32, true, true};
+        return {VK_FORMAT_A8B8G8R8_UNORM_PACK32, true, true};
     }
 
     // Use ABGR8 on hardware that doesn't support ASTC natively
     if (!device.IsOptimalAstcSupported() && VideoCore::Surface::IsPixelFormatASTC(pixel_format)) {
         tuple.format = VideoCore::Surface::IsPixelFormatSRGB(pixel_format)
-                           ? vk::Format::eA8B8G8R8SrgbPack32
-                           : vk::Format::eA8B8G8R8UnormPack32;
+                           ? VK_FORMAT_A8B8G8R8_SRGB_PACK32
+                           : VK_FORMAT_A8B8G8R8_UNORM_PACK32;
     }
     const bool attachable = tuple.usage & Attachable;
     const bool storage = tuple.usage & Storage;
 
-    vk::FormatFeatureFlags usage;
+    VkFormatFeatureFlags usage;
     if (format_type == FormatType::Buffer) {
-        usage = vk::FormatFeatureFlagBits::eStorageTexelBuffer |
-                vk::FormatFeatureFlagBits::eUniformTexelBuffer;
+        usage =
+            VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
     } else {
-        usage = vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eTransferDst |
-                vk::FormatFeatureFlagBits::eTransferSrc;
+        usage = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT |
+                VK_FORMAT_FEATURE_TRANSFER_SRC_BIT;
         if (attachable) {
-            usage |= IsZetaFormat(pixel_format) ? vk::FormatFeatureFlagBits::eDepthStencilAttachment
-                                                : vk::FormatFeatureFlagBits::eColorAttachment;
+            usage |= IsZetaFormat(pixel_format) ? VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT
+                                                : VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
         }
         if (storage) {
-            usage |= vk::FormatFeatureFlagBits::eStorageImage;
+            usage |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
         }
     }
     return {device.GetSupportedFormat(tuple.format, usage, format_type), attachable, storage};
 }
 
-vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
+VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage) {
     switch (stage) {
     case Tegra::Engines::ShaderType::Vertex:
-        return vk::ShaderStageFlagBits::eVertex;
+        return VK_SHADER_STAGE_VERTEX_BIT;
     case Tegra::Engines::ShaderType::TesselationControl:
-        return vk::ShaderStageFlagBits::eTessellationControl;
+        return VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
     case Tegra::Engines::ShaderType::TesselationEval:
-        return vk::ShaderStageFlagBits::eTessellationEvaluation;
+        return VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
     case Tegra::Engines::ShaderType::Geometry:
-        return vk::ShaderStageFlagBits::eGeometry;
+        return VK_SHADER_STAGE_GEOMETRY_BIT;
     case Tegra::Engines::ShaderType::Fragment:
-        return vk::ShaderStageFlagBits::eFragment;
+        return VK_SHADER_STAGE_FRAGMENT_BIT;
     case Tegra::Engines::ShaderType::Compute:
-        return vk::ShaderStageFlagBits::eCompute;
+        return VK_SHADER_STAGE_COMPUTE_BIT;
     }
     UNIMPLEMENTED_MSG("Unimplemented shader stage={}", static_cast<u32>(stage));
     return {};
 }
 
-vk::PrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
-                                        Maxwell::PrimitiveTopology topology) {
+VkPrimitiveTopology PrimitiveTopology([[maybe_unused]] const VKDevice& device,
+                                      Maxwell::PrimitiveTopology topology) {
     switch (topology) {
     case Maxwell::PrimitiveTopology::Points:
-        return vk::PrimitiveTopology::ePointList;
+        return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
     case Maxwell::PrimitiveTopology::Lines:
-        return vk::PrimitiveTopology::eLineList;
+        return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
     case Maxwell::PrimitiveTopology::LineStrip:
-        return vk::PrimitiveTopology::eLineStrip;
+        return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
     case Maxwell::PrimitiveTopology::Triangles:
-        return vk::PrimitiveTopology::eTriangleList;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::TriangleStrip:
-        return vk::PrimitiveTopology::eTriangleStrip;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
     case Maxwell::PrimitiveTopology::TriangleFan:
-        return vk::PrimitiveTopology::eTriangleFan;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
     case Maxwell::PrimitiveTopology::Quads:
         // TODO(Rodrigo): Use VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT whenever it releases
-        return vk::PrimitiveTopology::eTriangleList;
+        return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
     case Maxwell::PrimitiveTopology::Patches:
-        return vk::PrimitiveTopology::ePatchList;
+        return VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
     default:
         UNIMPLEMENTED_MSG("Unimplemented topology={}", static_cast<u32>(topology));
         return {};
     }
 }
 
-vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
+VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size) {
     switch (type) {
     case Maxwell::VertexAttribute::Type::SignedNorm:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Snorm;
+            return VK_FORMAT_R8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Snorm;
+            return VK_FORMAT_R8G8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Snorm;
+            return VK_FORMAT_R8G8B8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Snorm;
+            return VK_FORMAT_R8G8B8A8_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Snorm;
+            return VK_FORMAT_R16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Snorm;
+            return VK_FORMAT_R16G16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Snorm;
+            return VK_FORMAT_R16G16B16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Snorm;
+            return VK_FORMAT_R16G16B16A16_SNORM;
         case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
-            return vk::Format::eA2B10G10R10SnormPack32;
+            return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
         default:
             break;
         }
@@ -319,23 +322,23 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::UnsignedNorm:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Unorm;
+            return VK_FORMAT_R8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Unorm;
+            return VK_FORMAT_R8G8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Unorm;
+            return VK_FORMAT_R8G8B8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Unorm;
+            return VK_FORMAT_R8G8B8A8_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Unorm;
+            return VK_FORMAT_R16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Unorm;
+            return VK_FORMAT_R16G16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Unorm;
+            return VK_FORMAT_R16G16B16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Unorm;
+            return VK_FORMAT_R16G16B16A16_UNORM;
         case Maxwell::VertexAttribute::Size::Size_10_10_10_2:
-            return vk::Format::eA2B10G10R10UnormPack32;
+            return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
         default:
             break;
         }
@@ -343,59 +346,59 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::SignedInt:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sint;
+            return VK_FORMAT_R16G16B16A16_SINT;
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Sint;
+            return VK_FORMAT_R8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Sint;
+            return VK_FORMAT_R8G8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Sint;
+            return VK_FORMAT_R8G8B8_SINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Sint;
+            return VK_FORMAT_R8G8B8A8_SINT;
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Sint;
+            return VK_FORMAT_R32_SINT;
         default:
             break;
         }
     case Maxwell::VertexAttribute::Type::UnsignedInt:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Uint;
+            return VK_FORMAT_R8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Uint;
+            return VK_FORMAT_R8G8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Uint;
+            return VK_FORMAT_R8G8B8_UINT;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Uint;
+            return VK_FORMAT_R8G8B8A8_UINT;
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Uint;
+            return VK_FORMAT_R32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32:
-            return vk::Format::eR32G32Uint;
+            return VK_FORMAT_R32G32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32:
-            return vk::Format::eR32G32B32Uint;
+            return VK_FORMAT_R32G32B32_UINT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
-            return vk::Format::eR32G32B32A32Uint;
+            return VK_FORMAT_R32G32B32A32_UINT;
         default:
             break;
         }
     case Maxwell::VertexAttribute::Type::UnsignedScaled:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Uscaled;
+            return VK_FORMAT_R8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Uscaled;
+            return VK_FORMAT_R8G8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Uscaled;
+            return VK_FORMAT_R8G8B8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Uscaled;
+            return VK_FORMAT_R8G8B8A8_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Uscaled;
+            return VK_FORMAT_R16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Uscaled;
+            return VK_FORMAT_R16G16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Uscaled;
+            return VK_FORMAT_R16G16B16_USCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Uscaled;
+            return VK_FORMAT_R16G16B16A16_USCALED;
         default:
             break;
         }
@@ -403,21 +406,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::SignedScaled:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_8:
-            return vk::Format::eR8Sscaled;
+            return VK_FORMAT_R8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8:
-            return vk::Format::eR8G8Sscaled;
+            return VK_FORMAT_R8G8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8:
-            return vk::Format::eR8G8B8Sscaled;
+            return VK_FORMAT_R8G8B8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_8_8_8_8:
-            return vk::Format::eR8G8B8A8Sscaled;
+            return VK_FORMAT_R8G8B8A8_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Sscaled;
+            return VK_FORMAT_R16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Sscaled;
+            return VK_FORMAT_R16G16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Sscaled;
+            return VK_FORMAT_R16G16B16_SSCALED;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sscaled;
+            return VK_FORMAT_R16G16B16A16_SSCALED;
         default:
             break;
         }
@@ -425,21 +428,21 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     case Maxwell::VertexAttribute::Type::Float:
         switch (size) {
         case Maxwell::VertexAttribute::Size::Size_32:
-            return vk::Format::eR32Sfloat;
+            return VK_FORMAT_R32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32:
-            return vk::Format::eR32G32Sfloat;
+            return VK_FORMAT_R32G32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32:
-            return vk::Format::eR32G32B32Sfloat;
+            return VK_FORMAT_R32G32B32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_32_32_32_32:
-            return vk::Format::eR32G32B32A32Sfloat;
+            return VK_FORMAT_R32G32B32A32_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16:
-            return vk::Format::eR16Sfloat;
+            return VK_FORMAT_R16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16:
-            return vk::Format::eR16G16Sfloat;
+            return VK_FORMAT_R16G16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16_16:
-            return vk::Format::eR16G16B16Sfloat;
+            return VK_FORMAT_R16G16B16_SFLOAT;
         case Maxwell::VertexAttribute::Size::Size_16_16_16_16:
-            return vk::Format::eR16G16B16A16Sfloat;
+            return VK_FORMAT_R16G16B16A16_SFLOAT;
         default:
             break;
         }
@@ -450,210 +453,210 @@ vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttr
     return {};
 }
 
-vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
+VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison) {
     switch (comparison) {
     case Maxwell::ComparisonOp::Never:
     case Maxwell::ComparisonOp::NeverOld:
-        return vk::CompareOp::eNever;
+        return VK_COMPARE_OP_NEVER;
     case Maxwell::ComparisonOp::Less:
     case Maxwell::ComparisonOp::LessOld:
-        return vk::CompareOp::eLess;
+        return VK_COMPARE_OP_LESS;
     case Maxwell::ComparisonOp::Equal:
     case Maxwell::ComparisonOp::EqualOld:
-        return vk::CompareOp::eEqual;
+        return VK_COMPARE_OP_EQUAL;
     case Maxwell::ComparisonOp::LessEqual:
     case Maxwell::ComparisonOp::LessEqualOld:
-        return vk::CompareOp::eLessOrEqual;
+        return VK_COMPARE_OP_LESS_OR_EQUAL;
     case Maxwell::ComparisonOp::Greater:
     case Maxwell::ComparisonOp::GreaterOld:
-        return vk::CompareOp::eGreater;
+        return VK_COMPARE_OP_GREATER;
     case Maxwell::ComparisonOp::NotEqual:
     case Maxwell::ComparisonOp::NotEqualOld:
-        return vk::CompareOp::eNotEqual;
+        return VK_COMPARE_OP_NOT_EQUAL;
     case Maxwell::ComparisonOp::GreaterEqual:
     case Maxwell::ComparisonOp::GreaterEqualOld:
-        return vk::CompareOp::eGreaterOrEqual;
+        return VK_COMPARE_OP_GREATER_OR_EQUAL;
     case Maxwell::ComparisonOp::Always:
     case Maxwell::ComparisonOp::AlwaysOld:
-        return vk::CompareOp::eAlways;
+        return VK_COMPARE_OP_ALWAYS;
     }
     UNIMPLEMENTED_MSG("Unimplemented comparison op={}", static_cast<u32>(comparison));
     return {};
 }
 
-vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
+VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format) {
     switch (index_format) {
     case Maxwell::IndexFormat::UnsignedByte:
         if (!device.IsExtIndexTypeUint8Supported()) {
             UNIMPLEMENTED_MSG("Native uint8 indices are not supported on this device");
-            return vk::IndexType::eUint16;
+            return VK_INDEX_TYPE_UINT16;
         }
-        return vk::IndexType::eUint8EXT;
+        return VK_INDEX_TYPE_UINT8_EXT;
     case Maxwell::IndexFormat::UnsignedShort:
-        return vk::IndexType::eUint16;
+        return VK_INDEX_TYPE_UINT16;
     case Maxwell::IndexFormat::UnsignedInt:
-        return vk::IndexType::eUint32;
+        return VK_INDEX_TYPE_UINT32;
     }
     UNIMPLEMENTED_MSG("Unimplemented index_format={}", static_cast<u32>(index_format));
     return {};
 }
 
-vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op) {
+VkStencilOp StencilOp(Maxwell::StencilOp stencil_op) {
     switch (stencil_op) {
     case Maxwell::StencilOp::Keep:
     case Maxwell::StencilOp::KeepOGL:
-        return vk::StencilOp::eKeep;
+        return VK_STENCIL_OP_KEEP;
     case Maxwell::StencilOp::Zero:
     case Maxwell::StencilOp::ZeroOGL:
-        return vk::StencilOp::eZero;
+        return VK_STENCIL_OP_ZERO;
     case Maxwell::StencilOp::Replace:
     case Maxwell::StencilOp::ReplaceOGL:
-        return vk::StencilOp::eReplace;
+        return VK_STENCIL_OP_REPLACE;
     case Maxwell::StencilOp::Incr:
     case Maxwell::StencilOp::IncrOGL:
-        return vk::StencilOp::eIncrementAndClamp;
+        return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
     case Maxwell::StencilOp::Decr:
     case Maxwell::StencilOp::DecrOGL:
-        return vk::StencilOp::eDecrementAndClamp;
+        return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
     case Maxwell::StencilOp::Invert:
     case Maxwell::StencilOp::InvertOGL:
-        return vk::StencilOp::eInvert;
+        return VK_STENCIL_OP_INVERT;
     case Maxwell::StencilOp::IncrWrap:
     case Maxwell::StencilOp::IncrWrapOGL:
-        return vk::StencilOp::eIncrementAndWrap;
+        return VK_STENCIL_OP_INCREMENT_AND_WRAP;
     case Maxwell::StencilOp::DecrWrap:
     case Maxwell::StencilOp::DecrWrapOGL:
-        return vk::StencilOp::eDecrementAndWrap;
+        return VK_STENCIL_OP_DECREMENT_AND_WRAP;
     }
     UNIMPLEMENTED_MSG("Unimplemented stencil op={}", static_cast<u32>(stencil_op));
     return {};
 }
 
-vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation) {
+VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
     switch (equation) {
     case Maxwell::Blend::Equation::Add:
     case Maxwell::Blend::Equation::AddGL:
-        return vk::BlendOp::eAdd;
+        return VK_BLEND_OP_ADD;
     case Maxwell::Blend::Equation::Subtract:
     case Maxwell::Blend::Equation::SubtractGL:
-        return vk::BlendOp::eSubtract;
+        return VK_BLEND_OP_SUBTRACT;
     case Maxwell::Blend::Equation::ReverseSubtract:
     case Maxwell::Blend::Equation::ReverseSubtractGL:
-        return vk::BlendOp::eReverseSubtract;
+        return VK_BLEND_OP_REVERSE_SUBTRACT;
     case Maxwell::Blend::Equation::Min:
     case Maxwell::Blend::Equation::MinGL:
-        return vk::BlendOp::eMin;
+        return VK_BLEND_OP_MIN;
     case Maxwell::Blend::Equation::Max:
     case Maxwell::Blend::Equation::MaxGL:
-        return vk::BlendOp::eMax;
+        return VK_BLEND_OP_MAX;
     }
     UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation));
     return {};
 }
 
-vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
+VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor) {
     switch (factor) {
     case Maxwell::Blend::Factor::Zero:
     case Maxwell::Blend::Factor::ZeroGL:
-        return vk::BlendFactor::eZero;
+        return VK_BLEND_FACTOR_ZERO;
     case Maxwell::Blend::Factor::One:
     case Maxwell::Blend::Factor::OneGL:
-        return vk::BlendFactor::eOne;
+        return VK_BLEND_FACTOR_ONE;
     case Maxwell::Blend::Factor::SourceColor:
     case Maxwell::Blend::Factor::SourceColorGL:
-        return vk::BlendFactor::eSrcColor;
+        return VK_BLEND_FACTOR_SRC_COLOR;
     case Maxwell::Blend::Factor::OneMinusSourceColor:
     case Maxwell::Blend::Factor::OneMinusSourceColorGL:
-        return vk::BlendFactor::eOneMinusSrcColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
     case Maxwell::Blend::Factor::SourceAlpha:
     case Maxwell::Blend::Factor::SourceAlphaGL:
-        return vk::BlendFactor::eSrcAlpha;
+        return VK_BLEND_FACTOR_SRC_ALPHA;
     case Maxwell::Blend::Factor::OneMinusSourceAlpha:
     case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
-        return vk::BlendFactor::eOneMinusSrcAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
     case Maxwell::Blend::Factor::DestAlpha:
     case Maxwell::Blend::Factor::DestAlphaGL:
-        return vk::BlendFactor::eDstAlpha;
+        return VK_BLEND_FACTOR_DST_ALPHA;
     case Maxwell::Blend::Factor::OneMinusDestAlpha:
     case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
-        return vk::BlendFactor::eOneMinusDstAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
     case Maxwell::Blend::Factor::DestColor:
     case Maxwell::Blend::Factor::DestColorGL:
-        return vk::BlendFactor::eDstColor;
+        return VK_BLEND_FACTOR_DST_COLOR;
     case Maxwell::Blend::Factor::OneMinusDestColor:
     case Maxwell::Blend::Factor::OneMinusDestColorGL:
-        return vk::BlendFactor::eOneMinusDstColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
     case Maxwell::Blend::Factor::SourceAlphaSaturate:
     case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
-        return vk::BlendFactor::eSrcAlphaSaturate;
+        return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
     case Maxwell::Blend::Factor::Source1Color:
     case Maxwell::Blend::Factor::Source1ColorGL:
-        return vk::BlendFactor::eSrc1Color;
+        return VK_BLEND_FACTOR_SRC1_COLOR;
     case Maxwell::Blend::Factor::OneMinusSource1Color:
     case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
-        return vk::BlendFactor::eOneMinusSrc1Color;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
     case Maxwell::Blend::Factor::Source1Alpha:
     case Maxwell::Blend::Factor::Source1AlphaGL:
-        return vk::BlendFactor::eSrc1Alpha;
+        return VK_BLEND_FACTOR_SRC1_ALPHA;
     case Maxwell::Blend::Factor::OneMinusSource1Alpha:
     case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
-        return vk::BlendFactor::eOneMinusSrc1Alpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
     case Maxwell::Blend::Factor::ConstantColor:
     case Maxwell::Blend::Factor::ConstantColorGL:
-        return vk::BlendFactor::eConstantColor;
+        return VK_BLEND_FACTOR_CONSTANT_COLOR;
     case Maxwell::Blend::Factor::OneMinusConstantColor:
     case Maxwell::Blend::Factor::OneMinusConstantColorGL:
-        return vk::BlendFactor::eOneMinusConstantColor;
+        return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
     case Maxwell::Blend::Factor::ConstantAlpha:
     case Maxwell::Blend::Factor::ConstantAlphaGL:
-        return vk::BlendFactor::eConstantAlpha;
+        return VK_BLEND_FACTOR_CONSTANT_ALPHA;
     case Maxwell::Blend::Factor::OneMinusConstantAlpha:
     case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
-        return vk::BlendFactor::eOneMinusConstantAlpha;
+        return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
     }
     UNIMPLEMENTED_MSG("Unimplemented blend factor={}", static_cast<u32>(factor));
     return {};
 }
 
-vk::FrontFace FrontFace(Maxwell::FrontFace front_face) {
+VkFrontFace FrontFace(Maxwell::FrontFace front_face) {
     switch (front_face) {
     case Maxwell::FrontFace::ClockWise:
-        return vk::FrontFace::eClockwise;
+        return VK_FRONT_FACE_CLOCKWISE;
     case Maxwell::FrontFace::CounterClockWise:
-        return vk::FrontFace::eCounterClockwise;
+        return VK_FRONT_FACE_COUNTER_CLOCKWISE;
     }
     UNIMPLEMENTED_MSG("Unimplemented front face={}", static_cast<u32>(front_face));
     return {};
 }
 
-vk::CullModeFlags CullFace(Maxwell::CullFace cull_face) {
+VkCullModeFlags CullFace(Maxwell::CullFace cull_face) {
     switch (cull_face) {
     case Maxwell::CullFace::Front:
-        return vk::CullModeFlagBits::eFront;
+        return VK_CULL_MODE_FRONT_BIT;
     case Maxwell::CullFace::Back:
-        return vk::CullModeFlagBits::eBack;
+        return VK_CULL_MODE_BACK_BIT;
     case Maxwell::CullFace::FrontAndBack:
-        return vk::CullModeFlagBits::eFrontAndBack;
+        return VK_CULL_MODE_FRONT_AND_BACK;
     }
     UNIMPLEMENTED_MSG("Unimplemented cull face={}", static_cast<u32>(cull_face));
     return {};
 }
 
-vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
+VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle) {
     switch (swizzle) {
644 case Tegra::Texture::SwizzleSource::Zero: 647 case Tegra::Texture::SwizzleSource::Zero:
645 return vk::ComponentSwizzle::eZero; 648 return VK_COMPONENT_SWIZZLE_ZERO;
646 case Tegra::Texture::SwizzleSource::R: 649 case Tegra::Texture::SwizzleSource::R:
647 return vk::ComponentSwizzle::eR; 650 return VK_COMPONENT_SWIZZLE_R;
648 case Tegra::Texture::SwizzleSource::G: 651 case Tegra::Texture::SwizzleSource::G:
649 return vk::ComponentSwizzle::eG; 652 return VK_COMPONENT_SWIZZLE_G;
650 case Tegra::Texture::SwizzleSource::B: 653 case Tegra::Texture::SwizzleSource::B:
651 return vk::ComponentSwizzle::eB; 654 return VK_COMPONENT_SWIZZLE_B;
652 case Tegra::Texture::SwizzleSource::A: 655 case Tegra::Texture::SwizzleSource::A:
653 return vk::ComponentSwizzle::eA; 656 return VK_COMPONENT_SWIZZLE_A;
654 case Tegra::Texture::SwizzleSource::OneInt: 657 case Tegra::Texture::SwizzleSource::OneInt:
655 case Tegra::Texture::SwizzleSource::OneFloat: 658 case Tegra::Texture::SwizzleSource::OneFloat:
656 return vk::ComponentSwizzle::eOne; 659 return VK_COMPONENT_SWIZZLE_ONE;
657 } 660 }
658 UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle)); 661 UNIMPLEMENTED_MSG("Unimplemented swizzle source={}", static_cast<u32>(swizzle));
659 return {}; 662 return {};
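Every translation helper in this file follows the same shape after the conversion: an exhaustive switch over the Maxwell enumeration returning the raw C VkX value, with UNIMPLEMENTED_MSG plus a value-initialized return as the fallback. A minimal sketch of that shape for BlendEquation (declared in the header diff below); the exact Maxwell enumerators are assumptions mirroring the paired GL cases of BlendFactor above, not quoted from this diff:

    VkBlendOp BlendEquation(Maxwell::Blend::Equation equation) {
        switch (equation) {
        case Maxwell::Blend::Equation::Add:
        case Maxwell::Blend::Equation::AddGL:
            return VK_BLEND_OP_ADD;
        case Maxwell::Blend::Equation::Subtract:
        case Maxwell::Blend::Equation::SubtractGL:
            return VK_BLEND_OP_SUBTRACT;
        case Maxwell::Blend::Equation::ReverseSubtract:
        case Maxwell::Blend::Equation::ReverseSubtractGL:
            return VK_BLEND_OP_REVERSE_SUBTRACT;
        case Maxwell::Blend::Equation::Min:
        case Maxwell::Blend::Equation::MinGL:
            return VK_BLEND_OP_MIN;
        case Maxwell::Blend::Equation::Max:
        case Maxwell::Blend::Equation::MaxGL:
            return VK_BLEND_OP_MAX;
        }
        // Same fallback convention as the functions above: log and return a
        // value-initialized enum.
        UNIMPLEMENTED_MSG("Unimplemented blend equation={}", static_cast<u32>(equation));
        return {};
    }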
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.h b/src/video_core/renderer_vulkan/maxwell_to_vk.h
index 24f6ab544..81bce4c6c 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.h
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.h
@@ -6,8 +6,8 @@
6 6
7#include "common/common_types.h" 7#include "common/common_types.h"
8#include "video_core/engines/maxwell_3d.h" 8#include "video_core/engines/maxwell_3d.h"
9#include "video_core/renderer_vulkan/declarations.h"
10#include "video_core/renderer_vulkan/vk_device.h" 9#include "video_core/renderer_vulkan/vk_device.h"
10#include "video_core/renderer_vulkan/wrapper.h"
11#include "video_core/surface.h" 11#include "video_core/surface.h"
12#include "video_core/textures/texture.h" 12#include "video_core/textures/texture.h"
13 13
@@ -18,46 +18,45 @@ using PixelFormat = VideoCore::Surface::PixelFormat;
18 18
19namespace Sampler { 19namespace Sampler {
20 20
21vk::Filter Filter(Tegra::Texture::TextureFilter filter); 21VkFilter Filter(Tegra::Texture::TextureFilter filter);
22 22
23vk::SamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter); 23VkSamplerMipmapMode MipmapMode(Tegra::Texture::TextureMipmapFilter mipmap_filter);
24 24
25vk::SamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode, 25VkSamplerAddressMode WrapMode(const VKDevice& device, Tegra::Texture::WrapMode wrap_mode,
26 Tegra::Texture::TextureFilter filter); 26 Tegra::Texture::TextureFilter filter);
27 27
28vk::CompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func); 28VkCompareOp DepthCompareFunction(Tegra::Texture::DepthCompareFunc depth_compare_func);
29 29
30} // namespace Sampler 30} // namespace Sampler
31 31
32struct FormatInfo { 32struct FormatInfo {
33 vk::Format format; 33 VkFormat format;
34 bool attachable; 34 bool attachable;
35 bool storage; 35 bool storage;
36}; 36};
37 37
38FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format); 38FormatInfo SurfaceFormat(const VKDevice& device, FormatType format_type, PixelFormat pixel_format);
39 39
40vk::ShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage); 40VkShaderStageFlagBits ShaderStage(Tegra::Engines::ShaderType stage);
41 41
42vk::PrimitiveTopology PrimitiveTopology(const VKDevice& device, 42VkPrimitiveTopology PrimitiveTopology(const VKDevice& device, Maxwell::PrimitiveTopology topology);
43 Maxwell::PrimitiveTopology topology);
44 43
45vk::Format VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size); 44VkFormat VertexFormat(Maxwell::VertexAttribute::Type type, Maxwell::VertexAttribute::Size size);
46 45
47vk::CompareOp ComparisonOp(Maxwell::ComparisonOp comparison); 46VkCompareOp ComparisonOp(Maxwell::ComparisonOp comparison);
48 47
49vk::IndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format); 48VkIndexType IndexFormat(const VKDevice& device, Maxwell::IndexFormat index_format);
50 49
51vk::StencilOp StencilOp(Maxwell::StencilOp stencil_op); 50VkStencilOp StencilOp(Maxwell::StencilOp stencil_op);
52 51
53vk::BlendOp BlendEquation(Maxwell::Blend::Equation equation); 52VkBlendOp BlendEquation(Maxwell::Blend::Equation equation);
54 53
55vk::BlendFactor BlendFactor(Maxwell::Blend::Factor factor); 54VkBlendFactor BlendFactor(Maxwell::Blend::Factor factor);
56 55
57vk::FrontFace FrontFace(Maxwell::FrontFace front_face); 56VkFrontFace FrontFace(Maxwell::FrontFace front_face);
58 57
59vk::CullModeFlags CullFace(Maxwell::CullFace cull_face); 58VkCullModeFlags CullFace(Maxwell::CullFace cull_face);
60 59
61vk::ComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle); 60VkComponentSwizzle SwizzleSource(Tegra::Texture::SwizzleSource swizzle);
62 61
63} // namespace Vulkan::MaxwellToVK 62} // namespace Vulkan::MaxwellToVK
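Taken together, these declarations are the seam between Maxwell fixed-function state and the raw C structs. A hedged usage sketch; the Maxwell-side variable names (src_rgb, dst_rgb, eq_rgb, src_a, dst_a, eq_a) are illustrative stand-ins for register state read elsewhere, not identifiers from this commit:

    VkPipelineColorBlendAttachmentState attachment{}; // zero-initialize every field
    attachment.blendEnable = VK_TRUE;
    attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(src_rgb);
    attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(dst_rgb);
    attachment.colorBlendOp = MaxwellToVK::BlendEquation(eq_rgb);
    attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(src_a);
    attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(dst_a);
    attachment.alphaBlendOp = MaxwellToVK::BlendEquation(eq_a);
    attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                                VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;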
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 9cdb4b627..dd590c38b 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -24,7 +24,6 @@
24#include "core/settings.h" 24#include "core/settings.h"
25#include "core/telemetry_session.h" 25#include "core/telemetry_session.h"
26#include "video_core/gpu.h" 26#include "video_core/gpu.h"
27#include "video_core/renderer_vulkan/declarations.h"
28#include "video_core/renderer_vulkan/renderer_vulkan.h" 27#include "video_core/renderer_vulkan/renderer_vulkan.h"
29#include "video_core/renderer_vulkan/vk_blit_screen.h" 28#include "video_core/renderer_vulkan/vk_blit_screen.h"
30#include "video_core/renderer_vulkan/vk_device.h" 29#include "video_core/renderer_vulkan/vk_device.h"
@@ -34,8 +33,9 @@
34#include "video_core/renderer_vulkan/vk_scheduler.h" 33#include "video_core/renderer_vulkan/vk_scheduler.h"
35#include "video_core/renderer_vulkan/vk_state_tracker.h" 34#include "video_core/renderer_vulkan/vk_state_tracker.h"
36#include "video_core/renderer_vulkan/vk_swapchain.h" 35#include "video_core/renderer_vulkan/vk_swapchain.h"
36#include "video_core/renderer_vulkan/wrapper.h"
37 37
38// Include these late to avoid changing Vulkan-Hpp's dynamic dispatcher size 38// Include these late to avoid polluting previous headers
39#ifdef _WIN32 39#ifdef _WIN32
40#include <windows.h> 40#include <windows.h>
41// ensure include order 41// ensure include order
@@ -54,20 +54,19 @@ namespace {
54 54
55using Core::Frontend::WindowSystemType; 55using Core::Frontend::WindowSystemType;
56 56
57VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity_, 57VkBool32 DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
58 VkDebugUtilsMessageTypeFlagsEXT type, 58 VkDebugUtilsMessageTypeFlagsEXT type,
59 const VkDebugUtilsMessengerCallbackDataEXT* data, 59 const VkDebugUtilsMessengerCallbackDataEXT* data,
60 [[maybe_unused]] void* user_data) { 60 [[maybe_unused]] void* user_data) {
61 const auto severity{static_cast<vk::DebugUtilsMessageSeverityFlagBitsEXT>(severity_)};
62 const char* message{data->pMessage}; 61 const char* message{data->pMessage};
63 62
64 if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eError) { 63 if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) {
65 LOG_CRITICAL(Render_Vulkan, "{}", message); 64 LOG_CRITICAL(Render_Vulkan, "{}", message);
66 } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) { 65 } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) {
67 LOG_WARNING(Render_Vulkan, "{}", message); 66 LOG_WARNING(Render_Vulkan, "{}", message);
68 } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo) { 67 } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT) {
69 LOG_INFO(Render_Vulkan, "{}", message); 68 LOG_INFO(Render_Vulkan, "{}", message);
70 } else if (severity & vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose) { 69 } else if (severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT) {
71 LOG_DEBUG(Render_Vulkan, "{}", message); 70 LOG_DEBUG(Render_Vulkan, "{}", message);
72 } 71 }
73 return VK_FALSE; 72 return VK_FALSE;
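The callback now receives and tests the C VkDebugUtilsMessageSeverityFlagBitsEXT directly instead of casting into Vulkan-Hpp's flag type. Registration moves behind instance.TryCreateDebugCallback later in this diff; a plain-C sketch of what such a registration looks like. The flags mirror the Vulkan-Hpp create-info removed further down; what the wrapper actually builds is an assumption, since wrapper.cpp is not part of this diff:

    // vkCreateDebugUtilsMessengerEXT is an extension entry point and must be
    // fetched through vkGetInstanceProcAddr before it can be called.
    VkDebugUtilsMessengerCreateInfoEXT ci{};
    ci.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
    ci.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
    ci.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
                     VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                     VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
    ci.pfnUserCallback = DebugCallback;
    VkDebugUtilsMessengerEXT messenger = VK_NULL_HANDLE;
    vkCreateDebugUtilsMessengerEXT(instance, &ci, nullptr, &messenger);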
@@ -94,22 +93,24 @@ Common::DynamicLibrary OpenVulkanLibrary() {
94 return library; 93 return library;
95} 94}
96 95
97UniqueInstance CreateInstance(Common::DynamicLibrary& library, vk::DispatchLoaderDynamic& dld, 96vk::Instance CreateInstance(Common::DynamicLibrary& library, vk::InstanceDispatch& dld,
98 WindowSystemType window_type = WindowSystemType::Headless, 97 WindowSystemType window_type = WindowSystemType::Headless,
99 bool enable_layers = false) { 98 bool enable_layers = false) {
100 if (!library.IsOpen()) { 99 if (!library.IsOpen()) {
101 LOG_ERROR(Render_Vulkan, "Vulkan library not available"); 100 LOG_ERROR(Render_Vulkan, "Vulkan library not available");
102 return UniqueInstance{}; 101 return {};
103 } 102 }
104 PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; 103 if (!library.GetSymbol("vkGetInstanceProcAddr", &dld.vkGetInstanceProcAddr)) {
105 if (!library.GetSymbol("vkGetInstanceProcAddr", &vkGetInstanceProcAddr)) {
106 LOG_ERROR(Render_Vulkan, "vkGetInstanceProcAddr not present in Vulkan"); 104 LOG_ERROR(Render_Vulkan, "vkGetInstanceProcAddr not present in Vulkan");
107 return UniqueInstance{}; 105 return {};
106 }
107 if (!vk::Load(dld)) {
108 LOG_ERROR(Render_Vulkan, "Failed to load Vulkan function pointers");
109 return {};
108 } 110 }
109 dld.init(vkGetInstanceProcAddr);
110 111
111 std::vector<const char*> extensions; 112 std::vector<const char*> extensions;
112 extensions.reserve(4); 113 extensions.reserve(6);
113 switch (window_type) { 114 switch (window_type) {
114 case Core::Frontend::WindowSystemType::Headless: 115 case Core::Frontend::WindowSystemType::Headless:
115 break; 116 break;
@@ -136,45 +137,39 @@ UniqueInstance CreateInstance(Common::DynamicLibrary& library, vk::DispatchLoaderDynamic& dld,
136 if (enable_layers) { 137 if (enable_layers) {
137 extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); 138 extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
138 } 139 }
140 extensions.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
139 141
140 u32 num_properties; 142 const std::optional properties = vk::EnumerateInstanceExtensionProperties(dld);
141 if (vk::enumerateInstanceExtensionProperties(nullptr, &num_properties, nullptr, dld) != 143 if (!properties) {
142 vk::Result::eSuccess) {
143 LOG_ERROR(Render_Vulkan, "Failed to query number of extension properties");
144 return UniqueInstance{};
145 }
146 std::vector<vk::ExtensionProperties> properties(num_properties);
147 if (vk::enumerateInstanceExtensionProperties(nullptr, &num_properties, properties.data(),
148 dld) != vk::Result::eSuccess) {
149 LOG_ERROR(Render_Vulkan, "Failed to query extension properties"); 144 LOG_ERROR(Render_Vulkan, "Failed to query extension properties");
150 return UniqueInstance{}; 145 return {};
151 } 146 }
152 147
153 for (const char* extension : extensions) { 148 for (const char* extension : extensions) {
154 const auto it = 149 const auto it =
155 std::find_if(properties.begin(), properties.end(), [extension](const auto& prop) { 150 std::find_if(properties->begin(), properties->end(), [extension](const auto& prop) {
156 return !std::strcmp(extension, prop.extensionName); 151 return !std::strcmp(extension, prop.extensionName);
157 }); 152 });
158 if (it == properties.end()) { 153 if (it == properties->end()) {
159 LOG_ERROR(Render_Vulkan, "Required instance extension {} is not available", extension); 154 LOG_ERROR(Render_Vulkan, "Required instance extension {} is not available", extension);
160 return UniqueInstance{}; 155 return {};
161 } 156 }
162 } 157 }
163 158
164 const vk::ApplicationInfo application_info("yuzu Emulator", VK_MAKE_VERSION(0, 1, 0), 159 static constexpr std::array layers_data{"VK_LAYER_LUNARG_standard_validation"};
165 "yuzu Emulator", VK_MAKE_VERSION(0, 1, 0), 160 vk::Span<const char*> layers = layers_data;
166 VK_API_VERSION_1_1); 161 if (!enable_layers) {
167 const std::array layers = {"VK_LAYER_LUNARG_standard_validation"}; 162 layers = {};
168 const vk::InstanceCreateInfo instance_ci( 163 }
169 {}, &application_info, enable_layers ? static_cast<u32>(layers.size()) : 0, layers.data(), 164 vk::Instance instance = vk::Instance::Create(layers, extensions, dld);
170 static_cast<u32>(extensions.size()), extensions.data()); 165 if (!instance) {
171 vk::Instance unsafe_instance;
172 if (vk::createInstance(&instance_ci, nullptr, &unsafe_instance, dld) != vk::Result::eSuccess) {
173 LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance"); 166 LOG_ERROR(Render_Vulkan, "Failed to create Vulkan instance");
174 return UniqueInstance{}; 167 return {};
168 }
169 if (!vk::Load(*instance, dld)) {
170 LOG_ERROR(Render_Vulkan, "Failed to load Vulkan instance function pointers");
175 } 171 }
176 dld.init(unsafe_instance); 172 return instance;
177 return UniqueInstance(unsafe_instance, {nullptr, dld});
178} 173}
179 174
180std::string GetReadableVersion(u32 version) { 175std::string GetReadableVersion(u32 version) {
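The rewritten CreateInstance threads a caller-owned vk::InstanceDispatch through vk::Load in two stages: first the global entry points reachable from vkGetInstanceProcAddr, then the instance-level ones once an instance exists. A sketch of the first stage under that assumption; wrapper.cpp is not part of this diff, so the member names resolved here are illustrative:

    // Assumed shape of vk::Load(InstanceDispatch&): resolve the handful of
    // global commands that are callable before an instance exists.
    bool Load(InstanceDispatch& dld) noexcept {
        const PFN_vkGetInstanceProcAddr proc = dld.vkGetInstanceProcAddr;
        dld.vkCreateInstance =
            reinterpret_cast<PFN_vkCreateInstance>(proc(nullptr, "vkCreateInstance"));
        dld.vkEnumerateInstanceExtensionProperties =
            reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
                proc(nullptr, "vkEnumerateInstanceExtensionProperties"));
        return dld.vkCreateInstance != nullptr;
    }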
@@ -187,14 +182,14 @@ std::string GetDriverVersion(const VKDevice& device) {
187 // https://github.com/SaschaWillems/vulkan.gpuinfo.org/blob/5dddea46ea1120b0df14eef8f15ff8e318e35462/functions.php#L308-L314 182 // https://github.com/SaschaWillems/vulkan.gpuinfo.org/blob/5dddea46ea1120b0df14eef8f15ff8e318e35462/functions.php#L308-L314
188 const u32 version = device.GetDriverVersion(); 183 const u32 version = device.GetDriverVersion();
189 184
190 if (device.GetDriverID() == vk::DriverIdKHR::eNvidiaProprietary) { 185 if (device.GetDriverID() == VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR) {
191 const u32 major = (version >> 22) & 0x3ff; 186 const u32 major = (version >> 22) & 0x3ff;
192 const u32 minor = (version >> 14) & 0x0ff; 187 const u32 minor = (version >> 14) & 0x0ff;
193 const u32 secondary = (version >> 6) & 0x0ff; 188 const u32 secondary = (version >> 6) & 0x0ff;
194 const u32 tertiary = version & 0x003f; 189 const u32 tertiary = version & 0x003f;
195 return fmt::format("{}.{}.{}.{}", major, minor, secondary, tertiary); 190 return fmt::format("{}.{}.{}.{}", major, minor, secondary, tertiary);
196 } 191 }
197 if (device.GetDriverID() == vk::DriverIdKHR::eIntelProprietaryWindows) { 192 if (device.GetDriverID() == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
198 const u32 major = version >> 14; 193 const u32 major = version >> 14;
199 const u32 minor = version & 0x3fff; 194 const u32 minor = version & 0x3fff;
200 return fmt::format("{}.{}", major, minor); 195 return fmt::format("{}.{}", major, minor);
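The bit layouts above are vendor conventions rather than Vulkan's standard VK_VERSION packing, which is why both branches decode by hand. A worked example for the NVIDIA path:

    // NVIDIA driver 440.31 is reported as (440 << 22) | (31 << 14) = 0x6E07C000:
    const u32 version = 0x6E07C000;
    const u32 major = (version >> 22) & 0x3ff; // 440
    const u32 minor = (version >> 14) & 0x0ff; // 31
    const u32 secondary = (version >> 6) & 0x0ff; // 0
    const u32 tertiary = version & 0x003f; // 0
    // fmt::format("{}.{}.{}.{}", ...) yields "440.31.0.0"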
@@ -307,10 +302,8 @@ void RendererVulkan::ShutDown() {
307 if (!device) { 302 if (!device) {
308 return; 303 return;
309 } 304 }
310 const auto dev = device->GetLogical(); 305 if (const auto& dev = device->GetLogical()) {
311 const auto& dld = device->GetDispatchLoader(); 306 dev.WaitIdle();
312 if (dev && dld.vkDeviceWaitIdle) {
313 dev.waitIdle(dld);
314 } 307 }
315 308
316 rasterizer.reset(); 309 rasterizer.reset();
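The wrapper folds the old two-step null checks (device handle plus the vkDeviceWaitIdle pointer) into the handle's boolean conversion and a member call. Underneath, the call is the C API's device-wide synchronization point; that it maps one-to-one is an assumption, since wrapper.cpp is outside this diff:

    // Equivalent raw C call: blocks until every queue on the device is idle,
    // making it safe to start tearing down the rasterizer and swapchain.
    vkDeviceWaitIdle(device);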
@@ -326,23 +319,11 @@ bool RendererVulkan::CreateDebugCallback() {
326 if (!Settings::values.renderer_debug) { 319 if (!Settings::values.renderer_debug) {
327 return true; 320 return true;
328 } 321 }
329 const vk::DebugUtilsMessengerCreateInfoEXT callback_ci( 322 debug_callback = instance.TryCreateDebugCallback(DebugCallback);
330 {}, 323 if (!debug_callback) {
331 vk::DebugUtilsMessageSeverityFlagBitsEXT::eError |
332 vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning |
333 vk::DebugUtilsMessageSeverityFlagBitsEXT::eInfo |
334 vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose,
335 vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral |
336 vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation |
337 vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance,
338 &DebugCallback, nullptr);
339 vk::DebugUtilsMessengerEXT unsafe_callback;
340 if (instance->createDebugUtilsMessengerEXT(&callback_ci, nullptr, &unsafe_callback, dld) !=
341 vk::Result::eSuccess) {
342 LOG_ERROR(Render_Vulkan, "Failed to create debug callback"); 324 LOG_ERROR(Render_Vulkan, "Failed to create debug callback");
343 return false; 325 return false;
344 } 326 }
345 debug_callback = UniqueDebugUtilsMessengerEXT(unsafe_callback, {*instance, nullptr, dld});
346 return true; 327 return true;
347} 328}
348 329
@@ -357,8 +338,8 @@ bool RendererVulkan::CreateSurface() {
357 nullptr, 0, nullptr, hWnd}; 338 nullptr, 0, nullptr, hWnd};
358 const auto vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>( 339 const auto vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
359 dld.vkGetInstanceProcAddr(*instance, "vkCreateWin32SurfaceKHR")); 340 dld.vkGetInstanceProcAddr(*instance, "vkCreateWin32SurfaceKHR"));
360 if (!vkCreateWin32SurfaceKHR || vkCreateWin32SurfaceKHR(instance.get(), &win32_ci, nullptr, 341 if (!vkCreateWin32SurfaceKHR ||
361 &unsafe_surface) != VK_SUCCESS) { 342 vkCreateWin32SurfaceKHR(*instance, &win32_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
362 LOG_ERROR(Render_Vulkan, "Failed to initialize Win32 surface"); 343 LOG_ERROR(Render_Vulkan, "Failed to initialize Win32 surface");
363 return false; 344 return false;
364 } 345 }
@@ -372,8 +353,8 @@ bool RendererVulkan::CreateSurface() {
372 reinterpret_cast<Window>(window_info.render_surface)}; 353 reinterpret_cast<Window>(window_info.render_surface)};
373 const auto vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>( 354 const auto vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
374 dld.vkGetInstanceProcAddr(*instance, "vkCreateXlibSurfaceKHR")); 355 dld.vkGetInstanceProcAddr(*instance, "vkCreateXlibSurfaceKHR"));
375 if (!vkCreateXlibSurfaceKHR || vkCreateXlibSurfaceKHR(instance.get(), &xlib_ci, nullptr, 356 if (!vkCreateXlibSurfaceKHR ||
376 &unsafe_surface) != VK_SUCCESS) { 357 vkCreateXlibSurfaceKHR(*instance, &xlib_ci, nullptr, &unsafe_surface) != VK_SUCCESS) {
377 LOG_ERROR(Render_Vulkan, "Failed to initialize Xlib surface"); 358 LOG_ERROR(Render_Vulkan, "Failed to initialize Xlib surface");
378 return false; 359 return false;
379 } 360 }
@@ -386,7 +367,7 @@ bool RendererVulkan::CreateSurface() {
386 const auto vkCreateWaylandSurfaceKHR = reinterpret_cast<PFN_vkCreateWaylandSurfaceKHR>( 367 const auto vkCreateWaylandSurfaceKHR = reinterpret_cast<PFN_vkCreateWaylandSurfaceKHR>(
387 dld.vkGetInstanceProcAddr(*instance, "vkCreateWaylandSurfaceKHR")); 368 dld.vkGetInstanceProcAddr(*instance, "vkCreateWaylandSurfaceKHR"));
388 if (!vkCreateWaylandSurfaceKHR || 369 if (!vkCreateWaylandSurfaceKHR ||
389 vkCreateWaylandSurfaceKHR(instance.get(), &wayland_ci, nullptr, &unsafe_surface) != 370 vkCreateWaylandSurfaceKHR(*instance, &wayland_ci, nullptr, &unsafe_surface) !=
390 VK_SUCCESS) { 371 VK_SUCCESS) {
391 LOG_ERROR(Render_Vulkan, "Failed to initialize Wayland surface"); 372 LOG_ERROR(Render_Vulkan, "Failed to initialize Wayland surface");
392 return false; 373 return false;
@@ -398,26 +379,30 @@ bool RendererVulkan::CreateSurface() {
398 return false; 379 return false;
399 } 380 }
400 381
401 surface = UniqueSurfaceKHR(unsafe_surface, {*instance, nullptr, dld}); 382 surface = vk::SurfaceKHR(unsafe_surface, *instance, dld);
402 return true; 383 return true;
403} 384}
404 385
405bool RendererVulkan::PickDevices() { 386bool RendererVulkan::PickDevices() {
406 const auto devices = instance->enumeratePhysicalDevices(dld); 387 const auto devices = instance.EnumeratePhysicalDevices();
388 if (!devices) {
389 LOG_ERROR(Render_Vulkan, "Failed to enumerate physical devices");
390 return false;
391 }
407 392
408 const s32 device_index = Settings::values.vulkan_device; 393 const s32 device_index = Settings::values.vulkan_device;
409 if (device_index < 0 || device_index >= static_cast<s32>(devices.size())) { 394 if (device_index < 0 || device_index >= static_cast<s32>(devices->size())) {
410 LOG_ERROR(Render_Vulkan, "Invalid device index {}!", device_index); 395 LOG_ERROR(Render_Vulkan, "Invalid device index {}!", device_index);
411 return false; 396 return false;
412 } 397 }
413 const vk::PhysicalDevice physical_device = devices[static_cast<std::size_t>(device_index)]; 398 const vk::PhysicalDevice physical_device((*devices)[static_cast<std::size_t>(device_index)],
414 399 dld);
415 if (!VKDevice::IsSuitable(physical_device, *surface, dld)) { 400 if (!VKDevice::IsSuitable(physical_device, *surface)) {
416 return false; 401 return false;
417 } 402 }
418 403
419 device = std::make_unique<VKDevice>(dld, physical_device, *surface); 404 device = std::make_unique<VKDevice>(*instance, physical_device, *surface, dld);
420 return device->Create(*instance); 405 return device->Create();
421} 406}
422 407
423void RendererVulkan::Report() const { 408void RendererVulkan::Report() const {
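Where Vulkan-Hpp signalled failure with exceptions, the wrapper returns std::optional, so enumeration and handle-wrapping become explicit at the call site. A usage sketch of the same pattern, assuming the optional holds a vector of raw VkPhysicalDevice handles, as the indexing above suggests:

    const std::optional devices = instance.EnumeratePhysicalDevices();
    if (!devices) {
        return false; // the enumeration call itself failed
    }
    for (const VkPhysicalDevice raw : *devices) {
        // Non-owning view pairing the raw handle with the dispatch table:
        const vk::PhysicalDevice handle(raw, dld);
        LOG_INFO(Render_Vulkan, "Found device {}", handle.GetProperties().deviceName);
    }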
@@ -444,30 +429,22 @@ void RendererVulkan::Report() const {
444} 429}
445 430
446std::vector<std::string> RendererVulkan::EnumerateDevices() { 431std::vector<std::string> RendererVulkan::EnumerateDevices() {
447 // Avoid putting DispatchLoaderDynamic, it's too large 432 vk::InstanceDispatch dld;
448 auto dld_memory = std::make_unique<vk::DispatchLoaderDynamic>();
449 auto& dld = *dld_memory;
450
451 Common::DynamicLibrary library = OpenVulkanLibrary(); 433 Common::DynamicLibrary library = OpenVulkanLibrary();
452 UniqueInstance instance = CreateInstance(library, dld); 434 vk::Instance instance = CreateInstance(library, dld);
453 if (!instance) { 435 if (!instance) {
454 return {}; 436 return {};
455 } 437 }
456 438
457 u32 num_devices; 439 const std::optional physical_devices = instance.EnumeratePhysicalDevices();
458 if (instance->enumeratePhysicalDevices(&num_devices, nullptr, dld) != vk::Result::eSuccess) { 440 if (!physical_devices) {
459 return {};
460 }
461 std::vector<vk::PhysicalDevice> devices(num_devices);
462 if (instance->enumeratePhysicalDevices(&num_devices, devices.data(), dld) !=
463 vk::Result::eSuccess) {
464 return {}; 441 return {};
465 } 442 }
466 443
467 std::vector<std::string> names; 444 std::vector<std::string> names;
468 names.reserve(num_devices); 445 names.reserve(physical_devices->size());
469 for (auto& device : devices) { 446 for (const auto& device : *physical_devices) {
470 names.push_back(device.getProperties(dld).deviceName); 447 names.push_back(vk::PhysicalDevice(device, dld).GetProperties().deviceName);
471 } 448 }
472 return names; 449 return names;
473} 450}
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 42e253de5..18270909b 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -12,7 +12,7 @@
12#include "common/dynamic_library.h" 12#include "common/dynamic_library.h"
13 13
14#include "video_core/renderer_base.h" 14#include "video_core/renderer_base.h"
15#include "video_core/renderer_vulkan/declarations.h" 15#include "video_core/renderer_vulkan/wrapper.h"
16 16
17namespace Core { 17namespace Core {
18class System; 18class System;
@@ -61,14 +61,14 @@ private:
61 Core::System& system; 61 Core::System& system;
62 62
63 Common::DynamicLibrary library; 63 Common::DynamicLibrary library;
64 vk::DispatchLoaderDynamic dld; 64 vk::InstanceDispatch dld;
65 65
66 UniqueInstance instance; 66 vk::Instance instance;
67 UniqueSurfaceKHR surface; 67 vk::SurfaceKHR surface;
68 68
69 VKScreenInfo screen_info; 69 VKScreenInfo screen_info;
70 70
71 UniqueDebugUtilsMessengerEXT debug_callback; 71 vk::DebugCallback debug_callback;
72 std::unique_ptr<VKDevice> device; 72 std::unique_ptr<VKDevice> device;
73 std::unique_ptr<VKSwapchain> swapchain; 73 std::unique_ptr<VKSwapchain> swapchain;
74 std::unique_ptr<VKMemoryManager> memory_manager; 74 std::unique_ptr<VKMemoryManager> memory_manager;
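vk::Instance, vk::SurfaceKHR and vk::DebugCallback replace the UniqueX aliases one-for-one, so destruction still follows reverse declaration order. A sketch of the owning-handle shape these members presumably have; wrapper.h itself is not shown in this diff, so everything below is an assumption about its design:

    // Assumed shape of an owning wrapper handle: raw handle + parent +
    // dispatch table needed to destroy it; move-only in practice.
    class DebugCallback {
    public:
        ~DebugCallback() {
            if (handle != VK_NULL_HANDLE) {
                dld->vkDestroyDebugUtilsMessengerEXT(instance, handle, nullptr);
            }
        }
        explicit operator bool() const noexcept { return handle != VK_NULL_HANDLE; }
        // Construction and move operations omitted in this sketch.

    private:
        VkDebugUtilsMessengerEXT handle = VK_NULL_HANDLE;
        VkInstance instance = VK_NULL_HANDLE;
        const InstanceDispatch* dld = nullptr;
    };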
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 855cfc883..21644a7e7 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -20,7 +20,6 @@
20#include "video_core/gpu.h" 20#include "video_core/gpu.h"
21#include "video_core/morton.h" 21#include "video_core/morton.h"
22#include "video_core/rasterizer_interface.h" 22#include "video_core/rasterizer_interface.h"
23#include "video_core/renderer_vulkan/declarations.h"
24#include "video_core/renderer_vulkan/renderer_vulkan.h" 23#include "video_core/renderer_vulkan/renderer_vulkan.h"
25#include "video_core/renderer_vulkan/vk_blit_screen.h" 24#include "video_core/renderer_vulkan/vk_blit_screen.h"
26#include "video_core/renderer_vulkan/vk_device.h" 25#include "video_core/renderer_vulkan/vk_device.h"
@@ -30,6 +29,7 @@
30#include "video_core/renderer_vulkan/vk_scheduler.h" 29#include "video_core/renderer_vulkan/vk_scheduler.h"
31#include "video_core/renderer_vulkan/vk_shader_util.h" 30#include "video_core/renderer_vulkan/vk_shader_util.h"
32#include "video_core/renderer_vulkan/vk_swapchain.h" 31#include "video_core/renderer_vulkan/vk_swapchain.h"
32#include "video_core/renderer_vulkan/wrapper.h"
33#include "video_core/surface.h" 33#include "video_core/surface.h"
34 34
35namespace Vulkan { 35namespace Vulkan {
@@ -140,16 +140,25 @@ struct ScreenRectVertex {
140 std::array<f32, 2> position; 140 std::array<f32, 2> position;
141 std::array<f32, 2> tex_coord; 141 std::array<f32, 2> tex_coord;
142 142
143 static vk::VertexInputBindingDescription GetDescription() { 143 static VkVertexInputBindingDescription GetDescription() {
144 return vk::VertexInputBindingDescription(0, sizeof(ScreenRectVertex), 144 VkVertexInputBindingDescription description;
145 vk::VertexInputRate::eVertex); 145 description.binding = 0;
146 description.stride = sizeof(ScreenRectVertex);
147 description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
148 return description;
146 } 149 }
147 150
148 static std::array<vk::VertexInputAttributeDescription, 2> GetAttributes() { 151 static std::array<VkVertexInputAttributeDescription, 2> GetAttributes() {
149 return {vk::VertexInputAttributeDescription(0, 0, vk::Format::eR32G32Sfloat, 152 std::array<VkVertexInputAttributeDescription, 2> attributes;
150 offsetof(ScreenRectVertex, position)), 153 attributes[0].location = 0;
151 vk::VertexInputAttributeDescription(1, 0, vk::Format::eR32G32Sfloat, 154 attributes[0].binding = 0;
152 offsetof(ScreenRectVertex, tex_coord))}; 155 attributes[0].format = VK_FORMAT_R32G32_SFLOAT;
156 attributes[0].offset = offsetof(ScreenRectVertex, position);
157 attributes[1].location = 1;
158 attributes[1].binding = 0;
159 attributes[1].format = VK_FORMAT_R32G32_SFLOAT;
160 attributes[1].offset = offsetof(ScreenRectVertex, tex_coord);
161 return attributes;
153 } 162 }
154}; 163};
155 164
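Replacing the Vulkan-Hpp constructor calls with field-by-field assignment leaves the offsetof usage untouched; it remains valid because ScreenRectVertex is standard-layout. A guard that would make that precondition explicit (not in the commit; requires <type_traits>):

    static_assert(std::is_standard_layout_v<ScreenRectVertex>,
                  "offsetof requires a standard-layout type");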
@@ -172,16 +181,16 @@ std::size_t GetSizeInBytes(const Tegra::FramebufferConfig& framebuffer) {
172 static_cast<std::size_t>(framebuffer.height) * GetBytesPerPixel(framebuffer); 181 static_cast<std::size_t>(framebuffer.height) * GetBytesPerPixel(framebuffer);
173} 182}
174 183
175vk::Format GetFormat(const Tegra::FramebufferConfig& framebuffer) { 184VkFormat GetFormat(const Tegra::FramebufferConfig& framebuffer) {
176 switch (framebuffer.pixel_format) { 185 switch (framebuffer.pixel_format) {
177 case Tegra::FramebufferConfig::PixelFormat::ABGR8: 186 case Tegra::FramebufferConfig::PixelFormat::ABGR8:
178 return vk::Format::eA8B8G8R8UnormPack32; 187 return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
179 case Tegra::FramebufferConfig::PixelFormat::RGB565: 188 case Tegra::FramebufferConfig::PixelFormat::RGB565:
180 return vk::Format::eR5G6B5UnormPack16; 189 return VK_FORMAT_R5G6B5_UNORM_PACK16;
181 default: 190 default:
182 UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}", 191 UNIMPLEMENTED_MSG("Unknown framebuffer pixel format: {}",
183 static_cast<u32>(framebuffer.pixel_format)); 192 static_cast<u32>(framebuffer.pixel_format));
184 return vk::Format::eA8B8G8R8UnormPack32; 193 return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
185 } 194 }
186} 195}
187 196
@@ -219,8 +228,8 @@ void VKBlitScreen::Recreate() {
219 CreateDynamicResources(); 228 CreateDynamicResources();
220} 229}
221 230
222std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, 231std::tuple<VKFence&, VkSemaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
223 bool use_accelerated) { 232 bool use_accelerated) {
224 RefreshResources(framebuffer); 233 RefreshResources(framebuffer);
225 234
226 // Finish any pending renderpass 235 // Finish any pending renderpass
@@ -255,46 +264,76 @@ std::tuple<VKFence&, vk::Semaphore> VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
255 framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1, 264 framebuffer.stride, block_height_log2, framebuffer.height, 0, 1, 1,
256 map.GetAddress() + image_offset, host_ptr); 265 map.GetAddress() + image_offset, host_ptr);
257 266
258 blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eTransfer, 267 blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_TRANSFER_BIT,
259 vk::AccessFlagBits::eTransferWrite, 268 VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
260 vk::ImageLayout::eTransferDstOptimal); 269
261 270 VkBufferImageCopy copy;
262 const vk::BufferImageCopy copy(image_offset, 0, 0, 271 copy.bufferOffset = image_offset;
263 {vk::ImageAspectFlagBits::eColor, 0, 0, 1}, {0, 0, 0}, 272 copy.bufferRowLength = 0;
264 {framebuffer.width, framebuffer.height, 1}); 273 copy.bufferImageHeight = 0;
265 scheduler.Record([buffer_handle = *buffer, image = blit_image->GetHandle(), 274 copy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
266 copy](auto cmdbuf, auto& dld) { 275 copy.imageSubresource.mipLevel = 0;
267 cmdbuf.copyBufferToImage(buffer_handle, image, vk::ImageLayout::eTransferDstOptimal, 276 copy.imageSubresource.baseArrayLayer = 0;
268 {copy}, dld); 277 copy.imageSubresource.layerCount = 1;
269 }); 278 copy.imageOffset.x = 0;
279 copy.imageOffset.y = 0;
280 copy.imageOffset.z = 0;
281 copy.imageExtent.width = framebuffer.width;
282 copy.imageExtent.height = framebuffer.height;
283 copy.imageExtent.depth = 1;
284 scheduler.Record(
285 [buffer = *buffer, image = *blit_image->GetHandle(), copy](vk::CommandBuffer cmdbuf) {
286 cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
287 });
270 } 288 }
271 map.Release(); 289 map.Release();
272 290
273 blit_image->Transition(0, 1, 0, 1, vk::PipelineStageFlagBits::eFragmentShader, 291 blit_image->Transition(0, 1, 0, 1, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
274 vk::AccessFlagBits::eShaderRead, 292 VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
275 vk::ImageLayout::eShaderReadOnlyOptimal);
276 293
277 scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index], 294 scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
278 descriptor_set = descriptor_sets[image_index], buffer = *buffer, 295 descriptor_set = descriptor_sets[image_index], buffer = *buffer,
279 size = swapchain.GetSize(), pipeline = *pipeline, 296 size = swapchain.GetSize(), pipeline = *pipeline,
280 layout = *pipeline_layout](auto cmdbuf, auto& dld) { 297 layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
281 const vk::ClearValue clear_color{std::array{0.0f, 0.0f, 0.0f, 1.0f}}; 298 VkClearValue clear_color;
282 const vk::RenderPassBeginInfo renderpass_bi(renderpass, framebuffer, {{0, 0}, size}, 1, 299 clear_color.color.float32[0] = 0.0f;
283 &clear_color); 300 clear_color.color.float32[1] = 0.0f;
284 301 clear_color.color.float32[2] = 0.0f;
285 cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld); 302 clear_color.color.float32[3] = 0.0f;
286 cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld); 303
287 cmdbuf.setViewport( 304 VkRenderPassBeginInfo renderpass_bi;
288 0, 305 renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
289 {{0.0f, 0.0f, static_cast<f32>(size.width), static_cast<f32>(size.height), 0.0f, 1.0f}}, 306 renderpass_bi.pNext = nullptr;
290 dld); 307 renderpass_bi.renderPass = renderpass;
291 cmdbuf.setScissor(0, {{{0, 0}, size}}, dld); 308 renderpass_bi.framebuffer = framebuffer;
292 309 renderpass_bi.renderArea.offset.x = 0;
293 cmdbuf.bindVertexBuffers(0, {buffer}, {offsetof(BufferData, vertices)}, dld); 310 renderpass_bi.renderArea.offset.y = 0;
294 cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, layout, 0, {descriptor_set}, {}, 311 renderpass_bi.renderArea.extent = size;
295 dld); 312 renderpass_bi.clearValueCount = 1;
296 cmdbuf.draw(4, 1, 0, 0, dld); 313 renderpass_bi.pClearValues = &clear_color;
297 cmdbuf.endRenderPass(dld); 314
315 VkViewport viewport;
316 viewport.x = 0.0f;
317 viewport.y = 0.0f;
318 viewport.width = static_cast<float>(size.width);
319 viewport.height = static_cast<float>(size.height);
320 viewport.minDepth = 0.0f;
321 viewport.maxDepth = 1.0f;
322
323 VkRect2D scissor;
324 scissor.offset.x = 0;
325 scissor.offset.y = 0;
326 scissor.extent = size;
327
328 cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
329 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
330 cmdbuf.SetViewport(0, viewport);
331 cmdbuf.SetScissor(0, scissor);
332
333 cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
334 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
335 cmdbuf.Draw(4, 1, 0, 0);
336 cmdbuf.EndRenderPass();
298 }); 337 });
299 338
300 return {scheduler.GetFence(), *semaphores[image_index]}; 339 return {scheduler.GetFence(), *semaphores[image_index]};
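The scheduler.Record lambdas now take a single vk::CommandBuffer and, crucially, capture raw handles (*buffer, *blit_image->GetHandle()) by value: recording is deferred, so a capture must not reference stack locals or owning wrappers that may move. A minimal sketch of that contract; the deferred-execution details of the scheduler are assumptions here:

    const VkBuffer src = *buffer;                 // unwrap the owning wrapper
    const VkImage dst = *blit_image->GetHandle(); // raw handles are trivially copyable
    scheduler.Record([src, dst, copy](vk::CommandBuffer cmdbuf) {
        // Executed later against the active command buffer; everything the
        // lambda touches must live in its own by-value captures.
        cmdbuf.CopyBufferToImage(src, dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
    });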
@@ -334,165 +373,295 @@ void VKBlitScreen::CreateShaders() {
334} 373}
335 374
336void VKBlitScreen::CreateSemaphores() { 375void VKBlitScreen::CreateSemaphores() {
337 const auto dev = device.GetLogical();
338 const auto& dld = device.GetDispatchLoader();
339
340 semaphores.resize(image_count); 376 semaphores.resize(image_count);
341 for (std::size_t i = 0; i < image_count; ++i) { 377 std::generate(semaphores.begin(), semaphores.end(),
342 semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld); 378 [this] { return device.GetLogical().CreateSemaphore(); });
343 }
344} 379}
345 380
346void VKBlitScreen::CreateDescriptorPool() { 381void VKBlitScreen::CreateDescriptorPool() {
347 const std::array<vk::DescriptorPoolSize, 2> pool_sizes{ 382 std::array<VkDescriptorPoolSize, 2> pool_sizes;
348 vk::DescriptorPoolSize{vk::DescriptorType::eUniformBuffer, static_cast<u32>(image_count)}, 383 pool_sizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
349 vk::DescriptorPoolSize{vk::DescriptorType::eCombinedImageSampler, 384 pool_sizes[0].descriptorCount = static_cast<u32>(image_count);
350 static_cast<u32>(image_count)}}; 385 pool_sizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
351 const vk::DescriptorPoolCreateInfo pool_ci( 386 pool_sizes[1].descriptorCount = static_cast<u32>(image_count);
352 {}, static_cast<u32>(image_count), static_cast<u32>(pool_sizes.size()), pool_sizes.data()); 387
353 const auto dev = device.GetLogical(); 388 VkDescriptorPoolCreateInfo ci;
354 descriptor_pool = dev.createDescriptorPoolUnique(pool_ci, nullptr, device.GetDispatchLoader()); 389 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
390 ci.pNext = nullptr;
391 ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
392 ci.maxSets = static_cast<u32>(image_count);
393 ci.poolSizeCount = static_cast<u32>(pool_sizes.size());
394 ci.pPoolSizes = pool_sizes.data();
395 descriptor_pool = device.GetLogical().CreateDescriptorPool(ci);
355} 396}
356 397
357void VKBlitScreen::CreateRenderPass() { 398void VKBlitScreen::CreateRenderPass() {
358 const vk::AttachmentDescription color_attachment( 399 VkAttachmentDescription color_attachment;
359 {}, swapchain.GetImageFormat(), vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eClear, 400 color_attachment.flags = 0;
360 vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare, 401 color_attachment.format = swapchain.GetImageFormat();
361 vk::AttachmentStoreOp::eDontCare, vk::ImageLayout::eUndefined, 402 color_attachment.samples = VK_SAMPLE_COUNT_1_BIT;
362 vk::ImageLayout::ePresentSrcKHR); 403 color_attachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
363 404 color_attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
364 const vk::AttachmentReference color_attachment_ref(0, vk::ImageLayout::eColorAttachmentOptimal); 405 color_attachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
365 406 color_attachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
366 const vk::SubpassDescription subpass_description({}, vk::PipelineBindPoint::eGraphics, 0, 407 color_attachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
367 nullptr, 1, &color_attachment_ref, nullptr, 408 color_attachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
368 nullptr, 0, nullptr); 409
369 410 VkAttachmentReference color_attachment_ref;
370 const vk::SubpassDependency dependency( 411 color_attachment_ref.attachment = 0;
371 VK_SUBPASS_EXTERNAL, 0, vk::PipelineStageFlagBits::eColorAttachmentOutput, 412 color_attachment_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
372 vk::PipelineStageFlagBits::eColorAttachmentOutput, {}, 413
373 vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite, {}); 414 VkSubpassDescription subpass_description;
374 415 subpass_description.flags = 0;
375 const vk::RenderPassCreateInfo renderpass_ci({}, 1, &color_attachment, 1, &subpass_description, 416 subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
376 1, &dependency); 417 subpass_description.inputAttachmentCount = 0;
377 418 subpass_description.pInputAttachments = nullptr;
378 const auto dev = device.GetLogical(); 419 subpass_description.colorAttachmentCount = 1;
379 renderpass = dev.createRenderPassUnique(renderpass_ci, nullptr, device.GetDispatchLoader()); 420 subpass_description.pColorAttachments = &color_attachment_ref;
421 subpass_description.pResolveAttachments = nullptr;
422 subpass_description.pDepthStencilAttachment = nullptr;
423 subpass_description.preserveAttachmentCount = 0;
424 subpass_description.pPreserveAttachments = nullptr;
425
426 VkSubpassDependency dependency;
427 dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
428 dependency.dstSubpass = 0;
429 dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
430 dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
431 dependency.srcAccessMask = 0;
432 dependency.dstAccessMask =
433 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
434 dependency.dependencyFlags = 0;
435
436 VkRenderPassCreateInfo renderpass_ci;
437 renderpass_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
438 renderpass_ci.pNext = nullptr;
439 renderpass_ci.flags = 0;
440 renderpass_ci.attachmentCount = 1;
441 renderpass_ci.pAttachments = &color_attachment;
442 renderpass_ci.subpassCount = 1;
443 renderpass_ci.pSubpasses = &subpass_description;
444 renderpass_ci.dependencyCount = 1;
445 renderpass_ci.pDependencies = &dependency;
446
447 renderpass = device.GetLogical().CreateRenderPass(renderpass_ci);
380} 448}
381 449
382void VKBlitScreen::CreateDescriptorSetLayout() { 450void VKBlitScreen::CreateDescriptorSetLayout() {
383 const std::array<vk::DescriptorSetLayoutBinding, 2> layout_bindings{ 451 std::array<VkDescriptorSetLayoutBinding, 2> layout_bindings;
384 vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eUniformBuffer, 1, 452 layout_bindings[0].binding = 0;
385 vk::ShaderStageFlagBits::eVertex, nullptr), 453 layout_bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
386 vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eCombinedImageSampler, 1, 454 layout_bindings[0].descriptorCount = 1;
387 vk::ShaderStageFlagBits::eFragment, nullptr)}; 455 layout_bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
388 const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci( 456 layout_bindings[0].pImmutableSamplers = nullptr;
389 {}, static_cast<u32>(layout_bindings.size()), layout_bindings.data()); 457 layout_bindings[1].binding = 1;
390 458 layout_bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
391 const auto dev = device.GetLogical(); 459 layout_bindings[1].descriptorCount = 1;
392 const auto& dld = device.GetDispatchLoader(); 460 layout_bindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
393 descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld); 461 layout_bindings[1].pImmutableSamplers = nullptr;
462
463 VkDescriptorSetLayoutCreateInfo ci;
464 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
465 ci.pNext = nullptr;
466 ci.flags = 0;
467 ci.bindingCount = static_cast<u32>(layout_bindings.size());
468 ci.pBindings = layout_bindings.data();
469
470 descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(ci);
394} 471}
395 472
396void VKBlitScreen::CreateDescriptorSets() { 473void VKBlitScreen::CreateDescriptorSets() {
397 const auto dev = device.GetLogical(); 474 const std::vector layouts(image_count, *descriptor_set_layout);
398 const auto& dld = device.GetDispatchLoader(); 475
399 476 VkDescriptorSetAllocateInfo ai;
400 descriptor_sets.resize(image_count); 477 ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
401 for (std::size_t i = 0; i < image_count; ++i) { 478 ai.pNext = nullptr;
402 const vk::DescriptorSetLayout layout = *descriptor_set_layout; 479 ai.descriptorPool = *descriptor_pool;
403 const vk::DescriptorSetAllocateInfo descriptor_set_ai(*descriptor_pool, 1, &layout); 480 ai.descriptorSetCount = static_cast<u32>(image_count);
404 const vk::Result result = 481 ai.pSetLayouts = layouts.data();
405 dev.allocateDescriptorSets(&descriptor_set_ai, &descriptor_sets[i], dld); 482 descriptor_sets = descriptor_pool.Allocate(ai);
406 ASSERT(result == vk::Result::eSuccess);
407 }
408} 483}
409 484
410void VKBlitScreen::CreatePipelineLayout() { 485void VKBlitScreen::CreatePipelineLayout() {
411 const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &descriptor_set_layout.get(), 0, 486 VkPipelineLayoutCreateInfo ci;
412 nullptr); 487 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
413 const auto dev = device.GetLogical(); 488 ci.pNext = nullptr;
414 const auto& dld = device.GetDispatchLoader(); 489 ci.flags = 0;
415 pipeline_layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld); 490 ci.setLayoutCount = 1;
491 ci.pSetLayouts = descriptor_set_layout.address();
492 ci.pushConstantRangeCount = 0;
493 ci.pPushConstantRanges = nullptr;
494 pipeline_layout = device.GetLogical().CreatePipelineLayout(ci);
416} 495}
417 496
418void VKBlitScreen::CreateGraphicsPipeline() { 497void VKBlitScreen::CreateGraphicsPipeline() {
419 const std::array shader_stages = { 498 std::array<VkPipelineShaderStageCreateInfo, 2> shader_stages;
420 vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eVertex, *vertex_shader, 499 shader_stages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
421 "main", nullptr), 500 shader_stages[0].pNext = nullptr;
422 vk::PipelineShaderStageCreateInfo({}, vk::ShaderStageFlagBits::eFragment, *fragment_shader, 501 shader_stages[0].flags = 0;
423 "main", nullptr)}; 502 shader_stages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
503 shader_stages[0].module = *vertex_shader;
504 shader_stages[0].pName = "main";
505 shader_stages[0].pSpecializationInfo = nullptr;
506 shader_stages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
507 shader_stages[1].pNext = nullptr;
508 shader_stages[1].flags = 0;
509 shader_stages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
510 shader_stages[1].module = *fragment_shader;
511 shader_stages[1].pName = "main";
512 shader_stages[1].pSpecializationInfo = nullptr;
424 513
425 const auto vertex_binding_description = ScreenRectVertex::GetDescription(); 514 const auto vertex_binding_description = ScreenRectVertex::GetDescription();
426 const auto vertex_attrs_description = ScreenRectVertex::GetAttributes(); 515 const auto vertex_attrs_description = ScreenRectVertex::GetAttributes();
427 const vk::PipelineVertexInputStateCreateInfo vertex_input(
428 {}, 1, &vertex_binding_description, static_cast<u32>(vertex_attrs_description.size()),
429 vertex_attrs_description.data());
430
431 const vk::PipelineInputAssemblyStateCreateInfo input_assembly(
432 {}, vk::PrimitiveTopology::eTriangleStrip, false);
433
434 // Set a dummy viewport, it's going to be replaced by dynamic states.
435 const vk::Viewport viewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
436 const vk::Rect2D scissor({0, 0}, {1, 1});
437 516
438 const vk::PipelineViewportStateCreateInfo viewport_state({}, 1, &viewport, 1, &scissor); 517 VkPipelineVertexInputStateCreateInfo vertex_input_ci;
439 518 vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
440 const vk::PipelineRasterizationStateCreateInfo rasterizer( 519 vertex_input_ci.pNext = nullptr;
441 {}, false, false, vk::PolygonMode::eFill, vk::CullModeFlagBits::eNone, 520 vertex_input_ci.flags = 0;
442 vk::FrontFace::eClockwise, false, 0.0f, 0.0f, 0.0f, 1.0f); 521 vertex_input_ci.vertexBindingDescriptionCount = 1;
443 522 vertex_input_ci.pVertexBindingDescriptions = &vertex_binding_description;
444 const vk::PipelineMultisampleStateCreateInfo multisampling({}, vk::SampleCountFlagBits::e1, 523 vertex_input_ci.vertexAttributeDescriptionCount = u32{vertex_attrs_description.size()};
445 false, 0.0f, nullptr, false, false); 524 vertex_input_ci.pVertexAttributeDescriptions = vertex_attrs_description.data();
446 525
447 const vk::PipelineColorBlendAttachmentState color_blend_attachment( 526 VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
448 false, vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd, 527 input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
449 vk::BlendFactor::eZero, vk::BlendFactor::eZero, vk::BlendOp::eAdd, 528 input_assembly_ci.pNext = nullptr;
450 vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | 529 input_assembly_ci.flags = 0;
451 vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA); 530 input_assembly_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
452 531 input_assembly_ci.primitiveRestartEnable = VK_FALSE;
453 const vk::PipelineColorBlendStateCreateInfo color_blending( 532
454 {}, false, vk::LogicOp::eCopy, 1, &color_blend_attachment, {0.0f, 0.0f, 0.0f, 0.0f}); 533 VkPipelineViewportStateCreateInfo viewport_state_ci;
455 534 viewport_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
456 const std::array<vk::DynamicState, 2> dynamic_states = {vk::DynamicState::eViewport, 535 viewport_state_ci.pNext = nullptr;
457 vk::DynamicState::eScissor}; 536 viewport_state_ci.flags = 0;
458 537 viewport_state_ci.viewportCount = 1;
459 const vk::PipelineDynamicStateCreateInfo dynamic_state( 538 viewport_state_ci.scissorCount = 1;
460 {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data()); 539
461 540 VkPipelineRasterizationStateCreateInfo rasterization_ci;
462 const vk::GraphicsPipelineCreateInfo pipeline_ci( 541 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
463 {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input, 542 rasterization_ci.pNext = nullptr;
464 &input_assembly, nullptr, &viewport_state, &rasterizer, &multisampling, nullptr, 543 rasterization_ci.flags = 0;
465 &color_blending, &dynamic_state, *pipeline_layout, *renderpass, 0, nullptr, 0); 544 rasterization_ci.depthClampEnable = VK_FALSE;
466 545 rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
467 const auto dev = device.GetLogical(); 546 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
468 const auto& dld = device.GetDispatchLoader(); 547 rasterization_ci.cullMode = VK_CULL_MODE_NONE;
469 pipeline = dev.createGraphicsPipelineUnique({}, pipeline_ci, nullptr, dld); 548 rasterization_ci.frontFace = VK_FRONT_FACE_CLOCKWISE;
549 rasterization_ci.depthBiasEnable = VK_FALSE;
550 rasterization_ci.depthBiasConstantFactor = 0.0f;
551 rasterization_ci.depthBiasClamp = 0.0f;
552 rasterization_ci.depthBiasSlopeFactor = 0.0f;
553 rasterization_ci.lineWidth = 1.0f;
554
555 VkPipelineMultisampleStateCreateInfo multisampling_ci;
556 multisampling_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
557 multisampling_ci.pNext = nullptr;
558 multisampling_ci.flags = 0;
559 multisampling_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
560 multisampling_ci.sampleShadingEnable = VK_FALSE;
561 multisampling_ci.minSampleShading = 0.0f;
562 multisampling_ci.pSampleMask = nullptr;
563 multisampling_ci.alphaToCoverageEnable = VK_FALSE;
564 multisampling_ci.alphaToOneEnable = VK_FALSE;
565
566 VkPipelineColorBlendAttachmentState color_blend_attachment;
567 color_blend_attachment.blendEnable = VK_FALSE;
568 color_blend_attachment.srcColorBlendFactor = VK_BLEND_FACTOR_ZERO;
569 color_blend_attachment.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
570 color_blend_attachment.colorBlendOp = VK_BLEND_OP_ADD;
571 color_blend_attachment.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
572 color_blend_attachment.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
573 color_blend_attachment.alphaBlendOp = VK_BLEND_OP_ADD;
574 color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
575 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
576
577 VkPipelineColorBlendStateCreateInfo color_blend_ci;
578 color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
579 color_blend_ci.flags = 0;
580 color_blend_ci.pNext = nullptr;
581 color_blend_ci.logicOpEnable = VK_FALSE;
582 color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
583 color_blend_ci.attachmentCount = 1;
584 color_blend_ci.pAttachments = &color_blend_attachment;
585 color_blend_ci.blendConstants[0] = 0.0f;
586 color_blend_ci.blendConstants[1] = 0.0f;
587 color_blend_ci.blendConstants[2] = 0.0f;
588 color_blend_ci.blendConstants[3] = 0.0f;
589
590 static constexpr std::array dynamic_states = {VK_DYNAMIC_STATE_VIEWPORT,
591 VK_DYNAMIC_STATE_SCISSOR};
592 VkPipelineDynamicStateCreateInfo dynamic_state_ci;
593 dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
594 dynamic_state_ci.pNext = nullptr;
595 dynamic_state_ci.flags = 0;
596 dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
597 dynamic_state_ci.pDynamicStates = dynamic_states.data();
598
599 VkGraphicsPipelineCreateInfo pipeline_ci;
600 pipeline_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
601 pipeline_ci.pNext = nullptr;
602 pipeline_ci.flags = 0;
603 pipeline_ci.stageCount = static_cast<u32>(shader_stages.size());
604 pipeline_ci.pStages = shader_stages.data();
605 pipeline_ci.pVertexInputState = &vertex_input_ci;
606 pipeline_ci.pInputAssemblyState = &input_assembly_ci;
607 pipeline_ci.pTessellationState = nullptr;
608 pipeline_ci.pViewportState = &viewport_state_ci;
609 pipeline_ci.pRasterizationState = &rasterization_ci;
610 pipeline_ci.pMultisampleState = &multisampling_ci;
611 pipeline_ci.pDepthStencilState = nullptr;
612 pipeline_ci.pColorBlendState = &color_blend_ci;
613 pipeline_ci.pDynamicState = &dynamic_state_ci;
614 pipeline_ci.layout = *pipeline_layout;
615 pipeline_ci.renderPass = *renderpass;
616 pipeline_ci.subpass = 0;
617 pipeline_ci.basePipelineHandle = 0;
618 pipeline_ci.basePipelineIndex = 0;
619
620 pipeline = device.GetLogical().CreateGraphicsPipeline(pipeline_ci);
470} 621}
471 622
472void VKBlitScreen::CreateSampler() { 623void VKBlitScreen::CreateSampler() {
473 const auto dev = device.GetLogical(); 624 VkSamplerCreateInfo ci;
474 const auto& dld = device.GetDispatchLoader(); 625 ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
475 const vk::SamplerCreateInfo sampler_ci( 626 ci.pNext = nullptr;
476 {}, vk::Filter::eLinear, vk::Filter::eLinear, vk::SamplerMipmapMode::eLinear, 627 ci.flags = 0;
477 vk::SamplerAddressMode::eClampToBorder, vk::SamplerAddressMode::eClampToBorder, 628 ci.magFilter = VK_FILTER_LINEAR;
478 vk::SamplerAddressMode::eClampToBorder, 0.0f, false, 0.0f, false, vk::CompareOp::eNever, 629 ci.minFilter = VK_FILTER_NEAREST;
479 0.0f, 0.0f, vk::BorderColor::eFloatOpaqueBlack, false); 630 ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
480 sampler = dev.createSamplerUnique(sampler_ci, nullptr, dld); 631 ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
632 ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
633 ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
634 ci.mipLodBias = 0.0f;
635 ci.anisotropyEnable = VK_FALSE;
636 ci.maxAnisotropy = 0.0f;
637 ci.compareEnable = VK_FALSE;
638 ci.compareOp = VK_COMPARE_OP_NEVER;
639 ci.minLod = 0.0f;
640 ci.maxLod = 0.0f;
641 ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
642 ci.unnormalizedCoordinates = VK_FALSE;
643
644 sampler = device.GetLogical().CreateSampler(ci);
481} 645}
482 646
483void VKBlitScreen::CreateFramebuffers() { 647void VKBlitScreen::CreateFramebuffers() {
484 const vk::Extent2D size{swapchain.GetSize()}; 648 const VkExtent2D size{swapchain.GetSize()};
485 framebuffers.clear();
486 framebuffers.resize(image_count); 649 framebuffers.resize(image_count);
487 650
488 const auto dev = device.GetLogical(); 651 VkFramebufferCreateInfo ci;
489 const auto& dld = device.GetDispatchLoader(); 652 ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
653 ci.pNext = nullptr;
654 ci.flags = 0;
655 ci.renderPass = *renderpass;
656 ci.attachmentCount = 1;
657 ci.width = size.width;
658 ci.height = size.height;
659 ci.layers = 1;
490 660
491 for (std::size_t i = 0; i < image_count; ++i) { 661 for (std::size_t i = 0; i < image_count; ++i) {
492 const vk::ImageView image_view{swapchain.GetImageViewIndex(i)}; 662 const VkImageView image_view{swapchain.GetImageViewIndex(i)};
493 const vk::FramebufferCreateInfo framebuffer_ci({}, *renderpass, 1, &image_view, size.width, 663 ci.pAttachments = &image_view;
494 size.height, 1); 664 framebuffers[i] = device.GetLogical().CreateFramebuffer(ci);
495 framebuffers[i] = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
496 } 665 }
497} 666}
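
CreateFramebuffers fills a single VkFramebufferCreateInfo outside the loop and only repoints pAttachments per swapchain image, since every other field is shared. The same pattern in isolation, where views and MakeFramebuffer are hypothetical stand-ins for the swapchain image views and the wrapper call:

    VkFramebufferCreateInfo ci{};
    ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    ci.renderPass = render_pass; // shared by all framebuffers
    ci.attachmentCount = 1;
    ci.width = extent.width;
    ci.height = extent.height;
    ci.layers = 1;
    for (const VkImageView view : views) {
        ci.pAttachments = &view; // the only per-image field
        framebuffers.push_back(MakeFramebuffer(ci)); // hypothetical helper
    }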
498 667
@@ -507,54 +676,86 @@ void VKBlitScreen::ReleaseRawImages() {
507} 676}
508 677
509void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) { 678void VKBlitScreen::CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer) {
510 const auto dev = device.GetLogical(); 679 VkBufferCreateInfo ci;
511 const auto& dld = device.GetDispatchLoader(); 680 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
512 681 ci.pNext = nullptr;
513 const vk::BufferCreateInfo buffer_ci({}, CalculateBufferSize(framebuffer), 682 ci.flags = 0;
514 vk::BufferUsageFlagBits::eTransferSrc | 683 ci.size = CalculateBufferSize(framebuffer);
515 vk::BufferUsageFlagBits::eVertexBuffer | 684 ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
516 vk::BufferUsageFlagBits::eUniformBuffer, 685 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
517 vk::SharingMode::eExclusive, 0, nullptr); 686 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
518 buffer = dev.createBufferUnique(buffer_ci, nullptr, dld); 687 ci.queueFamilyIndexCount = 0;
519 buffer_commit = memory_manager.Commit(*buffer, true); 688 ci.pQueueFamilyIndices = nullptr;
689
690 buffer = device.GetLogical().CreateBuffer(ci);
691 buffer_commit = memory_manager.Commit(buffer, true);
520} 692}
521 693
522void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) { 694void VKBlitScreen::CreateRawImages(const Tegra::FramebufferConfig& framebuffer) {
523 raw_images.resize(image_count); 695 raw_images.resize(image_count);
524 raw_buffer_commits.resize(image_count); 696 raw_buffer_commits.resize(image_count);
525 697
526 const auto format = GetFormat(framebuffer); 698 VkImageCreateInfo ci;
699 ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
700 ci.pNext = nullptr;
701 ci.flags = 0;
702 ci.imageType = VK_IMAGE_TYPE_2D;
703 ci.format = GetFormat(framebuffer);
704 ci.extent.width = framebuffer.width;
705 ci.extent.height = framebuffer.height;
706 ci.extent.depth = 1;
707 ci.mipLevels = 1;
708 ci.arrayLayers = 1;
709 ci.samples = VK_SAMPLE_COUNT_1_BIT;
710 ci.tiling = VK_IMAGE_TILING_LINEAR;
711 ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
712 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
713 ci.queueFamilyIndexCount = 0;
714 ci.pQueueFamilyIndices = nullptr;
715 ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
716
527 for (std::size_t i = 0; i < image_count; ++i) { 717 for (std::size_t i = 0; i < image_count; ++i) {
528 const vk::ImageCreateInfo image_ci( 718 raw_images[i] = std::make_unique<VKImage>(device, scheduler, ci, VK_IMAGE_ASPECT_COLOR_BIT);
529 {}, vk::ImageType::e2D, format, {framebuffer.width, framebuffer.height, 1}, 1, 1,
530 vk::SampleCountFlagBits::e1, vk::ImageTiling::eOptimal,
531 vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled,
532 vk::SharingMode::eExclusive, 0, nullptr, vk::ImageLayout::eUndefined);
533
534 raw_images[i] =
535 std::make_unique<VKImage>(device, scheduler, image_ci, vk::ImageAspectFlagBits::eColor);
536 raw_buffer_commits[i] = memory_manager.Commit(raw_images[i]->GetHandle(), false); 719 raw_buffer_commits[i] = memory_manager.Commit(raw_images[i]->GetHandle(), false);
537 } 720 }
538} 721}
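
The raw images here use VK_IMAGE_TILING_LINEAR. Linear tiling only guarantees a limited per-format feature set compared to optimal tiling, so code that samples from linear images would normally verify support first. A sketch of that check using the standard physical-device query; the surrounding engine plumbing is assumed, not taken from this commit:

    bool SupportsLinearSampling(VkPhysicalDevice gpu, VkFormat format) {
        VkFormatProperties props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &props);
        return (props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0;
    }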
539 722
540void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const { 723void VKBlitScreen::UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const {
541 const vk::DescriptorSet descriptor_set = descriptor_sets[image_index]; 724 VkDescriptorBufferInfo buffer_info;
542 725 buffer_info.buffer = *buffer;
543 const vk::DescriptorBufferInfo buffer_info(*buffer, offsetof(BufferData, uniform), 726 buffer_info.offset = offsetof(BufferData, uniform);
544 sizeof(BufferData::uniform)); 727 buffer_info.range = sizeof(BufferData::uniform);
545 const vk::WriteDescriptorSet ubo_write(descriptor_set, 0, 0, 1, 728
546 vk::DescriptorType::eUniformBuffer, nullptr, 729 VkWriteDescriptorSet ubo_write;
547 &buffer_info, nullptr); 730 ubo_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
548 731 ubo_write.pNext = nullptr;
549 const vk::DescriptorImageInfo image_info(*sampler, image_view, 732 ubo_write.dstSet = descriptor_sets[image_index];
550 vk::ImageLayout::eShaderReadOnlyOptimal); 733 ubo_write.dstBinding = 0;
551 const vk::WriteDescriptorSet sampler_write(descriptor_set, 1, 0, 1, 734 ubo_write.dstArrayElement = 0;
552 vk::DescriptorType::eCombinedImageSampler, 735 ubo_write.descriptorCount = 1;
553 &image_info, nullptr, nullptr); 736 ubo_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
554 737 ubo_write.pImageInfo = nullptr;
555 const auto dev = device.GetLogical(); 738 ubo_write.pBufferInfo = &buffer_info;
556 const auto& dld = device.GetDispatchLoader(); 739 ubo_write.pTexelBufferView = nullptr;
557 dev.updateDescriptorSets({ubo_write, sampler_write}, {}, dld); 740
741 VkDescriptorImageInfo image_info;
742 image_info.sampler = *sampler;
743 image_info.imageView = image_view;
744 image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
745
746 VkWriteDescriptorSet sampler_write;
747 sampler_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
748 sampler_write.pNext = nullptr;
749 sampler_write.dstSet = descriptor_sets[image_index];
750 sampler_write.dstBinding = 1;
751 sampler_write.dstArrayElement = 0;
752 sampler_write.descriptorCount = 1;
753 sampler_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
754 sampler_write.pImageInfo = &image_info;
755 sampler_write.pBufferInfo = nullptr;
756 sampler_write.pTexelBufferView = nullptr;
757
758 device.GetLogical().UpdateDescriptorSets(std::array{ubo_write, sampler_write}, {});
558} 759}
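
UpdateDescriptorSet builds one buffer write (binding 0, the uniform block) and one combined-image-sampler write (binding 1) and submits both in a single call. The wrapper's UpdateDescriptorSets presumably forwards to the raw entry point; the equivalent direct call, assuming a plain VkDevice handle named device_handle, would be:

    const std::array writes{ubo_write, sampler_write};
    vkUpdateDescriptorSets(device_handle, static_cast<uint32_t>(writes.size()),
                           writes.data(), 0, nullptr); // no descriptor copies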
559 760
560void VKBlitScreen::SetUniformData(BufferData& data, 761void VKBlitScreen::SetUniformData(BufferData& data,
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index ea680b3f5..5eb544aea 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -8,9 +8,9 @@
8#include <memory> 8#include <memory>
9#include <tuple> 9#include <tuple>
10 10
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_memory_manager.h" 11#include "video_core/renderer_vulkan/vk_memory_manager.h"
13#include "video_core/renderer_vulkan/vk_resource_manager.h" 12#include "video_core/renderer_vulkan/vk_resource_manager.h"
13#include "video_core/renderer_vulkan/wrapper.h"
14 14
15namespace Core { 15namespace Core {
16class System; 16class System;
@@ -49,8 +49,8 @@ public:
49 49
50 void Recreate(); 50 void Recreate();
51 51
52 std::tuple<VKFence&, vk::Semaphore> Draw(const Tegra::FramebufferConfig& framebuffer, 52 std::tuple<VKFence&, VkSemaphore> Draw(const Tegra::FramebufferConfig& framebuffer,
53 bool use_accelerated); 53 bool use_accelerated);
54 54
55private: 55private:
56 struct BufferData; 56 struct BufferData;
@@ -74,7 +74,7 @@ private:
74 void CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer); 74 void CreateStagingBuffer(const Tegra::FramebufferConfig& framebuffer);
75 void CreateRawImages(const Tegra::FramebufferConfig& framebuffer); 75 void CreateRawImages(const Tegra::FramebufferConfig& framebuffer);
76 76
77 void UpdateDescriptorSet(std::size_t image_index, vk::ImageView image_view) const; 77 void UpdateDescriptorSet(std::size_t image_index, VkImageView image_view) const;
78 void SetUniformData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const; 78 void SetUniformData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
79 void SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const; 79 void SetVertexData(BufferData& data, const Tegra::FramebufferConfig& framebuffer) const;
80 80
@@ -93,23 +93,23 @@ private:
93 const std::size_t image_count; 93 const std::size_t image_count;
94 const VKScreenInfo& screen_info; 94 const VKScreenInfo& screen_info;
95 95
96 UniqueShaderModule vertex_shader; 96 vk::ShaderModule vertex_shader;
97 UniqueShaderModule fragment_shader; 97 vk::ShaderModule fragment_shader;
98 UniqueDescriptorPool descriptor_pool; 98 vk::DescriptorPool descriptor_pool;
99 UniqueDescriptorSetLayout descriptor_set_layout; 99 vk::DescriptorSetLayout descriptor_set_layout;
100 UniquePipelineLayout pipeline_layout; 100 vk::PipelineLayout pipeline_layout;
101 UniquePipeline pipeline; 101 vk::Pipeline pipeline;
102 UniqueRenderPass renderpass; 102 vk::RenderPass renderpass;
103 std::vector<UniqueFramebuffer> framebuffers; 103 std::vector<vk::Framebuffer> framebuffers;
104 std::vector<vk::DescriptorSet> descriptor_sets; 104 vk::DescriptorSets descriptor_sets;
105 UniqueSampler sampler; 105 vk::Sampler sampler;
106 106
107 UniqueBuffer buffer; 107 vk::Buffer buffer;
108 VKMemoryCommit buffer_commit; 108 VKMemoryCommit buffer_commit;
109 109
110 std::vector<std::unique_ptr<VKFenceWatch>> watches; 110 std::vector<std::unique_ptr<VKFenceWatch>> watches;
111 111
112 std::vector<UniqueSemaphore> semaphores; 112 std::vector<vk::Semaphore> semaphores;
113 std::vector<std::unique_ptr<VKImage>> raw_images; 113 std::vector<std::unique_ptr<VKImage>> raw_images;
114 std::vector<VKMemoryCommit> raw_buffer_commits; 114 std::vector<VKMemoryCommit> raw_buffer_commits;
115 u32 raw_width = 0; 115 u32 raw_width = 0;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 326d74f29..0d167afbd 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -11,32 +11,31 @@
11#include "common/assert.h" 11#include "common/assert.h"
12#include "common/bit_util.h" 12#include "common/bit_util.h"
13#include "core/core.h" 13#include "core/core.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_buffer_cache.h" 14#include "video_core/renderer_vulkan/vk_buffer_cache.h"
16#include "video_core/renderer_vulkan/vk_device.h" 15#include "video_core/renderer_vulkan/vk_device.h"
17#include "video_core/renderer_vulkan/vk_scheduler.h" 16#include "video_core/renderer_vulkan/vk_scheduler.h"
18#include "video_core/renderer_vulkan/vk_stream_buffer.h" 17#include "video_core/renderer_vulkan/vk_stream_buffer.h"
18#include "video_core/renderer_vulkan/wrapper.h"
19 19
20namespace Vulkan { 20namespace Vulkan {
21 21
22namespace { 22namespace {
23 23
24const auto BufferUsage = 24constexpr VkBufferUsageFlags BUFFER_USAGE =
25 vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eIndexBuffer | 25 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
26 vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer; 26 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
27 27
28const auto UploadPipelineStage = 28constexpr VkPipelineStageFlags UPLOAD_PIPELINE_STAGE =
29 vk::PipelineStageFlagBits::eTransfer | vk::PipelineStageFlagBits::eVertexInput | 29 VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
30 vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader | 30 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
31 vk::PipelineStageFlagBits::eComputeShader; 31 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
32 32
33const auto UploadAccessBarriers = 33constexpr VkAccessFlags UPLOAD_ACCESS_BARRIERS =
34 vk::AccessFlagBits::eTransferRead | vk::AccessFlagBits::eShaderRead | 34 VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_UNIFORM_READ_BIT |
35 vk::AccessFlagBits::eUniformRead | vk::AccessFlagBits::eVertexAttributeRead | 35 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT;
36 vk::AccessFlagBits::eIndexRead;
37 36
38auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) { 37std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
39 return std::make_unique<VKStreamBuffer>(device, scheduler, BufferUsage); 38 return std::make_unique<VKStreamBuffer>(device, scheduler, BUFFER_USAGE);
40} 39}
41 40
42} // Anonymous namespace 41} // Anonymous namespace
@@ -44,15 +43,18 @@ auto CreateStreamBuffer(const VKDevice& device, VKScheduler& scheduler) {
44CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager, 43CachedBufferBlock::CachedBufferBlock(const VKDevice& device, VKMemoryManager& memory_manager,
45 VAddr cpu_addr, std::size_t size) 44 VAddr cpu_addr, std::size_t size)
46 : VideoCommon::BufferBlock{cpu_addr, size} { 45 : VideoCommon::BufferBlock{cpu_addr, size} {
47 const vk::BufferCreateInfo buffer_ci({}, static_cast<vk::DeviceSize>(size), 46 VkBufferCreateInfo ci;
48 BufferUsage | vk::BufferUsageFlagBits::eTransferSrc | 47 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
49 vk::BufferUsageFlagBits::eTransferDst, 48 ci.pNext = nullptr;
50 vk::SharingMode::eExclusive, 0, nullptr); 49 ci.flags = 0;
51 50 ci.size = static_cast<VkDeviceSize>(size);
52 const auto& dld{device.GetDispatchLoader()}; 51 ci.usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
53 const auto dev{device.GetLogical()}; 52 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
54 buffer.handle = dev.createBufferUnique(buffer_ci, nullptr, dld); 53 ci.queueFamilyIndexCount = 0;
55 buffer.commit = memory_manager.Commit(*buffer.handle, false); 54 ci.pQueueFamilyIndices = nullptr;
55
56 buffer.handle = device.GetLogical().CreateBuffer(ci);
57 buffer.commit = memory_manager.Commit(buffer.handle, false);
56} 58}
57 59
58CachedBufferBlock::~CachedBufferBlock() = default; 60CachedBufferBlock::~CachedBufferBlock() = default;
@@ -60,9 +62,9 @@ CachedBufferBlock::~CachedBufferBlock() = default;
60VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, 62VKBufferCache::VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
61 const VKDevice& device, VKMemoryManager& memory_manager, 63 const VKDevice& device, VKMemoryManager& memory_manager,
62 VKScheduler& scheduler, VKStagingBufferPool& staging_pool) 64 VKScheduler& scheduler, VKStagingBufferPool& staging_pool)
63 : VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer>{rasterizer, system, 65 : VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer>{rasterizer, system,
64 CreateStreamBuffer(device, 66 CreateStreamBuffer(device,
65 scheduler)}, 67 scheduler)},
66 device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{ 68 device{device}, memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{
67 staging_pool} {} 69 staging_pool} {}
68 70
@@ -72,18 +74,18 @@ Buffer VKBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
72 return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size); 74 return std::make_shared<CachedBufferBlock>(device, memory_manager, cpu_addr, size);
73} 75}
74 76
75const vk::Buffer* VKBufferCache::ToHandle(const Buffer& buffer) { 77const VkBuffer* VKBufferCache::ToHandle(const Buffer& buffer) {
76 return buffer->GetHandle(); 78 return buffer->GetHandle();
77} 79}
78 80
79const vk::Buffer* VKBufferCache::GetEmptyBuffer(std::size_t size) { 81const VkBuffer* VKBufferCache::GetEmptyBuffer(std::size_t size) {
80 size = std::max(size, std::size_t(4)); 82 size = std::max(size, std::size_t(4));
81 const auto& empty = staging_pool.GetUnusedBuffer(size, false); 83 const auto& empty = staging_pool.GetUnusedBuffer(size, false);
82 scheduler.RequestOutsideRenderPassOperationContext(); 84 scheduler.RequestOutsideRenderPassOperationContext();
83 scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf, auto& dld) { 85 scheduler.Record([size, buffer = *empty.handle](vk::CommandBuffer cmdbuf) {
84 cmdbuf.fillBuffer(buffer, 0, size, 0, dld); 86 cmdbuf.FillBuffer(buffer, 0, size, 0);
85 }); 87 });
86 return &*empty.handle; 88 return empty.handle.address();
87} 89}
88 90
89void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 91void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
@@ -93,14 +95,21 @@ void VKBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, st
93 95
94 scheduler.RequestOutsideRenderPassOperationContext(); 96 scheduler.RequestOutsideRenderPassOperationContext();
95 scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset, 97 scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
96 size](auto cmdbuf, auto& dld) { 98 size](vk::CommandBuffer cmdbuf) {
97 cmdbuf.copyBuffer(staging, buffer, {{0, offset, size}}, dld); 99 cmdbuf.CopyBuffer(staging, buffer, VkBufferCopy{0, offset, size});
98 cmdbuf.pipelineBarrier( 100
99 vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {}, 101 VkBufferMemoryBarrier barrier;
100 {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers, 102 barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
101 VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 103 barrier.pNext = nullptr;
102 offset, size)}, 104 barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
103 {}, dld); 105 barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;
106 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
107 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
108 barrier.buffer = buffer;
109 barrier.offset = offset;
110 barrier.size = size;
111 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
112 barrier, {});
104 }); 113 });
105} 114}
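
The upload records the copy first, then a buffer memory barrier that makes the transfer write visible to every stage that may consume the block. Extracted into a hypothetical helper to show the producer/consumer pairing (method names and constants mirror the code above):

    void CopyThenBarrier(vk::CommandBuffer cmdbuf, VkBuffer src, VkBuffer dst,
                         VkDeviceSize offset, VkDeviceSize size) {
        cmdbuf.CopyBuffer(src, dst, VkBufferCopy{0, offset, size});

        VkBufferMemoryBarrier barrier{};
        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; // producer: the copy
        barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;       // consumers: index/vertex/uniform/shader reads
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.buffer = dst;
        barrier.offset = offset;
        barrier.size = size;
        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
                               barrier, {});
    }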
106 115
@@ -109,16 +118,23 @@ void VKBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset,
109 const auto& staging = staging_pool.GetUnusedBuffer(size, true); 118 const auto& staging = staging_pool.GetUnusedBuffer(size, true);
110 scheduler.RequestOutsideRenderPassOperationContext(); 119 scheduler.RequestOutsideRenderPassOperationContext();
111 scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset, 120 scheduler.Record([staging = *staging.handle, buffer = *buffer->GetHandle(), offset,
112 size](auto cmdbuf, auto& dld) { 121 size](vk::CommandBuffer cmdbuf) {
113 cmdbuf.pipelineBarrier( 122 VkBufferMemoryBarrier barrier;
114 vk::PipelineStageFlagBits::eVertexShader | vk::PipelineStageFlagBits::eFragmentShader | 123 barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
115 vk::PipelineStageFlagBits::eComputeShader, 124 barrier.pNext = nullptr;
116 vk::PipelineStageFlagBits::eTransfer, {}, {}, 125 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
117 {vk::BufferMemoryBarrier(vk::AccessFlagBits::eShaderWrite, 126 barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
118 vk::AccessFlagBits::eTransferRead, VK_QUEUE_FAMILY_IGNORED, 127 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
119 VK_QUEUE_FAMILY_IGNORED, buffer, offset, size)}, 128 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
120 {}, dld); 129 barrier.buffer = buffer;
121 cmdbuf.copyBuffer(buffer, staging, {{offset, 0, size}}, dld); 130 barrier.offset = offset;
131 barrier.size = size;
132
133 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
134 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
135 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
136 VK_PIPELINE_STAGE_TRANSFER_BIT, 0, {}, barrier, {});
137 cmdbuf.CopyBuffer(buffer, staging, VkBufferCopy{offset, 0, size});
122 }); 138 });
123 scheduler.Finish(); 139 scheduler.Finish();
124 140
@@ -129,17 +145,30 @@ void VKBufferCache::CopyBlock(const Buffer& src, const Buffer& dst, std::size_t
129 std::size_t dst_offset, std::size_t size) { 145 std::size_t dst_offset, std::size_t size) {
130 scheduler.RequestOutsideRenderPassOperationContext(); 146 scheduler.RequestOutsideRenderPassOperationContext();
131 scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset, 147 scheduler.Record([src_buffer = *src->GetHandle(), dst_buffer = *dst->GetHandle(), src_offset,
132 dst_offset, size](auto cmdbuf, auto& dld) { 148 dst_offset, size](vk::CommandBuffer cmdbuf) {
133 cmdbuf.copyBuffer(src_buffer, dst_buffer, {{src_offset, dst_offset, size}}, dld); 149 cmdbuf.CopyBuffer(src_buffer, dst_buffer, VkBufferCopy{src_offset, dst_offset, size});
134 cmdbuf.pipelineBarrier( 150
135 vk::PipelineStageFlagBits::eTransfer, UploadPipelineStage, {}, {}, 151 std::array<VkBufferMemoryBarrier, 2> barriers;
136 {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferRead, 152 barriers[0].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
137 vk::AccessFlagBits::eShaderWrite, VK_QUEUE_FAMILY_IGNORED, 153 barriers[0].pNext = nullptr;
138 VK_QUEUE_FAMILY_IGNORED, src_buffer, src_offset, size), 154 barriers[0].srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
139 vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, UploadAccessBarriers, 155 barriers[0].dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
140 VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, dst_buffer, 156 barriers[0].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
141 dst_offset, size)}, 157 barriers[0].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
142 {}, dld); 158 barriers[0].buffer = src_buffer;
159 barriers[0].offset = src_offset;
160 barriers[0].size = size;
161 barriers[1].sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
162 barriers[1].pNext = nullptr;
163 barriers[1].srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
164 barriers[1].dstAccessMask = UPLOAD_ACCESS_BARRIERS;
165 barriers[1].srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
166 barriers[1].dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
167 barriers[1].buffer = dst_buffer;
168 barriers[1].offset = dst_offset;
169 barriers[1].size = size;
170 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
171 barriers, {});
143 }); 172 });
144} 173}
145 174
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 508214618..d3c23da98 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -11,11 +11,11 @@
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "video_core/buffer_cache/buffer_cache.h" 12#include "video_core/buffer_cache/buffer_cache.h"
13#include "video_core/rasterizer_cache.h" 13#include "video_core/rasterizer_cache.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_memory_manager.h" 14#include "video_core/renderer_vulkan/vk_memory_manager.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h" 15#include "video_core/renderer_vulkan/vk_resource_manager.h"
17#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 16#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
18#include "video_core/renderer_vulkan/vk_stream_buffer.h" 17#include "video_core/renderer_vulkan/vk_stream_buffer.h"
18#include "video_core/renderer_vulkan/wrapper.h"
19 19
20namespace Core { 20namespace Core {
21class System; 21class System;
@@ -33,8 +33,8 @@ public:
33 VAddr cpu_addr, std::size_t size); 33 VAddr cpu_addr, std::size_t size);
34 ~CachedBufferBlock(); 34 ~CachedBufferBlock();
35 35
36 const vk::Buffer* GetHandle() const { 36 const VkBuffer* GetHandle() const {
37 return &*buffer.handle; 37 return buffer.handle.address();
38 } 38 }
39 39
40private: 40private:
@@ -43,21 +43,21 @@ private:
43 43
44using Buffer = std::shared_ptr<CachedBufferBlock>; 44using Buffer = std::shared_ptr<CachedBufferBlock>;
45 45
46class VKBufferCache final : public VideoCommon::BufferCache<Buffer, vk::Buffer, VKStreamBuffer> { 46class VKBufferCache final : public VideoCommon::BufferCache<Buffer, VkBuffer, VKStreamBuffer> {
47public: 47public:
48 explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system, 48 explicit VKBufferCache(VideoCore::RasterizerInterface& rasterizer, Core::System& system,
49 const VKDevice& device, VKMemoryManager& memory_manager, 49 const VKDevice& device, VKMemoryManager& memory_manager,
50 VKScheduler& scheduler, VKStagingBufferPool& staging_pool); 50 VKScheduler& scheduler, VKStagingBufferPool& staging_pool);
51 ~VKBufferCache(); 51 ~VKBufferCache();
52 52
53 const vk::Buffer* GetEmptyBuffer(std::size_t size) override; 53 const VkBuffer* GetEmptyBuffer(std::size_t size) override;
54 54
55protected: 55protected:
56 void WriteBarrier() override {} 56 void WriteBarrier() override {}
57 57
58 Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override; 58 Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
59 59
60 const vk::Buffer* ToHandle(const Buffer& buffer) override; 60 const VkBuffer* ToHandle(const Buffer& buffer) override;
61 61
62 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 62 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
63 const u8* data) override; 63 const u8* data) override;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7bdda3d79..9d92305f4 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -10,13 +10,13 @@
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/assert.h" 11#include "common/assert.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "video_core/renderer_vulkan/declarations.h"
14#include "video_core/renderer_vulkan/vk_compute_pass.h" 13#include "video_core/renderer_vulkan/vk_compute_pass.h"
15#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 14#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
16#include "video_core/renderer_vulkan/vk_device.h" 15#include "video_core/renderer_vulkan/vk_device.h"
17#include "video_core/renderer_vulkan/vk_scheduler.h" 16#include "video_core/renderer_vulkan/vk_scheduler.h"
18#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 17#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
19#include "video_core/renderer_vulkan/vk_update_descriptor.h" 18#include "video_core/renderer_vulkan/vk_update_descriptor.h"
19#include "video_core/renderer_vulkan/wrapper.h"
20 20
21namespace Vulkan { 21namespace Vulkan {
22 22
@@ -114,6 +114,35 @@ constexpr u8 quad_array[] = {
114 0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00, 114 0xf9, 0x00, 0x02, 0x00, 0x4c, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x4b, 0x00, 0x00, 0x00,
115 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; 115 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
116 116
117VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
118 VkDescriptorSetLayoutBinding binding;
119 binding.binding = 0;
120 binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
121 binding.descriptorCount = 1;
122 binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
123 binding.pImmutableSamplers = nullptr;
124 return binding;
125}
126
127VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEntry() {
128 VkDescriptorUpdateTemplateEntryKHR entry;
129 entry.dstBinding = 0;
130 entry.dstArrayElement = 0;
131 entry.descriptorCount = 1;
132 entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
133 entry.offset = 0;
134 entry.stride = sizeof(DescriptorUpdateEntry);
135 return entry;
136}
137
138VkPushConstantRange BuildQuadArrayPassPushConstantRange() {
139 VkPushConstantRange range;
140 range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
141 range.offset = 0;
142 range.size = sizeof(u32);
143 return range;
144}
145
117// Uint8 SPIR-V module. Generated from the "shaders/" directory. 146// Uint8 SPIR-V module. Generated from the "shaders/" directory.
118constexpr u8 uint8_pass[] = { 147constexpr u8 uint8_pass[] = {
119 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x2f, 0x00, 0x00, 0x00, 148 0x03, 0x02, 0x23, 0x07, 0x00, 0x00, 0x01, 0x00, 0x07, 0x00, 0x08, 0x00, 0x2f, 0x00, 0x00, 0x00,
@@ -191,53 +220,111 @@ constexpr u8 uint8_pass[] = {
191 0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 220 0xf9, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x02, 0x00, 0x1d, 0x00, 0x00, 0x00,
192 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00}; 221 0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
193 222
223std::array<VkDescriptorSetLayoutBinding, 2> BuildUint8PassDescriptorSetBindings() {
224 std::array<VkDescriptorSetLayoutBinding, 2> bindings;
225 bindings[0].binding = 0;
226 bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
227 bindings[0].descriptorCount = 1;
228 bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
229 bindings[0].pImmutableSamplers = nullptr;
230 bindings[1].binding = 1;
231 bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
232 bindings[1].descriptorCount = 1;
233 bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
234 bindings[1].pImmutableSamplers = nullptr;
235 return bindings;
236}
237
238VkDescriptorUpdateTemplateEntryKHR BuildUint8PassDescriptorUpdateTemplateEntry() {
239 VkDescriptorUpdateTemplateEntryKHR entry;
240 entry.dstBinding = 0;
241 entry.dstArrayElement = 0;
242 entry.descriptorCount = 2;
243 entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
244 entry.offset = 0;
245 entry.stride = sizeof(DescriptorUpdateEntry);
246 return entry;
247}
248
194} // Anonymous namespace 249} // Anonymous namespace
195 250
196VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool, 251VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
197 const std::vector<vk::DescriptorSetLayoutBinding>& bindings, 252 vk::Span<VkDescriptorSetLayoutBinding> bindings,
198 const std::vector<vk::DescriptorUpdateTemplateEntry>& templates, 253 vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
199 const std::vector<vk::PushConstantRange> push_constants, 254 vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
200 std::size_t code_size, const u8* code) { 255 const u8* code) {
201 const auto dev = device.GetLogical(); 256 VkDescriptorSetLayoutCreateInfo descriptor_layout_ci;
202 const auto& dld = device.GetDispatchLoader(); 257 descriptor_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
203 258 descriptor_layout_ci.pNext = nullptr;
204 const vk::DescriptorSetLayoutCreateInfo descriptor_layout_ci( 259 descriptor_layout_ci.flags = 0;
205 {}, static_cast<u32>(bindings.size()), bindings.data()); 260 descriptor_layout_ci.bindingCount = bindings.size();
206 descriptor_set_layout = dev.createDescriptorSetLayoutUnique(descriptor_layout_ci, nullptr, dld); 261 descriptor_layout_ci.pBindings = bindings.data();
207 262 descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(descriptor_layout_ci);
208 const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout, 263
209 static_cast<u32>(push_constants.size()), 264 VkPipelineLayoutCreateInfo pipeline_layout_ci;
210 push_constants.data()); 265 pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
211 layout = dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld); 266 pipeline_layout_ci.pNext = nullptr;
267 pipeline_layout_ci.flags = 0;
268 pipeline_layout_ci.setLayoutCount = 1;
269 pipeline_layout_ci.pSetLayouts = descriptor_set_layout.address();
270 pipeline_layout_ci.pushConstantRangeCount = push_constants.size();
271 pipeline_layout_ci.pPushConstantRanges = push_constants.data();
272 layout = device.GetLogical().CreatePipelineLayout(pipeline_layout_ci);
212 273
213 if (!templates.empty()) { 274 if (!templates.empty()) {
214 const vk::DescriptorUpdateTemplateCreateInfo template_ci( 275 VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
215 {}, static_cast<u32>(templates.size()), templates.data(), 276 template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
216 vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout, 277 template_ci.pNext = nullptr;
217 vk::PipelineBindPoint::eGraphics, *layout, 0); 278 template_ci.flags = 0;
218 descriptor_template = dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld); 279 template_ci.descriptorUpdateEntryCount = templates.size();
280 template_ci.pDescriptorUpdateEntries = templates.data();
281 template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
282 template_ci.descriptorSetLayout = *descriptor_set_layout;
283 template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
284 template_ci.pipelineLayout = *layout;
285 template_ci.set = 0;
286 descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR(template_ci);
219 287
220 descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout); 288 descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
221 } 289 }
222 290
223 auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1); 291 auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
224 std::memcpy(code_copy.get(), code, code_size); 292 std::memcpy(code_copy.get(), code, code_size);
225 const vk::ShaderModuleCreateInfo module_ci({}, code_size, code_copy.get());
226 module = dev.createShaderModuleUnique(module_ci, nullptr, dld);
227 293
228 const vk::PipelineShaderStageCreateInfo stage_ci({}, vk::ShaderStageFlagBits::eCompute, *module, 294 VkShaderModuleCreateInfo module_ci;
229 "main", nullptr); 295 module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
296 module_ci.pNext = nullptr;
297 module_ci.flags = 0;
298 module_ci.codeSize = code_size;
299 module_ci.pCode = code_copy.get();
300 module = device.GetLogical().CreateShaderModule(module_ci);
301
302 VkComputePipelineCreateInfo pipeline_ci;
303 pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
304 pipeline_ci.pNext = nullptr;
305 pipeline_ci.flags = 0;
306 pipeline_ci.layout = *layout;
307 pipeline_ci.basePipelineHandle = nullptr;
308 pipeline_ci.basePipelineIndex = 0;
230 309
231 const vk::ComputePipelineCreateInfo pipeline_ci({}, stage_ci, *layout, nullptr, 0); 310 VkPipelineShaderStageCreateInfo& stage_ci = pipeline_ci.stage;
232 pipeline = dev.createComputePipelineUnique(nullptr, pipeline_ci, nullptr, dld); 311 stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
312 stage_ci.pNext = nullptr;
313 stage_ci.flags = 0;
314 stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
315 stage_ci.module = *module;
316 stage_ci.pName = "main";
317 stage_ci.pSpecializationInfo = nullptr;
318
319 pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci);
233} 320}
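
The template entries above all use a stride of sizeof(DescriptorUpdateEntry), so a descriptor update becomes one walk over a packed array instead of N individual writes. A sketch of what consuming such a template looks like; the union layout is assumed to match the engine's DescriptorUpdateEntry, and the KHR entry point must already have been loaded by the dispatcher:

    union UpdateEntrySketch {           // assumed layout, one record per descriptor
        VkDescriptorBufferInfo buffer;  // buffer bindings read this member
        VkDescriptorImageInfo image;    // sampler/image bindings read this one
    };

    void ApplyTemplate(VkDevice dev, VkDescriptorSet set, VkDescriptorUpdateTemplateKHR tmpl,
                       const UpdateEntrySketch* payload) {
        // Each template entry consumes descriptorCount records of `stride` bytes.
        vkUpdateDescriptorSetWithTemplateKHR(dev, set, tmpl, payload);
    }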
234 321
235VKComputePass::~VKComputePass() = default; 322VKComputePass::~VKComputePass() = default;
236 323
237vk::DescriptorSet VKComputePass::CommitDescriptorSet( 324VkDescriptorSet VKComputePass::CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
238 VKUpdateDescriptorQueue& update_descriptor_queue, VKFence& fence) { 325 VKFence& fence) {
239 if (!descriptor_template) { 326 if (!descriptor_template) {
240 return {}; 327 return nullptr;
241 } 328 }
242 const auto set = descriptor_allocator->Commit(fence); 329 const auto set = descriptor_allocator->Commit(fence);
243 update_descriptor_queue.Send(*descriptor_template, set); 330 update_descriptor_queue.Send(*descriptor_template, set);
@@ -248,25 +335,21 @@ QuadArrayPass::QuadArrayPass(const VKDevice& device, VKScheduler& scheduler,
248 VKDescriptorPool& descriptor_pool, 335 VKDescriptorPool& descriptor_pool,
249 VKStagingBufferPool& staging_buffer_pool, 336 VKStagingBufferPool& staging_buffer_pool,
250 VKUpdateDescriptorQueue& update_descriptor_queue) 337 VKUpdateDescriptorQueue& update_descriptor_queue)
251 : VKComputePass(device, descriptor_pool, 338 : VKComputePass(device, descriptor_pool, BuildQuadArrayPassDescriptorSetLayoutBinding(),
252 {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1, 339 BuildQuadArrayPassDescriptorUpdateTemplateEntry(),
253 vk::ShaderStageFlagBits::eCompute, nullptr)}, 340 BuildQuadArrayPassPushConstantRange(), std::size(quad_array), quad_array),
254 {vk::DescriptorUpdateTemplateEntry(0, 0, 1, vk::DescriptorType::eStorageBuffer,
255 0, sizeof(DescriptorUpdateEntry))},
256 {vk::PushConstantRange(vk::ShaderStageFlagBits::eCompute, 0, sizeof(u32))},
257 std::size(quad_array), quad_array),
258 scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool}, 341 scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
259 update_descriptor_queue{update_descriptor_queue} {} 342 update_descriptor_queue{update_descriptor_queue} {}
260 343
261QuadArrayPass::~QuadArrayPass() = default; 344QuadArrayPass::~QuadArrayPass() = default;
262 345
263std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) { 346std::pair<const VkBuffer*, VkDeviceSize> QuadArrayPass::Assemble(u32 num_vertices, u32 first) {
264 const u32 num_triangle_vertices = num_vertices * 6 / 4; 347 const u32 num_triangle_vertices = num_vertices * 6 / 4;
265 const std::size_t staging_size = num_triangle_vertices * sizeof(u32); 348 const std::size_t staging_size = num_triangle_vertices * sizeof(u32);
266 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); 349 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
267 350
268 update_descriptor_queue.Acquire(); 351 update_descriptor_queue.Acquire();
269 update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size); 352 update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
270 const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); 353 const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
271 354
272 scheduler.RequestOutsideRenderPassOperationContext(); 355 scheduler.RequestOutsideRenderPassOperationContext();
@@ -274,66 +357,72 @@ std::pair<const vk::Buffer&, vk::DeviceSize> QuadArrayPass::Assemble(u32 num_ver
274 ASSERT(num_vertices % 4 == 0); 357 ASSERT(num_vertices % 4 == 0);
275 const u32 num_quads = num_vertices / 4; 358 const u32 num_quads = num_vertices / 4;
276 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads, 359 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, num_quads,
277 first, set](auto cmdbuf, auto& dld) { 360 first, set](vk::CommandBuffer cmdbuf) {
278 constexpr u32 dispatch_size = 1024; 361 constexpr u32 dispatch_size = 1024;
279 cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld); 362 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
280 cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld); 363 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
281 cmdbuf.pushConstants(layout, vk::ShaderStageFlagBits::eCompute, 0, sizeof(first), &first, 364 cmdbuf.PushConstants(layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, sizeof(first), &first);
282 dld); 365 cmdbuf.Dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1);
283 cmdbuf.dispatch(Common::AlignUp(num_quads, dispatch_size) / dispatch_size, 1, 1, dld); 366
284 367 VkBufferMemoryBarrier barrier;
285 const vk::BufferMemoryBarrier barrier( 368 barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
286 vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead, 369 barrier.pNext = nullptr;
287 VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0, 370 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
288 static_cast<vk::DeviceSize>(num_quads) * 6 * sizeof(u32)); 371 barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
289 cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader, 372 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
290 vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld); 373 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
374 barrier.buffer = buffer;
375 barrier.offset = 0;
376 barrier.size = static_cast<VkDeviceSize>(num_quads) * 6 * sizeof(u32);
377 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
378 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, {barrier}, {});
291 }); 379 });
292 return {*buffer.handle, 0}; 380 return {buffer.handle.address(), 0};
293} 381}
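
Assemble sizes the output at num_vertices * 6 / 4 because every quad of four vertices becomes two triangles of three indices each. A CPU-side equivalent of the expansion the quad_array SPIR-V module performs, assuming the conventional 0,1,2 / 0,2,3 split per quad:

    std::vector<u32> ExpandQuads(u32 num_vertices, u32 first) {
        std::vector<u32> indices;
        indices.reserve(num_vertices * 6 / 4);
        for (u32 quad = 0; quad < num_vertices / 4; ++quad) {
            const u32 base = first + quad * 4;
            for (const u32 offset : {0u, 1u, 2u, 0u, 2u, 3u}) {
                indices.push_back(base + offset);
            }
        }
        return indices;
    }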
294 382
295Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler, 383Uint8Pass::Uint8Pass(const VKDevice& device, VKScheduler& scheduler,
296 VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool, 384 VKDescriptorPool& descriptor_pool, VKStagingBufferPool& staging_buffer_pool,
297 VKUpdateDescriptorQueue& update_descriptor_queue) 385 VKUpdateDescriptorQueue& update_descriptor_queue)
298 : VKComputePass(device, descriptor_pool, 386 : VKComputePass(device, descriptor_pool, BuildUint8PassDescriptorSetBindings(),
299 {vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eStorageBuffer, 1, 387 BuildUint8PassDescriptorUpdateTemplateEntry(), {}, std::size(uint8_pass),
300 vk::ShaderStageFlagBits::eCompute, nullptr), 388 uint8_pass),
301 vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eStorageBuffer, 1,
302 vk::ShaderStageFlagBits::eCompute, nullptr)},
303 {vk::DescriptorUpdateTemplateEntry(0, 0, 2, vk::DescriptorType::eStorageBuffer,
304 0, sizeof(DescriptorUpdateEntry))},
305 {}, std::size(uint8_pass), uint8_pass),
306 scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool}, 389 scheduler{scheduler}, staging_buffer_pool{staging_buffer_pool},
307 update_descriptor_queue{update_descriptor_queue} {} 390 update_descriptor_queue{update_descriptor_queue} {}
308 391
309Uint8Pass::~Uint8Pass() = default; 392Uint8Pass::~Uint8Pass() = default;
310 393
311std::pair<const vk::Buffer*, u64> Uint8Pass::Assemble(u32 num_vertices, vk::Buffer src_buffer, 394std::pair<const VkBuffer*, u64> Uint8Pass::Assemble(u32 num_vertices, VkBuffer src_buffer,
312 u64 src_offset) { 395 u64 src_offset) {
313 const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16)); 396 const auto staging_size = static_cast<u32>(num_vertices * sizeof(u16));
314 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false); 397 auto& buffer = staging_buffer_pool.GetUnusedBuffer(staging_size, false);
315 398
316 update_descriptor_queue.Acquire(); 399 update_descriptor_queue.Acquire();
317 update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices); 400 update_descriptor_queue.AddBuffer(&src_buffer, src_offset, num_vertices);
318 update_descriptor_queue.AddBuffer(&*buffer.handle, 0, staging_size); 401 update_descriptor_queue.AddBuffer(buffer.handle.address(), 0, staging_size);
319 const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence()); 402 const auto set = CommitDescriptorSet(update_descriptor_queue, scheduler.GetFence());
320 403
321 scheduler.RequestOutsideRenderPassOperationContext(); 404 scheduler.RequestOutsideRenderPassOperationContext();
322 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set, 405 scheduler.Record([layout = *layout, pipeline = *pipeline, buffer = *buffer.handle, set,
323 num_vertices](auto cmdbuf, auto& dld) { 406 num_vertices](vk::CommandBuffer cmdbuf) {
324 constexpr u32 dispatch_size = 1024; 407 constexpr u32 dispatch_size = 1024;
325 cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline, dld); 408 cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
326 cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, 0, {set}, {}, dld); 409 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, 0, set, {});
327 cmdbuf.dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1, dld); 410 cmdbuf.Dispatch(Common::AlignUp(num_vertices, dispatch_size) / dispatch_size, 1, 1);
328 411
329 const vk::BufferMemoryBarrier barrier( 412 VkBufferMemoryBarrier barrier;
330 vk::AccessFlagBits::eShaderWrite, vk::AccessFlagBits::eVertexAttributeRead, 413 barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
331 VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, buffer, 0, 414 barrier.pNext = nullptr;
332 static_cast<vk::DeviceSize>(num_vertices) * sizeof(u16)); 415 barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
333 cmdbuf.pipelineBarrier(vk::PipelineStageFlagBits::eComputeShader, 416 barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
334 vk::PipelineStageFlagBits::eVertexInput, {}, {}, {barrier}, {}, dld); 417 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
418 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
419 barrier.buffer = buffer;
420 barrier.offset = 0;
421 barrier.size = static_cast<VkDeviceSize>(num_vertices * sizeof(u16));
422 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
423 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, {}, barrier, {});
335 }); 424 });
336 return {&*buffer.handle, 0}; 425 return {buffer.handle.address(), 0};
337} 426}
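
Uint8Pass exists because core Vulkan has no 8-bit index type (that requires VK_EXT_index_type_uint8), so guest uint8 indices are widened to uint16 on the GPU; the staging size of num_vertices * sizeof(u16) reflects that. The CPU-side equivalent of what the compute shader does:

    std::vector<u16> PromoteIndices(const u8* src, u32 num_indices) {
        std::vector<u16> out(num_indices);
        for (u32 i = 0; i < num_indices; ++i) {
            out[i] = static_cast<u16>(src[i]); // zero-extend each 8-bit index
        }
        return out;
    }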
338 427
339} // namespace Vulkan 428} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 7057eb837..c62516bff 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -8,8 +8,8 @@
8#include <utility> 8#include <utility>
9#include <vector> 9#include <vector>
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 11#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -22,24 +22,24 @@ class VKUpdateDescriptorQueue;
22class VKComputePass { 22class VKComputePass {
23public: 23public:
24 explicit VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool, 24 explicit VKComputePass(const VKDevice& device, VKDescriptorPool& descriptor_pool,
25 const std::vector<vk::DescriptorSetLayoutBinding>& bindings, 25 vk::Span<VkDescriptorSetLayoutBinding> bindings,
26 const std::vector<vk::DescriptorUpdateTemplateEntry>& templates, 26 vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
27 const std::vector<vk::PushConstantRange> push_constants, 27 vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
28 std::size_t code_size, const u8* code); 28 const u8* code);
29 ~VKComputePass(); 29 ~VKComputePass();
30 30
31protected: 31protected:
32 vk::DescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue, 32 VkDescriptorSet CommitDescriptorSet(VKUpdateDescriptorQueue& update_descriptor_queue,
33 VKFence& fence); 33 VKFence& fence);
34 34
35 UniqueDescriptorUpdateTemplate descriptor_template; 35 vk::DescriptorUpdateTemplateKHR descriptor_template;
36 UniquePipelineLayout layout; 36 vk::PipelineLayout layout;
37 UniquePipeline pipeline; 37 vk::Pipeline pipeline;
38 38
39private: 39private:
40 UniqueDescriptorSetLayout descriptor_set_layout; 40 vk::DescriptorSetLayout descriptor_set_layout;
41 std::optional<DescriptorAllocator> descriptor_allocator; 41 std::optional<DescriptorAllocator> descriptor_allocator;
42 UniqueShaderModule module; 42 vk::ShaderModule module;
43}; 43};
44 44
45class QuadArrayPass final : public VKComputePass { 45class QuadArrayPass final : public VKComputePass {
@@ -50,7 +50,7 @@ public:
50 VKUpdateDescriptorQueue& update_descriptor_queue); 50 VKUpdateDescriptorQueue& update_descriptor_queue);
51 ~QuadArrayPass(); 51 ~QuadArrayPass();
52 52
53 std::pair<const vk::Buffer&, vk::DeviceSize> Assemble(u32 num_vertices, u32 first); 53 std::pair<const VkBuffer*, VkDeviceSize> Assemble(u32 num_vertices, u32 first);
54 54
55private: 55private:
56 VKScheduler& scheduler; 56 VKScheduler& scheduler;
@@ -65,8 +65,7 @@ public:
65 VKUpdateDescriptorQueue& update_descriptor_queue); 65 VKUpdateDescriptorQueue& update_descriptor_queue);
66 ~Uint8Pass(); 66 ~Uint8Pass();
67 67
68 std::pair<const vk::Buffer*, u64> Assemble(u32 num_vertices, vk::Buffer src_buffer, 68 std::pair<const VkBuffer*, u64> Assemble(u32 num_vertices, VkBuffer src_buffer, u64 src_offset);
69 u64 src_offset);
70 69
71private: 70private:
72 VKScheduler& scheduler; 71 VKScheduler& scheduler;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 60f57d83e..23beafa4f 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -5,7 +5,6 @@
5#include <memory> 5#include <memory>
6#include <vector> 6#include <vector>
7 7
8#include "video_core/renderer_vulkan/declarations.h"
9#include "video_core/renderer_vulkan/vk_compute_pipeline.h" 8#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
10#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 9#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
11#include "video_core/renderer_vulkan/vk_device.h" 10#include "video_core/renderer_vulkan/vk_device.h"
@@ -14,6 +13,7 @@
14#include "video_core/renderer_vulkan/vk_scheduler.h" 13#include "video_core/renderer_vulkan/vk_scheduler.h"
15#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 14#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
16#include "video_core/renderer_vulkan/vk_update_descriptor.h" 15#include "video_core/renderer_vulkan/vk_update_descriptor.h"
16#include "video_core/renderer_vulkan/wrapper.h"
17 17
18namespace Vulkan { 18namespace Vulkan {
19 19
@@ -30,7 +30,7 @@ VKComputePipeline::VKComputePipeline(const VKDevice& device, VKScheduler& schedu
30 30
31VKComputePipeline::~VKComputePipeline() = default; 31VKComputePipeline::~VKComputePipeline() = default;
32 32
33vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() { 33VkDescriptorSet VKComputePipeline::CommitDescriptorSet() {
34 if (!descriptor_template) { 34 if (!descriptor_template) {
35 return {}; 35 return {};
36 } 36 }
@@ -39,74 +39,109 @@ vk::DescriptorSet VKComputePipeline::CommitDescriptorSet() {
39 return set; 39 return set;
40} 40}
41 41
42UniqueDescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const { 42vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
43 std::vector<vk::DescriptorSetLayoutBinding> bindings; 43 std::vector<VkDescriptorSetLayoutBinding> bindings;
44 u32 binding = 0; 44 u32 binding = 0;
45 const auto AddBindings = [&](vk::DescriptorType descriptor_type, std::size_t num_entries) { 45 const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
46 // TODO(Rodrigo): Maybe make individual bindings here? 46 // TODO(Rodrigo): Maybe make individual bindings here?
47 for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) { 47 for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
48 bindings.emplace_back(binding++, descriptor_type, 1, vk::ShaderStageFlagBits::eCompute, 48 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
49 nullptr); 49 entry.binding = binding++;
50 entry.descriptorType = descriptor_type;
51 entry.descriptorCount = 1;
52 entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
53 entry.pImmutableSamplers = nullptr;
50 } 54 }
51 }; 55 };
52 AddBindings(vk::DescriptorType::eUniformBuffer, entries.const_buffers.size()); 56 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
53 AddBindings(vk::DescriptorType::eStorageBuffer, entries.global_buffers.size()); 57 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
54 AddBindings(vk::DescriptorType::eUniformTexelBuffer, entries.texel_buffers.size()); 58 add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size());
55 AddBindings(vk::DescriptorType::eCombinedImageSampler, entries.samplers.size()); 59 add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
56 AddBindings(vk::DescriptorType::eStorageImage, entries.images.size()); 60 add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
57 61
58 const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci( 62 VkDescriptorSetLayoutCreateInfo ci;
59 {}, static_cast<u32>(bindings.size()), bindings.data()); 63 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
60 64 ci.pNext = nullptr;
61 const auto dev = device.GetLogical(); 65 ci.flags = 0;
62 const auto& dld = device.GetDispatchLoader(); 66 ci.bindingCount = static_cast<u32>(bindings.size());
63 return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld); 67 ci.pBindings = bindings.data();
68 return device.GetLogical().CreateDescriptorSetLayout(ci);
64} 69}
65 70
66UniquePipelineLayout VKComputePipeline::CreatePipelineLayout() const { 71vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
67 const vk::PipelineLayoutCreateInfo layout_ci({}, 1, &*descriptor_set_layout, 0, nullptr); 72 VkPipelineLayoutCreateInfo ci;
68 const auto dev = device.GetLogical(); 73 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
69 return dev.createPipelineLayoutUnique(layout_ci, nullptr, device.GetDispatchLoader()); 74 ci.pNext = nullptr;
75 ci.flags = 0;
76 ci.setLayoutCount = 1;
77 ci.pSetLayouts = descriptor_set_layout.address();
78 ci.pushConstantRangeCount = 0;
79 ci.pPushConstantRanges = nullptr;
80 return device.GetLogical().CreatePipelineLayout(ci);
70} 81}
71 82
72UniqueDescriptorUpdateTemplate VKComputePipeline::CreateDescriptorUpdateTemplate() const { 83vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
73 std::vector<vk::DescriptorUpdateTemplateEntry> template_entries; 84 std::vector<VkDescriptorUpdateTemplateEntryKHR> template_entries;
74 u32 binding = 0; 85 u32 binding = 0;
75 u32 offset = 0; 86 u32 offset = 0;
76 FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries); 87 FillDescriptorUpdateTemplateEntries(entries, binding, offset, template_entries);
77 if (template_entries.empty()) { 88 if (template_entries.empty()) {
78 // If the shader doesn't use descriptor sets, skip template creation. 89 // If the shader doesn't use descriptor sets, skip template creation.
79 return UniqueDescriptorUpdateTemplate{}; 90 return {};
80 } 91 }
81 92
82 const vk::DescriptorUpdateTemplateCreateInfo template_ci( 93 VkDescriptorUpdateTemplateCreateInfoKHR ci;
83 {}, static_cast<u32>(template_entries.size()), template_entries.data(), 94 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
84 vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout, 95 ci.pNext = nullptr;
85 vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET); 96 ci.flags = 0;
86 97 ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
87 const auto dev = device.GetLogical(); 98 ci.pDescriptorUpdateEntries = template_entries.data();
88 const auto& dld = device.GetDispatchLoader(); 99 ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
89 return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld); 100 ci.descriptorSetLayout = *descriptor_set_layout;
101 ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
102 ci.pipelineLayout = *layout;
103 ci.set = DESCRIPTOR_SET;
104 return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
90} 105}
91 106
92UniqueShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const { 107vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
93 const vk::ShaderModuleCreateInfo module_ci({}, code.size() * sizeof(u32), code.data()); 108 VkShaderModuleCreateInfo ci;
94 const auto dev = device.GetLogical(); 109 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
95 return dev.createShaderModuleUnique(module_ci, nullptr, device.GetDispatchLoader()); 110 ci.pNext = nullptr;
111 ci.flags = 0;
112 ci.codeSize = code.size() * sizeof(u32);
113 ci.pCode = code.data();
114 return device.GetLogical().CreateShaderModule(ci);
96} 115}
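
The `code.size() * sizeof(u32)` above guards against the classic shader-module pitfall: codeSize is measured in bytes, while SPIR-V code is handled as 32-bit words. A self-contained sketch (MakeModule is illustrative):

    #include <cstdint>
    #include <vector>
    #include <vulkan/vulkan.h>

    VkShaderModule MakeModule(VkDevice device, const std::vector<uint32_t>& spirv) {
        VkShaderModuleCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
        ci.codeSize = spirv.size() * sizeof(uint32_t); // bytes, not words
        ci.pCode = spirv.data();

        VkShaderModule module = VK_NULL_HANDLE;
        // Error handling elided for brevity.
        vkCreateShaderModule(device, &ci, nullptr, &module);
        return module;
    }
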
97 116
98UniquePipeline VKComputePipeline::CreatePipeline() const { 117vk::Pipeline VKComputePipeline::CreatePipeline() const {
99 vk::PipelineShaderStageCreateInfo shader_stage_ci({}, vk::ShaderStageFlagBits::eCompute, 118 VkComputePipelineCreateInfo ci;
100 *shader_module, "main", nullptr); 119 VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
101 vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci; 120 stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
121 stage_ci.pNext = nullptr;
122 stage_ci.flags = 0;
123 stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
124 stage_ci.module = *shader_module;
125 stage_ci.pName = "main";
126 stage_ci.pSpecializationInfo = nullptr;
127
128 VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
129 subgroup_size_ci.sType =
130 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
131 subgroup_size_ci.pNext = nullptr;
102 subgroup_size_ci.requiredSubgroupSize = GuestWarpSize; 132 subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
103 if (entries.uses_warps && device.IsGuestWarpSizeSupported(vk::ShaderStageFlagBits::eCompute)) { 133
104 shader_stage_ci.pNext = &subgroup_size_ci; 134 if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
135 stage_ci.pNext = &subgroup_size_ci;
105 } 136 }
106 137
107 const vk::ComputePipelineCreateInfo create_info({}, shader_stage_ci, *layout, {}, 0); 138 ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
108 const auto dev = device.GetLogical(); 139 ci.pNext = nullptr;
109 return dev.createComputePipelineUnique({}, create_info, nullptr, device.GetDispatchLoader()); 140 ci.flags = 0;
141 ci.layout = *layout;
142 ci.basePipelineHandle = nullptr;
143 ci.basePipelineIndex = 0;
144 return device.GetLogical().CreateComputePipeline(ci);
110} 145}
111 146
112} // namespace Vulkan 147} // namespace Vulkan
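
The wrapper call at the end of CreatePipeline presumably reduces to vkCreateComputePipelines, which takes an array of create infos. A sketch of the single-pipeline case under that assumption (CreateOne is illustrative, not the wrapper's API):

    #include <vulkan/vulkan.h>

    VkPipeline CreateOne(VkDevice device, const VkComputePipelineCreateInfo& ci) {
        VkPipeline pipeline = VK_NULL_HANDLE;
        // No pipeline cache here (VK_NULL_HANDLE); a renderer may pass a real
        // cache to amortize shader compilation across runs.
        vkCreateComputePipelines(device, VK_NULL_HANDLE, 1, &ci, nullptr, &pipeline);
        return pipeline;
    }
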
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 22235c6c9..33b9af29e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -7,9 +7,9 @@
7#include <memory> 7#include <memory>
8 8
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "video_core/renderer_vulkan/declarations.h"
11#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 10#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
12#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 11#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -25,42 +25,42 @@ public:
25 const SPIRVShader& shader); 25 const SPIRVShader& shader);
26 ~VKComputePipeline(); 26 ~VKComputePipeline();
27 27
28 vk::DescriptorSet CommitDescriptorSet(); 28 VkDescriptorSet CommitDescriptorSet();
29 29
30 vk::Pipeline GetHandle() const { 30 VkPipeline GetHandle() const {
31 return *pipeline; 31 return *pipeline;
32 } 32 }
33 33
34 vk::PipelineLayout GetLayout() const { 34 VkPipelineLayout GetLayout() const {
35 return *layout; 35 return *layout;
36 } 36 }
37 37
38 const ShaderEntries& GetEntries() { 38 const ShaderEntries& GetEntries() const {
39 return entries; 39 return entries;
40 } 40 }
41 41
42private: 42private:
43 UniqueDescriptorSetLayout CreateDescriptorSetLayout() const; 43 vk::DescriptorSetLayout CreateDescriptorSetLayout() const;
44 44
45 UniquePipelineLayout CreatePipelineLayout() const; 45 vk::PipelineLayout CreatePipelineLayout() const;
46 46
47 UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate() const; 47 vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate() const;
48 48
49 UniqueShaderModule CreateShaderModule(const std::vector<u32>& code) const; 49 vk::ShaderModule CreateShaderModule(const std::vector<u32>& code) const;
50 50
51 UniquePipeline CreatePipeline() const; 51 vk::Pipeline CreatePipeline() const;
52 52
53 const VKDevice& device; 53 const VKDevice& device;
54 VKScheduler& scheduler; 54 VKScheduler& scheduler;
55 ShaderEntries entries; 55 ShaderEntries entries;
56 56
57 UniqueDescriptorSetLayout descriptor_set_layout; 57 vk::DescriptorSetLayout descriptor_set_layout;
58 DescriptorAllocator descriptor_allocator; 58 DescriptorAllocator descriptor_allocator;
59 VKUpdateDescriptorQueue& update_descriptor_queue; 59 VKUpdateDescriptorQueue& update_descriptor_queue;
60 UniquePipelineLayout layout; 60 vk::PipelineLayout layout;
61 UniqueDescriptorUpdateTemplate descriptor_template; 61 vk::DescriptorUpdateTemplateKHR descriptor_template;
62 UniqueShaderModule shader_module; 62 vk::ShaderModule shader_module;
63 UniquePipeline pipeline; 63 vk::Pipeline pipeline;
64}; 64};
65 65
66} // namespace Vulkan 66} // namespace Vulkan
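
The header swaps Vulkan-Hpp's Unique* types for the project's own vk:: wrapper handles. A minimal sketch of the idea behind such a handle — an assumption about the design, not the contents of wrapper.h — owning a raw handle plus the device needed to destroy it, with an address() accessor like the one used for pSetLayouts earlier:

    #include <utility>
    #include <vulkan/vulkan.h>

    template <typename T>
    using Destroyer = void (*)(VkDevice, T, const VkAllocationCallbacks*);

    // Move-only owner of a raw Vulkan handle; Destroy is the matching vkDestroy* call.
    template <typename T, Destroyer<T> Destroy>
    class Handle {
    public:
        Handle() = default;
        Handle(T handle_, VkDevice device_) : handle{handle_}, device{device_} {}
        Handle(const Handle&) = delete;
        Handle(Handle&& rhs) noexcept
            : handle{std::exchange(rhs.handle, T{})}, device{rhs.device} {}
        ~Handle() {
            if (handle) {
                Destroy(device, handle, nullptr);
            }
        }
        T operator*() const { return handle; }       // borrow the raw handle
        const T* address() const { return &handle; } // e.g. for pSetLayouts
    private:
        T handle{};
        VkDevice device{};
    };

    using Pipeline = Handle<VkPipeline, vkDestroyPipeline>;

Move-only semantics keep destruction single-owner, matching what the Vulkan-Hpp Unique* types provided before.
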
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index cc7c281a0..e9d528aa6 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -6,10 +6,10 @@
6#include <vector> 6#include <vector>
7 7
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "video_core/renderer_vulkan/declarations.h"
10#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 9#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
11#include "video_core/renderer_vulkan/vk_device.h" 10#include "video_core/renderer_vulkan/vk_device.h"
12#include "video_core/renderer_vulkan/vk_resource_manager.h" 11#include "video_core/renderer_vulkan/vk_resource_manager.h"
12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -17,19 +17,18 @@ namespace Vulkan {
17constexpr std::size_t SETS_GROW_RATE = 0x20; 17constexpr std::size_t SETS_GROW_RATE = 0x20;
18 18
19DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool, 19DescriptorAllocator::DescriptorAllocator(VKDescriptorPool& descriptor_pool,
20 vk::DescriptorSetLayout layout) 20 VkDescriptorSetLayout layout)
21 : VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {} 21 : VKFencedPool{SETS_GROW_RATE}, descriptor_pool{descriptor_pool}, layout{layout} {}
22 22
23DescriptorAllocator::~DescriptorAllocator() = default; 23DescriptorAllocator::~DescriptorAllocator() = default;
24 24
25vk::DescriptorSet DescriptorAllocator::Commit(VKFence& fence) { 25VkDescriptorSet DescriptorAllocator::Commit(VKFence& fence) {
26 return *descriptors[CommitResource(fence)]; 26 const std::size_t index = CommitResource(fence);
27 return descriptors_allocations[index / SETS_GROW_RATE][index % SETS_GROW_RATE];
27} 28}
28 29
29void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) { 30void DescriptorAllocator::Allocate(std::size_t begin, std::size_t end) {
30 auto new_sets = descriptor_pool.AllocateDescriptors(layout, end - begin); 31 descriptors_allocations.push_back(descriptor_pool.AllocateDescriptors(layout, end - begin));
31 descriptors.insert(descriptors.end(), std::make_move_iterator(new_sets.begin()),
32 std::make_move_iterator(new_sets.end()));
33} 32}
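
Commit's indexing works because Allocate always appends chunks of exactly SETS_GROW_RATE sets, so a flat resource index splits into a chunk number and a slot within it. A tiny sketch of that arithmetic, modeling each allocation as a vector (Lookup is illustrative):

    #include <cstddef>
    #include <vector>

    constexpr std::size_t GROW_RATE = 0x20; // matches SETS_GROW_RATE

    // Valid only because every inner vector holds exactly GROW_RATE elements.
    template <typename T>
    const T& Lookup(const std::vector<std::vector<T>>& chunks, std::size_t index) {
        return chunks[index / GROW_RATE][index % GROW_RATE];
    }
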
34 33
35VKDescriptorPool::VKDescriptorPool(const VKDevice& device) 34VKDescriptorPool::VKDescriptorPool(const VKDevice& device)
@@ -37,53 +36,50 @@ VKDescriptorPool::VKDescriptorPool(const VKDevice& device)
37 36
38VKDescriptorPool::~VKDescriptorPool() = default; 37VKDescriptorPool::~VKDescriptorPool() = default;
39 38
40vk::DescriptorPool VKDescriptorPool::AllocateNewPool() { 39vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
41 static constexpr u32 num_sets = 0x20000; 40 static constexpr u32 num_sets = 0x20000;
42 static constexpr vk::DescriptorPoolSize pool_sizes[] = { 41 static constexpr VkDescriptorPoolSize pool_sizes[] = {
43 {vk::DescriptorType::eUniformBuffer, num_sets * 90}, 42 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_sets * 90},
44 {vk::DescriptorType::eStorageBuffer, num_sets * 60}, 43 {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
45 {vk::DescriptorType::eUniformTexelBuffer, num_sets * 64}, 44 {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
46 {vk::DescriptorType::eCombinedImageSampler, num_sets * 64}, 45 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
47 {vk::DescriptorType::eStorageImage, num_sets * 40}}; 46 {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
48 47
49 const vk::DescriptorPoolCreateInfo create_info( 48 VkDescriptorPoolCreateInfo ci;
50 vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, num_sets, 49 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
51 static_cast<u32>(std::size(pool_sizes)), std::data(pool_sizes)); 50 ci.pNext = nullptr;
52 const auto dev = device.GetLogical(); 51 ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
53 return *pools.emplace_back( 52 ci.maxSets = num_sets;
54 dev.createDescriptorPoolUnique(create_info, nullptr, device.GetDispatchLoader())); 53 ci.poolSizeCount = static_cast<u32>(std::size(pool_sizes));
54 ci.pPoolSizes = std::data(pool_sizes);
55 return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
55} 56}
56 57
57std::vector<UniqueDescriptorSet> VKDescriptorPool::AllocateDescriptors( 58vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
58 vk::DescriptorSetLayout layout, std::size_t count) { 59 std::size_t count) {
59 std::vector layout_copies(count, layout); 60 const std::vector layout_copies(count, layout);
60 vk::DescriptorSetAllocateInfo allocate_info(active_pool, static_cast<u32>(count), 61 VkDescriptorSetAllocateInfo ai;
61 layout_copies.data()); 62 ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
62 63 ai.pNext = nullptr;
63 std::vector<vk::DescriptorSet> sets(count); 64 ai.descriptorPool = **active_pool;
64 const auto dev = device.GetLogical(); 65 ai.descriptorSetCount = static_cast<u32>(count);
65 const auto& dld = device.GetDispatchLoader(); 66 ai.pSetLayouts = layout_copies.data();
66 switch (const auto result = dev.allocateDescriptorSets(&allocate_info, sets.data(), dld)) { 67
67 case vk::Result::eSuccess: 68 vk::DescriptorSets sets = active_pool->Allocate(ai);
68 break; 69 if (!sets.IsOutOfPoolMemory()) {
69 case vk::Result::eErrorOutOfPoolMemory: 70 return sets;
70 active_pool = AllocateNewPool();
71 allocate_info.descriptorPool = active_pool;
72 if (dev.allocateDescriptorSets(&allocate_info, sets.data(), dld) == vk::Result::eSuccess) {
73 break;
74 }
75 [[fallthrough]];
76 default:
77 vk::throwResultException(result, "vk::Device::allocateDescriptorSetsUnique");
78 } 71 }
79 72
 80 vk::PoolFree deleter(dev, active_pool, dld); 73 // Our current pool is out of memory. Allocate a new one and retry.
81 std::vector<UniqueDescriptorSet> unique_sets; 74 active_pool = AllocateNewPool();
82 unique_sets.reserve(count); 75 ai.descriptorPool = **active_pool;
83 for (const auto set : sets) { 76 sets = active_pool->Allocate(ai);
84 unique_sets.push_back(UniqueDescriptorSet{set, deleter}); 77 if (!sets.IsOutOfPoolMemory()) {
78 return sets;
85 } 79 }
86 return unique_sets; 80
81 // After allocating a new pool, we are out of memory again. We can't handle this from here.
82 throw vk::Exception(VK_ERROR_OUT_OF_POOL_MEMORY);
87} 83}
88 84
89} // namespace Vulkan 85} // namespace Vulkan
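
In raw Vulkan terms, the new allocation path is the standard out-of-pool-memory retry: attempt the allocation, and only on VK_ERROR_OUT_OF_POOL_MEMORY switch to a fresh pool and try once more. A hedged sketch of that pattern (AllocateWithRetry is illustrative, not the wrapper's API):

    #include <vulkan/vulkan.h>

    VkResult AllocateWithRetry(VkDevice device, VkDescriptorSetAllocateInfo& ai,
                               VkDescriptorSet* sets, VkDescriptorPool fresh_pool) {
        VkResult result = vkAllocateDescriptorSets(device, &ai, sets);
        if (result != VK_ERROR_OUT_OF_POOL_MEMORY) {
            return result; // success, or an error the caller must surface
        }
        ai.descriptorPool = fresh_pool; // point at a newly created pool, retry once
        return vkAllocateDescriptorSets(device, &ai, sets);
    }
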
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index a441dbc0f..ab40c70f0 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -8,8 +8,8 @@
8#include <vector> 8#include <vector>
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_resource_manager.h" 11#include "video_core/renderer_vulkan/vk_resource_manager.h"
12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -17,21 +17,21 @@ class VKDescriptorPool;
17 17
18class DescriptorAllocator final : public VKFencedPool { 18class DescriptorAllocator final : public VKFencedPool {
19public: 19public:
20 explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, vk::DescriptorSetLayout layout); 20 explicit DescriptorAllocator(VKDescriptorPool& descriptor_pool, VkDescriptorSetLayout layout);
21 ~DescriptorAllocator() override; 21 ~DescriptorAllocator() override;
22 22
23 DescriptorAllocator(const DescriptorAllocator&) = delete; 23 DescriptorAllocator(const DescriptorAllocator&) = delete;
24 24
25 vk::DescriptorSet Commit(VKFence& fence); 25 VkDescriptorSet Commit(VKFence& fence);
26 26
27protected: 27protected:
28 void Allocate(std::size_t begin, std::size_t end) override; 28 void Allocate(std::size_t begin, std::size_t end) override;
29 29
30private: 30private:
31 VKDescriptorPool& descriptor_pool; 31 VKDescriptorPool& descriptor_pool;
32 const vk::DescriptorSetLayout layout; 32 const VkDescriptorSetLayout layout;
33 33
34 std::vector<UniqueDescriptorSet> descriptors; 34 std::vector<vk::DescriptorSets> descriptors_allocations;
35}; 35};
36 36
37class VKDescriptorPool final { 37class VKDescriptorPool final {
@@ -42,15 +42,14 @@ public:
42 ~VKDescriptorPool(); 42 ~VKDescriptorPool();
43 43
44private: 44private:
45 vk::DescriptorPool AllocateNewPool(); 45 vk::DescriptorPool* AllocateNewPool();
46 46
47 std::vector<UniqueDescriptorSet> AllocateDescriptors(vk::DescriptorSetLayout layout, 47 vk::DescriptorSets AllocateDescriptors(VkDescriptorSetLayout layout, std::size_t count);
48 std::size_t count);
49 48
50 const VKDevice& device; 49 const VKDevice& device;
51 50
52 std::vector<UniqueDescriptorPool> pools; 51 std::vector<vk::DescriptorPool> pools;
53 vk::DescriptorPool active_pool; 52 vk::DescriptorPool* active_pool;
54}; 53};
55 54
56} // namespace Vulkan \ No newline at end of file 55} // namespace Vulkan \ No newline at end of file
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 6f4ae9132..52d29e49d 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -6,15 +6,15 @@
6#include <chrono> 6#include <chrono>
7#include <cstdlib> 7#include <cstdlib>
8#include <optional> 8#include <optional>
9#include <set>
10#include <string_view> 9#include <string_view>
11#include <thread> 10#include <thread>
11#include <unordered_set>
12#include <vector> 12#include <vector>
13 13
14#include "common/assert.h" 14#include "common/assert.h"
15#include "core/settings.h" 15#include "core/settings.h"
16#include "video_core/renderer_vulkan/declarations.h"
17#include "video_core/renderer_vulkan/vk_device.h" 16#include "video_core/renderer_vulkan/vk_device.h"
17#include "video_core/renderer_vulkan/wrapper.h"
18 18
19namespace Vulkan { 19namespace Vulkan {
20 20
@@ -22,49 +22,43 @@ namespace {
22 22
23namespace Alternatives { 23namespace Alternatives {
24 24
25constexpr std::array Depth24UnormS8Uint = {vk::Format::eD32SfloatS8Uint, 25constexpr std::array Depth24UnormS8_UINT = {VK_FORMAT_D32_SFLOAT_S8_UINT,
26 vk::Format::eD16UnormS8Uint, vk::Format{}}; 26 VK_FORMAT_D16_UNORM_S8_UINT, VkFormat{}};
27constexpr std::array Depth16UnormS8Uint = {vk::Format::eD24UnormS8Uint, 27constexpr std::array Depth16UnormS8_UINT = {VK_FORMAT_D24_UNORM_S8_UINT,
28 vk::Format::eD32SfloatS8Uint, vk::Format{}}; 28 VK_FORMAT_D32_SFLOAT_S8_UINT, VkFormat{}};
29 29
30} // namespace Alternatives 30} // namespace Alternatives
31 31
32constexpr std::array REQUIRED_EXTENSIONS = {
33 VK_KHR_SWAPCHAIN_EXTENSION_NAME,
34 VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
35 VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
36 VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
37 VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
38 VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
39 VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
40 VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
41 VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
42};
43
32template <typename T> 44template <typename T>
33void SetNext(void**& next, T& data) { 45void SetNext(void**& next, T& data) {
34 *next = &data; 46 *next = &data;
35 next = &data.pNext; 47 next = &data.pNext;
36} 48}
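
SetNext is the usual pNext-chain builder: `next` always points at the pNext slot of the last struct appended, so each call links one more extension struct in order. One pitfall worth spelling out is termination — a struct whose pNext is never overwritten by a later SetNext call must have been nulled beforehand, or the chain ends in an uninitialized pointer. An unrolled sketch of what two SetNext calls do (BuildChain is illustrative):

    #include <vulkan/vulkan.h>

    void BuildChain(VkPhysicalDeviceFeatures2& features2,
                    VkPhysicalDevice16BitStorageFeaturesKHR& bit16,
                    VkPhysicalDeviceHostQueryResetFeaturesEXT& host_query_reset) {
        void** next = &features2.pNext;

        *next = &bit16;            // features2.pNext -> bit16
        next = &bit16.pNext;

        *next = &host_query_reset; // bit16.pNext -> host_query_reset
        next = &host_query_reset.pNext;

        *next = nullptr;           // terminate; otherwise the tail pNext keeps
                                   // whatever the struct happened to contain
    }
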
37 49
38template <typename T> 50constexpr const VkFormat* GetFormatAlternatives(VkFormat format) {
39T GetFeatures(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dld) {
40 vk::PhysicalDeviceFeatures2 features;
41 T extension_features;
42 features.pNext = &extension_features;
43 physical.getFeatures2(&features, dld);
44 return extension_features;
45}
46
47template <typename T>
48T GetProperties(vk::PhysicalDevice physical, const vk::DispatchLoaderDynamic& dld) {
49 vk::PhysicalDeviceProperties2 properties;
50 T extension_properties;
51 properties.pNext = &extension_properties;
52 physical.getProperties2(&properties, dld);
53 return extension_properties;
54}
55
56constexpr const vk::Format* GetFormatAlternatives(vk::Format format) {
57 switch (format) { 51 switch (format) {
58 case vk::Format::eD24UnormS8Uint: 52 case VK_FORMAT_D24_UNORM_S8_UINT:
59 return Alternatives::Depth24UnormS8Uint.data(); 53 return Alternatives::Depth24UnormS8_UINT.data();
60 case vk::Format::eD16UnormS8Uint: 54 case VK_FORMAT_D16_UNORM_S8_UINT:
61 return Alternatives::Depth16UnormS8Uint.data(); 55 return Alternatives::Depth16UnormS8_UINT.data();
62 default: 56 default:
63 return nullptr; 57 return nullptr;
64 } 58 }
65} 59}
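
The alternative lists rely on a sentinel: VkFormat's zero value is VK_FORMAT_UNDEFINED, and each array ends with VkFormat{}, so callers can walk the list without a separate length. A sketch of that convention (Contains is illustrative):

    #include <cstddef>
    #include <vulkan/vulkan.h>

    // Walks a zero-terminated alternative list (VK_FORMAT_UNDEFINED == 0).
    bool Contains(const VkFormat* alternatives, VkFormat format) {
        for (std::size_t i = 0; alternatives[i] != VK_FORMAT_UNDEFINED; ++i) {
            if (alternatives[i] == format) {
                return true;
            }
        }
        return false;
    }
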
66 60
67vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, FormatType format_type) { 61VkFormatFeatureFlags GetFormatFeatures(VkFormatProperties properties, FormatType format_type) {
68 switch (format_type) { 62 switch (format_type) {
69 case FormatType::Linear: 63 case FormatType::Linear:
70 return properties.linearTilingFeatures; 64 return properties.linearTilingFeatures;
@@ -77,79 +71,220 @@ vk::FormatFeatureFlags GetFormatFeatures(vk::FormatProperties properties, Format
77 } 71 }
78} 72}
79 73
74std::unordered_map<VkFormat, VkFormatProperties> GetFormatProperties(
75 vk::PhysicalDevice physical, const vk::InstanceDispatch& dld) {
76 static constexpr std::array formats{VK_FORMAT_A8B8G8R8_UNORM_PACK32,
77 VK_FORMAT_A8B8G8R8_UINT_PACK32,
78 VK_FORMAT_A8B8G8R8_SNORM_PACK32,
79 VK_FORMAT_A8B8G8R8_SRGB_PACK32,
80 VK_FORMAT_B5G6R5_UNORM_PACK16,
81 VK_FORMAT_A2B10G10R10_UNORM_PACK32,
82 VK_FORMAT_A1R5G5B5_UNORM_PACK16,
83 VK_FORMAT_R32G32B32A32_SFLOAT,
84 VK_FORMAT_R32G32B32A32_UINT,
85 VK_FORMAT_R32G32_SFLOAT,
86 VK_FORMAT_R32G32_UINT,
87 VK_FORMAT_R16G16B16A16_UINT,
88 VK_FORMAT_R16G16B16A16_SNORM,
89 VK_FORMAT_R16G16B16A16_UNORM,
90 VK_FORMAT_R16G16_UNORM,
91 VK_FORMAT_R16G16_SNORM,
92 VK_FORMAT_R16G16_SFLOAT,
93 VK_FORMAT_R16_UNORM,
94 VK_FORMAT_R8G8B8A8_SRGB,
95 VK_FORMAT_R8G8_UNORM,
96 VK_FORMAT_R8G8_SNORM,
97 VK_FORMAT_R8_UNORM,
98 VK_FORMAT_R8_UINT,
99 VK_FORMAT_B10G11R11_UFLOAT_PACK32,
100 VK_FORMAT_R32_SFLOAT,
101 VK_FORMAT_R32_UINT,
102 VK_FORMAT_R32_SINT,
103 VK_FORMAT_R16_SFLOAT,
104 VK_FORMAT_R16G16B16A16_SFLOAT,
105 VK_FORMAT_B8G8R8A8_UNORM,
106 VK_FORMAT_R4G4B4A4_UNORM_PACK16,
107 VK_FORMAT_D32_SFLOAT,
108 VK_FORMAT_D16_UNORM,
109 VK_FORMAT_D16_UNORM_S8_UINT,
110 VK_FORMAT_D24_UNORM_S8_UINT,
111 VK_FORMAT_D32_SFLOAT_S8_UINT,
112 VK_FORMAT_BC1_RGBA_UNORM_BLOCK,
113 VK_FORMAT_BC2_UNORM_BLOCK,
114 VK_FORMAT_BC3_UNORM_BLOCK,
115 VK_FORMAT_BC4_UNORM_BLOCK,
116 VK_FORMAT_BC5_UNORM_BLOCK,
117 VK_FORMAT_BC5_SNORM_BLOCK,
118 VK_FORMAT_BC7_UNORM_BLOCK,
119 VK_FORMAT_BC6H_UFLOAT_BLOCK,
120 VK_FORMAT_BC6H_SFLOAT_BLOCK,
121 VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
122 VK_FORMAT_BC2_SRGB_BLOCK,
123 VK_FORMAT_BC3_SRGB_BLOCK,
124 VK_FORMAT_BC7_SRGB_BLOCK,
125 VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
126 VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
127 VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
128 VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
129 VK_FORMAT_ASTC_5x5_UNORM_BLOCK,
130 VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
131 VK_FORMAT_ASTC_10x8_UNORM_BLOCK,
132 VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
133 VK_FORMAT_ASTC_6x6_UNORM_BLOCK,
134 VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
135 VK_FORMAT_ASTC_10x10_UNORM_BLOCK,
136 VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
137 VK_FORMAT_ASTC_12x12_UNORM_BLOCK,
138 VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
139 VK_FORMAT_ASTC_8x6_UNORM_BLOCK,
140 VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
141 VK_FORMAT_ASTC_6x5_UNORM_BLOCK,
142 VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
143 VK_FORMAT_E5B9G9R9_UFLOAT_PACK32};
144 std::unordered_map<VkFormat, VkFormatProperties> format_properties;
145 for (const auto format : formats) {
146 format_properties.emplace(format, physical.GetFormatProperties(format));
147 }
148 return format_properties;
149}
150
80} // Anonymous namespace 151} // Anonymous namespace
81 152
82VKDevice::VKDevice(const vk::DispatchLoaderDynamic& dld, vk::PhysicalDevice physical, 153VKDevice::VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
83 vk::SurfaceKHR surface) 154 const vk::InstanceDispatch& dld)
84 : dld{dld}, physical{physical}, properties{physical.getProperties(dld)}, 155 : dld{dld}, physical{physical}, properties{physical.GetProperties()},
85 format_properties{GetFormatProperties(dld, physical)} { 156 format_properties{GetFormatProperties(physical, dld)} {
86 SetupFamilies(surface); 157 SetupFamilies(surface);
87 SetupFeatures(); 158 SetupFeatures();
88} 159}
89 160
90VKDevice::~VKDevice() = default; 161VKDevice::~VKDevice() = default;
91 162
92bool VKDevice::Create(vk::Instance instance) { 163bool VKDevice::Create() {
93 const auto queue_cis = GetDeviceQueueCreateInfos(); 164 const auto queue_cis = GetDeviceQueueCreateInfos();
94 const std::vector extensions = LoadExtensions(); 165 const std::vector extensions = LoadExtensions();
95 166
96 vk::PhysicalDeviceFeatures2 features2; 167 VkPhysicalDeviceFeatures2 features2;
168 features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
169 features2.pNext = nullptr;
97 void** next = &features2.pNext; 170 void** next = &features2.pNext;
171
98 auto& features = features2.features; 172 auto& features = features2.features;
99 features.vertexPipelineStoresAndAtomics = true; 173 features.robustBufferAccess = false;
174 features.fullDrawIndexUint32 = false;
175 features.imageCubeArray = false;
100 features.independentBlend = true; 176 features.independentBlend = true;
177 features.geometryShader = true;
178 features.tessellationShader = true;
179 features.sampleRateShading = false;
180 features.dualSrcBlend = false;
181 features.logicOp = false;
182 features.multiDrawIndirect = false;
183 features.drawIndirectFirstInstance = false;
101 features.depthClamp = true; 184 features.depthClamp = true;
102 features.samplerAnisotropy = true; 185 features.depthBiasClamp = true;
186 features.fillModeNonSolid = false;
187 features.depthBounds = false;
188 features.wideLines = false;
103 features.largePoints = true; 189 features.largePoints = true;
190 features.alphaToOne = false;
104 features.multiViewport = true; 191 features.multiViewport = true;
105 features.depthBiasClamp = true; 192 features.samplerAnisotropy = true;
106 features.geometryShader = true; 193 features.textureCompressionETC2 = false;
107 features.tessellationShader = true; 194 features.textureCompressionASTC_LDR = is_optimal_astc_supported;
195 features.textureCompressionBC = false;
108 features.occlusionQueryPrecise = true; 196 features.occlusionQueryPrecise = true;
197 features.pipelineStatisticsQuery = false;
198 features.vertexPipelineStoresAndAtomics = true;
109 features.fragmentStoresAndAtomics = true; 199 features.fragmentStoresAndAtomics = true;
200 features.shaderTessellationAndGeometryPointSize = false;
110 features.shaderImageGatherExtended = true; 201 features.shaderImageGatherExtended = true;
202 features.shaderStorageImageExtendedFormats = false;
203 features.shaderStorageImageMultisample = false;
111 features.shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported; 204 features.shaderStorageImageReadWithoutFormat = is_formatless_image_load_supported;
112 features.shaderStorageImageWriteWithoutFormat = true; 205 features.shaderStorageImageWriteWithoutFormat = true;
113 features.textureCompressionASTC_LDR = is_optimal_astc_supported; 206 features.shaderUniformBufferArrayDynamicIndexing = false;
114 207 features.shaderSampledImageArrayDynamicIndexing = false;
115 vk::PhysicalDevice16BitStorageFeaturesKHR bit16_storage; 208 features.shaderStorageBufferArrayDynamicIndexing = false;
209 features.shaderStorageImageArrayDynamicIndexing = false;
210 features.shaderClipDistance = false;
211 features.shaderCullDistance = false;
212 features.shaderFloat64 = false;
213 features.shaderInt64 = false;
214 features.shaderInt16 = false;
215 features.shaderResourceResidency = false;
216 features.shaderResourceMinLod = false;
217 features.sparseBinding = false;
218 features.sparseResidencyBuffer = false;
219 features.sparseResidencyImage2D = false;
220 features.sparseResidencyImage3D = false;
221 features.sparseResidency2Samples = false;
222 features.sparseResidency4Samples = false;
223 features.sparseResidency8Samples = false;
224 features.sparseResidency16Samples = false;
225 features.sparseResidencyAliased = false;
226 features.variableMultisampleRate = false;
227 features.inheritedQueries = false;
228
229 VkPhysicalDevice16BitStorageFeaturesKHR bit16_storage;
230 bit16_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR;
231 bit16_storage.pNext = nullptr;
232 bit16_storage.storageBuffer16BitAccess = false;
116 bit16_storage.uniformAndStorageBuffer16BitAccess = true; 233 bit16_storage.uniformAndStorageBuffer16BitAccess = true;
234 bit16_storage.storagePushConstant16 = false;
235 bit16_storage.storageInputOutput16 = false;
117 SetNext(next, bit16_storage); 236 SetNext(next, bit16_storage);
118 237
119 vk::PhysicalDevice8BitStorageFeaturesKHR bit8_storage; 238 VkPhysicalDevice8BitStorageFeaturesKHR bit8_storage;
239 bit8_storage.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR;
240 bit8_storage.pNext = nullptr;
241 bit8_storage.storageBuffer8BitAccess = false;
120 bit8_storage.uniformAndStorageBuffer8BitAccess = true; 242 bit8_storage.uniformAndStorageBuffer8BitAccess = true;
243 bit8_storage.storagePushConstant8 = false;
121 SetNext(next, bit8_storage); 244 SetNext(next, bit8_storage);
122 245
123 vk::PhysicalDeviceHostQueryResetFeaturesEXT host_query_reset; 246 VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset;
247 host_query_reset.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
124 host_query_reset.hostQueryReset = true; 248 host_query_reset.hostQueryReset = true;
125 SetNext(next, host_query_reset); 249 SetNext(next, host_query_reset);
126 250
127 vk::PhysicalDeviceFloat16Int8FeaturesKHR float16_int8; 251 VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8;
128 if (is_float16_supported) { 252 if (is_float16_supported) {
253 float16_int8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
254 float16_int8.pNext = nullptr;
129 float16_int8.shaderFloat16 = true; 255 float16_int8.shaderFloat16 = true;
256 float16_int8.shaderInt8 = false;
130 SetNext(next, float16_int8); 257 SetNext(next, float16_int8);
131 } else { 258 } else {
132 LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively"); 259 LOG_INFO(Render_Vulkan, "Device doesn't support float16 natively");
133 } 260 }
134 261
135 vk::PhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout; 262 VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR std430_layout;
136 if (khr_uniform_buffer_standard_layout) { 263 if (khr_uniform_buffer_standard_layout) {
264 std430_layout.sType =
265 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR;
266 std430_layout.pNext = nullptr;
137 std430_layout.uniformBufferStandardLayout = true; 267 std430_layout.uniformBufferStandardLayout = true;
138 SetNext(next, std430_layout); 268 SetNext(next, std430_layout);
139 } else { 269 } else {
140 LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs"); 270 LOG_INFO(Render_Vulkan, "Device doesn't support packed UBOs");
141 } 271 }
142 272
143 vk::PhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8; 273 VkPhysicalDeviceIndexTypeUint8FeaturesEXT index_type_uint8;
144 if (ext_index_type_uint8) { 274 if (ext_index_type_uint8) {
275 index_type_uint8.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT;
276 index_type_uint8.pNext = nullptr;
145 index_type_uint8.indexTypeUint8 = true; 277 index_type_uint8.indexTypeUint8 = true;
146 SetNext(next, index_type_uint8); 278 SetNext(next, index_type_uint8);
147 } else { 279 } else {
148 LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes"); 280 LOG_INFO(Render_Vulkan, "Device doesn't support uint8 indexes");
149 } 281 }
150 282
151 vk::PhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback; 283 VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback;
152 if (ext_transform_feedback) { 284 if (ext_transform_feedback) {
285 transform_feedback.sType =
286 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
287 transform_feedback.pNext = nullptr;
153 transform_feedback.transformFeedback = true; 288 transform_feedback.transformFeedback = true;
154 transform_feedback.geometryStreams = true; 289 transform_feedback.geometryStreams = true;
155 SetNext(next, transform_feedback); 290 SetNext(next, transform_feedback);
@@ -161,60 +296,48 @@ bool VKDevice::Create(vk::Instance instance) {
161 LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); 296 LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
162 } 297 }
163 298
164 vk::DeviceCreateInfo device_ci({}, static_cast<u32>(queue_cis.size()), queue_cis.data(), 0, 299 logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld);
165 nullptr, static_cast<u32>(extensions.size()), extensions.data(), 300 if (!logical) {
166 nullptr); 301 LOG_ERROR(Render_Vulkan, "Failed to create logical device");
167 device_ci.pNext = &features2;
168
169 vk::Device unsafe_logical;
170 if (physical.createDevice(&device_ci, nullptr, &unsafe_logical, dld) != vk::Result::eSuccess) {
171 LOG_CRITICAL(Render_Vulkan, "Logical device failed to be created!");
172 return false; 302 return false;
173 } 303 }
174 dld.init(instance, dld.vkGetInstanceProcAddr, unsafe_logical);
175 logical = UniqueDevice(unsafe_logical, {nullptr, dld});
176 304
177 CollectTelemetryParameters(); 305 CollectTelemetryParameters();
178 306
179 graphics_queue = logical->getQueue(graphics_family, 0, dld); 307 graphics_queue = logical.GetQueue(graphics_family);
180 present_queue = logical->getQueue(present_family, 0, dld); 308 present_queue = logical.GetQueue(present_family);
181 return true; 309 return true;
182} 310}
183 311
184vk::Format VKDevice::GetSupportedFormat(vk::Format wanted_format, 312VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
185 vk::FormatFeatureFlags wanted_usage, 313 FormatType format_type) const {
186 FormatType format_type) const {
187 if (IsFormatSupported(wanted_format, wanted_usage, format_type)) { 314 if (IsFormatSupported(wanted_format, wanted_usage, format_type)) {
188 return wanted_format; 315 return wanted_format;
189 } 316 }
190 // The wanted format is not supported by hardware, search for alternatives 317 // The wanted format is not supported by hardware, search for alternatives
191 const vk::Format* alternatives = GetFormatAlternatives(wanted_format); 318 const VkFormat* alternatives = GetFormatAlternatives(wanted_format);
192 if (alternatives == nullptr) { 319 if (alternatives == nullptr) {
193 UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host " 320 UNREACHABLE_MSG("Format={} with usage={} and type={} has no defined alternatives and host "
194 "hardware does not support it", 321 "hardware does not support it",
195 vk::to_string(wanted_format), vk::to_string(wanted_usage), 322 wanted_format, wanted_usage, format_type);
196 static_cast<u32>(format_type));
197 return wanted_format; 323 return wanted_format;
198 } 324 }
199 325
200 std::size_t i = 0; 326 std::size_t i = 0;
201 for (vk::Format alternative = alternatives[0]; alternative != vk::Format{}; 327 for (VkFormat alternative = *alternatives; alternative; alternative = alternatives[++i]) {
202 alternative = alternatives[++i]) {
203 if (!IsFormatSupported(alternative, wanted_usage, format_type)) { 328 if (!IsFormatSupported(alternative, wanted_usage, format_type)) {
204 continue; 329 continue;
205 } 330 }
206 LOG_WARNING(Render_Vulkan, 331 LOG_WARNING(Render_Vulkan,
207 "Emulating format={} with alternative format={} with usage={} and type={}", 332 "Emulating format={} with alternative format={} with usage={} and type={}",
208 static_cast<u32>(wanted_format), static_cast<u32>(alternative), 333 wanted_format, alternative, wanted_usage, format_type);
209 static_cast<u32>(wanted_usage), static_cast<u32>(format_type));
210 return alternative; 334 return alternative;
211 } 335 }
212 336
213 // No alternatives found, panic 337 // No alternatives found, panic
214 UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and " 338 UNREACHABLE_MSG("Format={} with usage={} and type={} is not supported by the host hardware and "
215 "doesn't support any of the alternatives", 339 "doesn't support any of the alternatives",
216 static_cast<u32>(wanted_format), static_cast<u32>(wanted_usage), 340 wanted_format, wanted_usage, format_type);
217 static_cast<u32>(format_type));
218 return wanted_format; 341 return wanted_format;
219} 342}
220 343
@@ -228,38 +351,39 @@ void VKDevice::ReportLoss() const {
228 return; 351 return;
229 } 352 }
230 353
231 [[maybe_unused]] const std::vector data = graphics_queue.getCheckpointDataNV(dld); 354 [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld);
 232 // In debug builds (or with optimizations disabled), this captures the last graphics pipeline 355 // In debug builds (or with optimizations disabled), this captures the last graphics pipeline
 233 // to be executed. It can be inspected from a debugger by evaluating the expression: 356 // to be executed. It can be inspected from a debugger by evaluating the expression:
234 // *(VKGraphicsPipeline*)data[0] 357 // *(VKGraphicsPipeline*)data[0]
235} 358}
236 359
237bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features) const { 360bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
238 // Disable for now to avoid converting ASTC twice. 361 // Disable for now to avoid converting ASTC twice.
239 static constexpr std::array astc_formats = { 362 static constexpr std::array astc_formats = {
240 vk::Format::eAstc4x4UnormBlock, vk::Format::eAstc4x4SrgbBlock, 363 VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_FORMAT_ASTC_4x4_SRGB_BLOCK,
241 vk::Format::eAstc5x4UnormBlock, vk::Format::eAstc5x4SrgbBlock, 364 VK_FORMAT_ASTC_5x4_UNORM_BLOCK, VK_FORMAT_ASTC_5x4_SRGB_BLOCK,
242 vk::Format::eAstc5x5UnormBlock, vk::Format::eAstc5x5SrgbBlock, 365 VK_FORMAT_ASTC_5x5_UNORM_BLOCK, VK_FORMAT_ASTC_5x5_SRGB_BLOCK,
243 vk::Format::eAstc6x5UnormBlock, vk::Format::eAstc6x5SrgbBlock, 366 VK_FORMAT_ASTC_6x5_UNORM_BLOCK, VK_FORMAT_ASTC_6x5_SRGB_BLOCK,
244 vk::Format::eAstc6x6UnormBlock, vk::Format::eAstc6x6SrgbBlock, 367 VK_FORMAT_ASTC_6x6_UNORM_BLOCK, VK_FORMAT_ASTC_6x6_SRGB_BLOCK,
245 vk::Format::eAstc8x5UnormBlock, vk::Format::eAstc8x5SrgbBlock, 368 VK_FORMAT_ASTC_8x5_UNORM_BLOCK, VK_FORMAT_ASTC_8x5_SRGB_BLOCK,
246 vk::Format::eAstc8x6UnormBlock, vk::Format::eAstc8x6SrgbBlock, 369 VK_FORMAT_ASTC_8x6_UNORM_BLOCK, VK_FORMAT_ASTC_8x6_SRGB_BLOCK,
247 vk::Format::eAstc8x8UnormBlock, vk::Format::eAstc8x8SrgbBlock, 370 VK_FORMAT_ASTC_8x8_UNORM_BLOCK, VK_FORMAT_ASTC_8x8_SRGB_BLOCK,
248 vk::Format::eAstc10x5UnormBlock, vk::Format::eAstc10x5SrgbBlock, 371 VK_FORMAT_ASTC_10x5_UNORM_BLOCK, VK_FORMAT_ASTC_10x5_SRGB_BLOCK,
249 vk::Format::eAstc10x6UnormBlock, vk::Format::eAstc10x6SrgbBlock, 372 VK_FORMAT_ASTC_10x6_UNORM_BLOCK, VK_FORMAT_ASTC_10x6_SRGB_BLOCK,
250 vk::Format::eAstc10x8UnormBlock, vk::Format::eAstc10x8SrgbBlock, 373 VK_FORMAT_ASTC_10x8_UNORM_BLOCK, VK_FORMAT_ASTC_10x8_SRGB_BLOCK,
251 vk::Format::eAstc10x10UnormBlock, vk::Format::eAstc10x10SrgbBlock, 374 VK_FORMAT_ASTC_10x10_UNORM_BLOCK, VK_FORMAT_ASTC_10x10_SRGB_BLOCK,
252 vk::Format::eAstc12x10UnormBlock, vk::Format::eAstc12x10SrgbBlock, 375 VK_FORMAT_ASTC_12x10_UNORM_BLOCK, VK_FORMAT_ASTC_12x10_SRGB_BLOCK,
253 vk::Format::eAstc12x12UnormBlock, vk::Format::eAstc12x12SrgbBlock}; 376 VK_FORMAT_ASTC_12x12_UNORM_BLOCK, VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
377 };
254 if (!features.textureCompressionASTC_LDR) { 378 if (!features.textureCompressionASTC_LDR) {
255 return false; 379 return false;
256 } 380 }
257 const auto format_feature_usage{ 381 const auto format_feature_usage{
258 vk::FormatFeatureFlagBits::eSampledImage | vk::FormatFeatureFlagBits::eBlitSrc | 382 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT |
259 vk::FormatFeatureFlagBits::eBlitDst | vk::FormatFeatureFlagBits::eTransferSrc | 383 VK_FORMAT_FEATURE_BLIT_DST_BIT | VK_FORMAT_FEATURE_TRANSFER_SRC_BIT |
260 vk::FormatFeatureFlagBits::eTransferDst}; 384 VK_FORMAT_FEATURE_TRANSFER_DST_BIT};
261 for (const auto format : astc_formats) { 385 for (const auto format : astc_formats) {
262 const auto format_properties{physical.getFormatProperties(format, dld)}; 386 const auto format_properties{physical.GetFormatProperties(format)};
263 if (!(format_properties.optimalTilingFeatures & format_feature_usage)) { 387 if (!(format_properties.optimalTilingFeatures & format_feature_usage)) {
264 return false; 388 return false;
265 } 389 }
@@ -267,61 +391,49 @@ bool VKDevice::IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features
267 return true; 391 return true;
268} 392}
269 393
270bool VKDevice::IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, 394bool VKDevice::IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
271 FormatType format_type) const { 395 FormatType format_type) const {
272 const auto it = format_properties.find(wanted_format); 396 const auto it = format_properties.find(wanted_format);
273 if (it == format_properties.end()) { 397 if (it == format_properties.end()) {
274 UNIMPLEMENTED_MSG("Unimplemented format query={}", vk::to_string(wanted_format)); 398 UNIMPLEMENTED_MSG("Unimplemented format query={}", wanted_format);
275 return true; 399 return true;
276 } 400 }
277 const auto supported_usage = GetFormatFeatures(it->second, format_type); 401 const auto supported_usage = GetFormatFeatures(it->second, format_type);
278 return (supported_usage & wanted_usage) == wanted_usage; 402 return (supported_usage & wanted_usage) == wanted_usage;
279} 403}
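
The return statement above is the standard flag-subset idiom: a format qualifies only when every requested feature bit is present in the supported mask. Spelled out (HasAll is illustrative):

    #include <vulkan/vulkan.h>

    // True only if no wanted feature bit is missing from supported.
    constexpr bool HasAll(VkFormatFeatureFlags supported, VkFormatFeatureFlags wanted) {
        return (supported & wanted) == wanted;
    }

For example, HasAll(props, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT) fails if either bit is absent.
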
280 404
281bool VKDevice::IsSuitable(vk::PhysicalDevice physical, vk::SurfaceKHR surface, 405bool VKDevice::IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface) {
282 const vk::DispatchLoaderDynamic& dld) {
283 static constexpr std::array required_extensions = {
284 VK_KHR_SWAPCHAIN_EXTENSION_NAME,
285 VK_KHR_16BIT_STORAGE_EXTENSION_NAME,
286 VK_KHR_8BIT_STORAGE_EXTENSION_NAME,
287 VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME,
288 VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME,
289 VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME,
290 VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME,
291 VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME,
292 };
293 bool is_suitable = true; 406 bool is_suitable = true;
294 std::bitset<required_extensions.size()> available_extensions{}; 407 std::bitset<REQUIRED_EXTENSIONS.size()> available_extensions;
295 408
296 for (const auto& prop : physical.enumerateDeviceExtensionProperties(nullptr, dld)) { 409 for (const auto& prop : physical.EnumerateDeviceExtensionProperties()) {
297 for (std::size_t i = 0; i < required_extensions.size(); ++i) { 410 for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
298 if (available_extensions[i]) { 411 if (available_extensions[i]) {
299 continue; 412 continue;
300 } 413 }
301 available_extensions[i] = 414 const std::string_view name{prop.extensionName};
302 required_extensions[i] == std::string_view{prop.extensionName}; 415 available_extensions[i] = name == REQUIRED_EXTENSIONS[i];
303 } 416 }
304 } 417 }
305 if (!available_extensions.all()) { 418 if (!available_extensions.all()) {
306 for (std::size_t i = 0; i < required_extensions.size(); ++i) { 419 for (std::size_t i = 0; i < REQUIRED_EXTENSIONS.size(); ++i) {
307 if (available_extensions[i]) { 420 if (available_extensions[i]) {
308 continue; 421 continue;
309 } 422 }
310 LOG_ERROR(Render_Vulkan, "Missing required extension: {}", required_extensions[i]); 423 LOG_ERROR(Render_Vulkan, "Missing required extension: {}", REQUIRED_EXTENSIONS[i]);
311 is_suitable = false; 424 is_suitable = false;
312 } 425 }
313 } 426 }
314 427
315 bool has_graphics{}, has_present{}; 428 bool has_graphics{}, has_present{};
316 const auto queue_family_properties = physical.getQueueFamilyProperties(dld); 429 const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
317 for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { 430 for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
318 const auto& family = queue_family_properties[i]; 431 const auto& family = queue_family_properties[i];
319 if (family.queueCount == 0) { 432 if (family.queueCount == 0) {
320 continue; 433 continue;
321 } 434 }
322 has_graphics |= 435 has_graphics |= family.queueFlags & VK_QUEUE_GRAPHICS_BIT;
323 (family.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast<vk::QueueFlagBits>(0); 436 has_present |= physical.GetSurfaceSupportKHR(i, surface);
324 has_present |= physical.getSurfaceSupportKHR(i, surface, dld) != 0;
325 } 437 }
326 if (!has_graphics || !has_present) { 438 if (!has_graphics || !has_present) {
327 LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue"); 439 LOG_ERROR(Render_Vulkan, "Device lacks a graphics and present queue");
@@ -329,7 +441,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, vk::SurfaceKHR surface,
329 } 441 }
330 442
 331 // TODO(Rodrigo): Check if the device matches all requirements. 443 // TODO(Rodrigo): Check if the device matches all requirements.
332 const auto properties{physical.getProperties(dld)}; 444 const auto properties{physical.GetProperties()};
333 const auto& limits{properties.limits}; 445 const auto& limits{properties.limits};
334 446
335 constexpr u32 required_ubo_size = 65536; 447 constexpr u32 required_ubo_size = 65536;
@@ -346,7 +458,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, vk::SurfaceKHR surface,
346 is_suitable = false; 458 is_suitable = false;
347 } 459 }
348 460
349 const auto features{physical.getFeatures(dld)}; 461 const auto features{physical.GetFeatures()};
350 const std::array feature_report = { 462 const std::array feature_report = {
351 std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"), 463 std::make_pair(features.vertexPipelineStoresAndAtomics, "vertexPipelineStoresAndAtomics"),
352 std::make_pair(features.independentBlend, "independentBlend"), 464 std::make_pair(features.independentBlend, "independentBlend"),
@@ -380,7 +492,7 @@ bool VKDevice::IsSuitable(vk::PhysicalDevice physical, vk::SurfaceKHR surface,
380 492
381std::vector<const char*> VKDevice::LoadExtensions() { 493std::vector<const char*> VKDevice::LoadExtensions() {
382 std::vector<const char*> extensions; 494 std::vector<const char*> extensions;
383 const auto Test = [&](const vk::ExtensionProperties& extension, 495 const auto Test = [&](const VkExtensionProperties& extension,
384 std::optional<std::reference_wrapper<bool>> status, const char* name, 496 std::optional<std::reference_wrapper<bool>> status, const char* name,
385 bool push) { 497 bool push) {
386 if (extension.extensionName != std::string_view(name)) { 498 if (extension.extensionName != std::string_view(name)) {
@@ -394,22 +506,13 @@ std::vector<const char*> VKDevice::LoadExtensions() {
394 } 506 }
395 }; 507 };
396 508
397 extensions.reserve(15); 509 extensions.reserve(7 + REQUIRED_EXTENSIONS.size());
398 extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); 510 extensions.insert(extensions.begin(), REQUIRED_EXTENSIONS.begin(), REQUIRED_EXTENSIONS.end());
399 extensions.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME); 511
400 extensions.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME);
401 extensions.push_back(VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME);
402 extensions.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME);
403 extensions.push_back(VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME);
404 extensions.push_back(VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME);
405 extensions.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
406
407 [[maybe_unused]] const bool nsight =
408 std::getenv("NVTX_INJECTION64_PATH") || std::getenv("NSIGHT_LAUNCHED");
409 bool has_khr_shader_float16_int8{}; 512 bool has_khr_shader_float16_int8{};
410 bool has_ext_subgroup_size_control{}; 513 bool has_ext_subgroup_size_control{};
411 bool has_ext_transform_feedback{}; 514 bool has_ext_transform_feedback{};
412 for (const auto& extension : physical.enumerateDeviceExtensionProperties(nullptr, dld)) { 515 for (const auto& extension : physical.EnumerateDeviceExtensionProperties()) {
413 Test(extension, khr_uniform_buffer_standard_layout, 516 Test(extension, khr_uniform_buffer_standard_layout,
414 VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true); 517 VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME, true);
415 Test(extension, has_khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, 518 Test(extension, has_khr_shader_float16_int8, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME,
@@ -429,38 +532,67 @@ std::vector<const char*> VKDevice::LoadExtensions() {
429 } 532 }
430 } 533 }
431 534
535 VkPhysicalDeviceFeatures2KHR features;
536 features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
537
538 VkPhysicalDeviceProperties2KHR properties;
539 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
540
432 if (has_khr_shader_float16_int8) { 541 if (has_khr_shader_float16_int8) {
433 is_float16_supported = 542 VkPhysicalDeviceFloat16Int8FeaturesKHR float16_int8_features;
434 GetFeatures<vk::PhysicalDeviceFloat16Int8FeaturesKHR>(physical, dld).shaderFloat16; 543 float16_int8_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR;
544 float16_int8_features.pNext = nullptr;
545 features.pNext = &float16_int8_features;
546
547 physical.GetFeatures2KHR(features);
548 is_float16_supported = float16_int8_features.shaderFloat16;
435 extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); 549 extensions.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME);
436 } 550 }
437 551
438 if (has_ext_subgroup_size_control) { 552 if (has_ext_subgroup_size_control) {
439 const auto features = 553 VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_features;
440 GetFeatures<vk::PhysicalDeviceSubgroupSizeControlFeaturesEXT>(physical, dld); 554 subgroup_features.sType =
441 const auto properties = 555 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
442 GetProperties<vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT>(physical, dld); 556 subgroup_features.pNext = nullptr;
443 557 features.pNext = &subgroup_features;
444 is_warp_potentially_bigger = properties.maxSubgroupSize > GuestWarpSize; 558 physical.GetFeatures2KHR(features);
445 559
446 if (features.subgroupSizeControl && properties.minSubgroupSize <= GuestWarpSize && 560 VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_properties;
447 properties.maxSubgroupSize >= GuestWarpSize) { 561 subgroup_properties.sType =
562 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
563 subgroup_properties.pNext = nullptr;
564 properties.pNext = &subgroup_properties;
565 physical.GetProperties2KHR(properties);
566
567 is_warp_potentially_bigger = subgroup_properties.maxSubgroupSize > GuestWarpSize;
568
569 if (subgroup_features.subgroupSizeControl &&
570 subgroup_properties.minSubgroupSize <= GuestWarpSize &&
571 subgroup_properties.maxSubgroupSize >= GuestWarpSize) {
448 extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); 572 extensions.push_back(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME);
449 guest_warp_stages = properties.requiredSubgroupSizeStages; 573 guest_warp_stages = subgroup_properties.requiredSubgroupSizeStages;
450 } 574 }
451 } else { 575 } else {
452 is_warp_potentially_bigger = true; 576 is_warp_potentially_bigger = true;
453 } 577 }
454 578
455 if (has_ext_transform_feedback) { 579 if (has_ext_transform_feedback) {
456 const auto features = 580 VkPhysicalDeviceTransformFeedbackFeaturesEXT tfb_features;
457 GetFeatures<vk::PhysicalDeviceTransformFeedbackFeaturesEXT>(physical, dld); 581 tfb_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT;
458 const auto properties = 582 tfb_features.pNext = nullptr;
459 GetProperties<vk::PhysicalDeviceTransformFeedbackPropertiesEXT>(physical, dld); 583 features.pNext = &tfb_features;
460 584 physical.GetFeatures2KHR(features);
461 if (features.transformFeedback && features.geometryStreams && 585
462 properties.maxTransformFeedbackStreams >= 4 && properties.maxTransformFeedbackBuffers && 586 VkPhysicalDeviceTransformFeedbackPropertiesEXT tfb_properties;
463 properties.transformFeedbackQueries && properties.transformFeedbackDraw) { 587 tfb_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
588 tfb_properties.pNext = nullptr;
589 properties.pNext = &tfb_properties;
590 physical.GetProperties2KHR(properties);
591
592 if (tfb_features.transformFeedback && tfb_features.geometryStreams &&
593 tfb_properties.maxTransformFeedbackStreams >= 4 &&
594 tfb_properties.maxTransformFeedbackBuffers && tfb_properties.transformFeedbackQueries &&
595 tfb_properties.transformFeedbackDraw) {
464 extensions.push_back(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); 596 extensions.push_back(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME);
465 ext_transform_feedback = true; 597 ext_transform_feedback = true;
466 } 598 }
@@ -469,10 +601,10 @@ std::vector<const char*> VKDevice::LoadExtensions() {
469 return extensions; 601 return extensions;
470} 602}
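
The Test lambda in LoadExtensions applies one pattern to every optional extension: compare the enumerated name, record availability in an out-parameter, and optionally push the name into the enabled list. A standalone sketch of that shape (TestExtension is illustrative):

    #include <string_view>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Mirrors the Test lambda: mark availability, optionally enable the name.
    void TestExtension(const VkExtensionProperties& prop, bool& status, const char* name,
                       bool push, std::vector<const char*>& enabled) {
        if (std::string_view{prop.extensionName} != name) {
            return;
        }
        status = true;
        if (push) {
            enabled.push_back(name);
        }
    }
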
471 603
472void VKDevice::SetupFamilies(vk::SurfaceKHR surface) { 604void VKDevice::SetupFamilies(VkSurfaceKHR surface) {
473 std::optional<u32> graphics_family_, present_family_; 605 std::optional<u32> graphics_family_, present_family_;
474 606
475 const auto queue_family_properties = physical.getQueueFamilyProperties(dld); 607 const std::vector queue_family_properties = physical.GetQueueFamilyProperties();
476 for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) { 608 for (u32 i = 0; i < static_cast<u32>(queue_family_properties.size()); ++i) {
477 if (graphics_family_ && present_family_) 609 if (graphics_family_ && present_family_)
478 break; 610 break;
@@ -481,10 +613,10 @@ void VKDevice::SetupFamilies(vk::SurfaceKHR surface) {
481 if (queue_family.queueCount == 0) 613 if (queue_family.queueCount == 0)
482 continue; 614 continue;
483 615
484 if (queue_family.queueFlags & vk::QueueFlagBits::eGraphics) { 616 if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
485 graphics_family_ = i; 617 graphics_family_ = i;
486 } 618 }
487 if (physical.getSurfaceSupportKHR(i, surface, dld)) { 619 if (physical.GetSurfaceSupportKHR(i, surface)) {
488 present_family_ = i; 620 present_family_ = i;
489 } 621 }
490 } 622 }
@@ -495,120 +627,48 @@ void VKDevice::SetupFamilies(vk::SurfaceKHR surface) {
495} 627}
496 628
497void VKDevice::SetupFeatures() { 629void VKDevice::SetupFeatures() {
498 const auto supported_features{physical.getFeatures(dld)}; 630 const auto supported_features{physical.GetFeatures()};
499 is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat; 631 is_formatless_image_load_supported = supported_features.shaderStorageImageReadWithoutFormat;
500 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features); 632 is_optimal_astc_supported = IsOptimalAstcSupported(supported_features);
501} 633}
502 634
503void VKDevice::CollectTelemetryParameters() { 635void VKDevice::CollectTelemetryParameters() {
504 const auto driver = GetProperties<vk::PhysicalDeviceDriverPropertiesKHR>(physical, dld); 636 VkPhysicalDeviceDriverPropertiesKHR driver;
637 driver.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;
638 driver.pNext = nullptr;
639
640 VkPhysicalDeviceProperties2KHR properties;
641 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR;
642 properties.pNext = &driver;
643 physical.GetProperties2KHR(properties);
644
505 driver_id = driver.driverID; 645 driver_id = driver.driverID;
506 vendor_name = driver.driverName; 646 vendor_name = driver.driverName;
507 647
508 const auto extensions = physical.enumerateDeviceExtensionProperties(nullptr, dld); 648 const std::vector extensions = physical.EnumerateDeviceExtensionProperties();
509 reported_extensions.reserve(std::size(extensions)); 649 reported_extensions.reserve(std::size(extensions));
510 for (const auto& extension : extensions) { 650 for (const auto& extension : extensions) {
511 reported_extensions.push_back(extension.extensionName); 651 reported_extensions.push_back(extension.extensionName);
512 } 652 }
513} 653}
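
CollectTelemetryParameters uses the properties2 query pattern: chain the extension struct into VkPhysicalDeviceProperties2 and let one call fill both structs. A sketch using the core Vulkan 1.1 entry point, which is equivalent to the KHR one used above (QueryDriver is illustrative):

    #include <vulkan/vulkan.h>

    VkPhysicalDeviceDriverPropertiesKHR QueryDriver(VkPhysicalDevice physical) {
        VkPhysicalDeviceDriverPropertiesKHR driver{};
        driver.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR;

        VkPhysicalDeviceProperties2 properties{};
        properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
        properties.pNext = &driver; // driver struct is filled through the chain

        vkGetPhysicalDeviceProperties2(physical, &properties);
        return driver; // driverID and driverName are now populated
    }
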
514 654
515std::vector<vk::DeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const { 655std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const {
516 static const float QUEUE_PRIORITY = 1.0f; 656 static constexpr float QUEUE_PRIORITY = 1.0f;
517 657
518 std::set<u32> unique_queue_families = {graphics_family, present_family}; 658 std::unordered_set<u32> unique_queue_families = {graphics_family, present_family};
519 std::vector<vk::DeviceQueueCreateInfo> queue_cis; 659 std::vector<VkDeviceQueueCreateInfo> queue_cis;
520 660
521 for (u32 queue_family : unique_queue_families) 661 for (const u32 queue_family : unique_queue_families) {
522 queue_cis.push_back({{}, queue_family, 1, &QUEUE_PRIORITY}); 662 VkDeviceQueueCreateInfo& ci = queue_cis.emplace_back();
663 ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
664 ci.pNext = nullptr;
665 ci.flags = 0;
666 ci.queueFamilyIndex = queue_family;
667 ci.queueCount = 1;
668 ci.pQueuePriorities = &QUEUE_PRIORITY;
669 }
523 670
524 return queue_cis; 671 return queue_cis;
525} 672}
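
The set matters here because the graphics and present families are often the same index, and Vulkan requires each queueFamilyIndex in pQueueCreateInfos to be unique. A sketch of the deduplication (UniqueFamilies is illustrative):

    #include <cstdint>
    #include <unordered_set>

    // Collapses to a single element when graphics and present share a family.
    std::unordered_set<uint32_t> UniqueFamilies(uint32_t graphics, uint32_t present) {
        return {graphics, present};
    }
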
526 673
527std::unordered_map<vk::Format, vk::FormatProperties> VKDevice::GetFormatProperties(
528 const vk::DispatchLoaderDynamic& dld, vk::PhysicalDevice physical) {
529 static constexpr std::array formats{vk::Format::eA8B8G8R8UnormPack32,
530 vk::Format::eA8B8G8R8UintPack32,
531 vk::Format::eA8B8G8R8SnormPack32,
532 vk::Format::eA8B8G8R8SrgbPack32,
533 vk::Format::eB5G6R5UnormPack16,
534 vk::Format::eA2B10G10R10UnormPack32,
535 vk::Format::eA1R5G5B5UnormPack16,
536 vk::Format::eR32G32B32A32Sfloat,
537 vk::Format::eR32G32B32A32Uint,
538 vk::Format::eR32G32Sfloat,
539 vk::Format::eR32G32Uint,
540 vk::Format::eR16G16B16A16Uint,
541 vk::Format::eR16G16B16A16Snorm,
542 vk::Format::eR16G16B16A16Unorm,
543 vk::Format::eR16G16Unorm,
544 vk::Format::eR16G16Snorm,
545 vk::Format::eR16G16Sfloat,
546 vk::Format::eR16Unorm,
547 vk::Format::eR8G8B8A8Srgb,
548 vk::Format::eR8G8Unorm,
549 vk::Format::eR8G8Snorm,
550 vk::Format::eR8Unorm,
551 vk::Format::eR8Uint,
552 vk::Format::eB10G11R11UfloatPack32,
553 vk::Format::eR32Sfloat,
554 vk::Format::eR32Uint,
555 vk::Format::eR32Sint,
556 vk::Format::eR16Sfloat,
557 vk::Format::eR16G16B16A16Sfloat,
558 vk::Format::eB8G8R8A8Unorm,
559 vk::Format::eR4G4B4A4UnormPack16,
560 vk::Format::eD32Sfloat,
561 vk::Format::eD16Unorm,
562 vk::Format::eD16UnormS8Uint,
563 vk::Format::eD24UnormS8Uint,
564 vk::Format::eD32SfloatS8Uint,
565 vk::Format::eBc1RgbaUnormBlock,
566 vk::Format::eBc2UnormBlock,
567 vk::Format::eBc3UnormBlock,
568 vk::Format::eBc4UnormBlock,
569 vk::Format::eBc5UnormBlock,
570 vk::Format::eBc5SnormBlock,
571 vk::Format::eBc7UnormBlock,
572 vk::Format::eBc6HUfloatBlock,
573 vk::Format::eBc6HSfloatBlock,
574 vk::Format::eBc1RgbaSrgbBlock,
575 vk::Format::eBc2SrgbBlock,
576 vk::Format::eBc3SrgbBlock,
577 vk::Format::eBc7SrgbBlock,
578 vk::Format::eAstc4x4UnormBlock,
579 vk::Format::eAstc4x4SrgbBlock,
580 vk::Format::eAstc5x4UnormBlock,
581 vk::Format::eAstc5x4SrgbBlock,
582 vk::Format::eAstc5x5UnormBlock,
583 vk::Format::eAstc5x5SrgbBlock,
584 vk::Format::eAstc6x5UnormBlock,
585 vk::Format::eAstc6x5SrgbBlock,
586 vk::Format::eAstc6x6UnormBlock,
587 vk::Format::eAstc6x6SrgbBlock,
588 vk::Format::eAstc8x5UnormBlock,
589 vk::Format::eAstc8x5SrgbBlock,
590 vk::Format::eAstc8x6UnormBlock,
591 vk::Format::eAstc8x6SrgbBlock,
592 vk::Format::eAstc8x8UnormBlock,
593 vk::Format::eAstc8x8SrgbBlock,
594 vk::Format::eAstc10x5UnormBlock,
595 vk::Format::eAstc10x5SrgbBlock,
596 vk::Format::eAstc10x6UnormBlock,
597 vk::Format::eAstc10x6SrgbBlock,
598 vk::Format::eAstc10x8UnormBlock,
599 vk::Format::eAstc10x8SrgbBlock,
600 vk::Format::eAstc10x10UnormBlock,
601 vk::Format::eAstc10x10SrgbBlock,
602 vk::Format::eAstc12x10UnormBlock,
603 vk::Format::eAstc12x10SrgbBlock,
604 vk::Format::eAstc12x12UnormBlock,
605 vk::Format::eAstc12x12SrgbBlock,
606 vk::Format::eE5B9G9R9UfloatPack32};
607 std::unordered_map<vk::Format, vk::FormatProperties> format_properties;
608 for (const auto format : formats) {
609 format_properties.emplace(format, physical.getFormatProperties(format, dld));
610 }
611 return format_properties;
612}
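For reference, the per-format query that backs this table maps to vkGetPhysicalDeviceFormatProperties in the raw C API. A sketch with the format list truncated for brevity; the helper name QueryFormatProperties is illustrative:

#include <unordered_map>
#include <vulkan/vulkan.h>

std::unordered_map<VkFormat, VkFormatProperties> QueryFormatProperties(VkPhysicalDevice physical) {
    static constexpr VkFormat formats[] = {
        VK_FORMAT_A8B8G8R8_UNORM_PACK32, // truncated: the cache above covers ~80 formats
        VK_FORMAT_R16G16B16A16_SFLOAT,
        VK_FORMAT_D24_UNORM_S8_UINT,
    };
    std::unordered_map<VkFormat, VkFormatProperties> cache;
    for (const VkFormat format : formats) {
        VkFormatProperties properties;
        vkGetPhysicalDeviceFormatProperties(physical, format, &properties);
        cache.emplace(format, properties);
    }
    return cache;
}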
613
614} // namespace Vulkan 674} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index d9d809852..60d64572a 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -8,8 +8,9 @@
8#include <string_view> 8#include <string_view>
9#include <unordered_map> 9#include <unordered_map>
10#include <vector> 10#include <vector>
11
11#include "common/common_types.h" 12#include "common/common_types.h"
12#include "video_core/renderer_vulkan/declarations.h" 13#include "video_core/renderer_vulkan/wrapper.h"
13 14
14namespace Vulkan { 15namespace Vulkan {
15 16
@@ -22,12 +23,12 @@ const u32 GuestWarpSize = 32;
22/// Handles data specific to a physical device. 23/// Handles data specific to a physical device.
23class VKDevice final { 24class VKDevice final {
24public: 25public:
25 explicit VKDevice(const vk::DispatchLoaderDynamic& dld, vk::PhysicalDevice physical, 26 explicit VKDevice(VkInstance instance, vk::PhysicalDevice physical, VkSurfaceKHR surface,
26 vk::SurfaceKHR surface); 27 const vk::InstanceDispatch& dld);
27 ~VKDevice(); 28 ~VKDevice();
28 29
29 /// Initializes the device. Returns true on success. 30 /// Initializes the device. Returns true on success.
30 bool Create(vk::Instance instance); 31 bool Create();
31 32
32 /** 33 /**
33 * Returns a format supported by the device for the passed requirements. 34 * Returns a format supported by the device for the passed requirements.
@@ -36,20 +37,20 @@ public:
36 * @param format_type Format type usage. 37 * @param format_type Format type usage.
37 * @returns A format supported by the device. 38 * @returns A format supported by the device.
38 */ 39 */
39 vk::Format GetSupportedFormat(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, 40 VkFormat GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
40 FormatType format_type) const; 41 FormatType format_type) const;
41 42
42 /// Reports a device loss. 43 /// Reports a device loss.
43 void ReportLoss() const; 44 void ReportLoss() const;
44 45
45 /// Returns the dispatch loader with direct function pointers of the device. 46 /// Returns the dispatch loader with direct function pointers of the device.
46 const vk::DispatchLoaderDynamic& GetDispatchLoader() const { 47 const vk::DeviceDispatch& GetDispatchLoader() const {
47 return dld; 48 return dld;
48 } 49 }
49 50
50 /// Returns the logical device. 51 /// Returns the logical device.
51 vk::Device GetLogical() const { 52 const vk::Device& GetLogical() const {
52 return logical.get(); 53 return logical;
53 } 54 }
54 55
55 /// Returns the physical device. 56 /// Returns the physical device.
@@ -79,7 +80,7 @@ public:
79 80
80 /// Returns true if the device is integrated with the host CPU. 81 /// Returns true if the device is integrated with the host CPU.
81 bool IsIntegrated() const { 82 bool IsIntegrated() const {
82 return properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; 83 return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
83 } 84 }
84 85
85 /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers. 86 /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
@@ -98,27 +99,27 @@ public:
98 } 99 }
99 100
100 /// Returns the driver ID. 101 /// Returns the driver ID.
101 vk::DriverIdKHR GetDriverID() const { 102 VkDriverIdKHR GetDriverID() const {
102 return driver_id; 103 return driver_id;
103 } 104 }
104 105
105 /// Returns uniform buffer alignment requirement. 106 /// Returns uniform buffer alignment requirement.
106 vk::DeviceSize GetUniformBufferAlignment() const { 107 VkDeviceSize GetUniformBufferAlignment() const {
107 return properties.limits.minUniformBufferOffsetAlignment; 108 return properties.limits.minUniformBufferOffsetAlignment;
108 } 109 }
109 110
110 /// Returns storage alignment requirement. 111 /// Returns storage alignment requirement.
111 vk::DeviceSize GetStorageBufferAlignment() const { 112 VkDeviceSize GetStorageBufferAlignment() const {
112 return properties.limits.minStorageBufferOffsetAlignment; 113 return properties.limits.minStorageBufferOffsetAlignment;
113 } 114 }
114 115
115 /// Returns the maximum range for storage buffers. 116 /// Returns the maximum range for storage buffers.
116 vk::DeviceSize GetMaxStorageBufferRange() const { 117 VkDeviceSize GetMaxStorageBufferRange() const {
117 return properties.limits.maxStorageBufferRange; 118 return properties.limits.maxStorageBufferRange;
118 } 119 }
119 120
120 /// Returns the maximum size for push constants. 121 /// Returns the maximum size for push constants.
121 vk::DeviceSize GetMaxPushConstantsSize() const { 122 VkDeviceSize GetMaxPushConstantsSize() const {
122 return properties.limits.maxPushConstantsSize; 123 return properties.limits.maxPushConstantsSize;
123 } 124 }
124 125
@@ -138,8 +139,8 @@ public:
138 } 139 }
139 140
140 /// Returns true if the device can be forced to use the guest warp size. 141 /// Returns true if the device can be forced to use the guest warp size.
141 bool IsGuestWarpSizeSupported(vk::ShaderStageFlagBits stage) const { 142 bool IsGuestWarpSizeSupported(VkShaderStageFlagBits stage) const {
142 return (guest_warp_stages & stage) != vk::ShaderStageFlags{}; 143 return guest_warp_stages & stage;
143 } 144 }
144 145
145 /// Returns true if formatless image load is supported. 146 /// Returns true if formatless image load is supported.
@@ -188,15 +189,14 @@ public:
188 } 189 }
189 190
190 /// Checks if the physical device is suitable. 191 /// Checks if the physical device is suitable.
191 static bool IsSuitable(vk::PhysicalDevice physical, vk::SurfaceKHR surface, 192 static bool IsSuitable(vk::PhysicalDevice physical, VkSurfaceKHR surface);
192 const vk::DispatchLoaderDynamic& dld);
193 193
194private: 194private:
195 /// Loads extensions into a vector and stores available ones in this object. 195 /// Loads extensions into a vector and stores available ones in this object.
196 std::vector<const char*> LoadExtensions(); 196 std::vector<const char*> LoadExtensions();
197 197
198 /// Sets up queue families. 198 /// Sets up queue families.
199 void SetupFamilies(vk::SurfaceKHR surface); 199 void SetupFamilies(VkSurfaceKHR surface);
200 200
201 /// Sets up device features. 201 /// Sets up device features.
202 void SetupFeatures(); 202 void SetupFeatures();
@@ -205,32 +205,28 @@ private:
205 void CollectTelemetryParameters(); 205 void CollectTelemetryParameters();
206 206
207 /// Returns a list of queue initialization descriptors. 207 /// Returns a list of queue initialization descriptors.
208 std::vector<vk::DeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const; 208 std::vector<VkDeviceQueueCreateInfo> GetDeviceQueueCreateInfos() const;
209 209
210 /// Returns true if ASTC textures are natively supported. 210 /// Returns true if ASTC textures are natively supported.
211 bool IsOptimalAstcSupported(const vk::PhysicalDeviceFeatures& features) const; 211 bool IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const;
212 212
213 /// Returns true if a format is supported. 213 /// Returns true if a format is supported.
214 bool IsFormatSupported(vk::Format wanted_format, vk::FormatFeatureFlags wanted_usage, 214 bool IsFormatSupported(VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
215 FormatType format_type) const; 215 FormatType format_type) const;
216 216
217 /// Returns the device properties for Vulkan formats. 217 vk::DeviceDispatch dld; ///< Device function pointers.
218 static std::unordered_map<vk::Format, vk::FormatProperties> GetFormatProperties( 218 vk::PhysicalDevice physical; ///< Physical device.
219 const vk::DispatchLoaderDynamic& dld, vk::PhysicalDevice physical); 219 VkPhysicalDeviceProperties properties; ///< Device properties.
220 220 vk::Device logical; ///< Logical device.
221 vk::DispatchLoaderDynamic dld; ///< Device function pointers. 221 vk::Queue graphics_queue; ///< Main graphics queue.
222 vk::PhysicalDevice physical; ///< Physical device. 222 vk::Queue present_queue; ///< Main present queue.
223 vk::PhysicalDeviceProperties properties; ///< Device properties. 223 u32 graphics_family{}; ///< Main graphics queue family index.
224 UniqueDevice logical; ///< Logical device. 224 u32 present_family{}; ///< Main present queue family index.
225 vk::Queue graphics_queue; ///< Main graphics queue. 225 VkDriverIdKHR driver_id{}; ///< Driver ID.
226 vk::Queue present_queue; ///< Main present queue. 226 VkShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced.
227 u32 graphics_family{}; ///< Main graphics queue family index. 227 bool is_optimal_astc_supported{}; ///< Support for native ASTC.
228 u32 present_family{}; ///< Main present queue family index. 228 bool is_float16_supported{}; ///< Support for float16 arithmetic.
229 vk::DriverIdKHR driver_id{}; ///< Driver ID. 229 bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
230 vk::ShaderStageFlags guest_warp_stages{}; ///< Stages where the guest warp size can be forced.
231 bool is_optimal_astc_supported{}; ///< Support for native ASTC.
232 bool is_float16_supported{}; ///< Support for float16 arithmetic.
233 bool is_warp_potentially_bigger{}; ///< Host warp size can be bigger than guest.
234 bool is_formatless_image_load_supported{}; ///< Support for shader image read without format. 230 bool is_formatless_image_load_supported{}; ///< Support for shader image read without format.
235 bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs. 231 bool khr_uniform_buffer_standard_layout{}; ///< Support for std430 on UBOs.
236 bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8. 232 bool ext_index_type_uint8{}; ///< Support for VK_EXT_index_type_uint8.
@@ -244,7 +240,7 @@ private:
244 std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions. 240 std::vector<std::string> reported_extensions; ///< Reported Vulkan extensions.
245 241
246 /// Format properties dictionary. 242 /// Format properties dictionary.
247 std::unordered_map<vk::Format, vk::FormatProperties> format_properties; 243 std::unordered_map<VkFormat, VkFormatProperties> format_properties;
248}; 244};
249 245
250} // namespace Vulkan 246} // namespace Vulkan
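GetSupportedFormat leans on the private IsFormatSupported, which tests the cached properties against the wanted feature bits. A sketch of that test, assuming the cache maps VkFormat to VkFormatProperties as declared above; the FormatType enumerators Linear and Optimal are an assumption, since the real enum is not shown in this diff:

#include <unordered_map>
#include <vulkan/vulkan.h>

enum class FormatType { Linear, Optimal }; // assumed enumerators

bool IsFormatSupported(const std::unordered_map<VkFormat, VkFormatProperties>& format_properties,
                       VkFormat wanted_format, VkFormatFeatureFlags wanted_usage,
                       FormatType format_type) {
    const auto it = format_properties.find(wanted_format);
    if (it == format_properties.end()) {
        return false; // format not in the cache: treat as unsupported
    }
    const VkFormatFeatureFlags supported = format_type == FormatType::Linear
                                               ? it->second.linearTilingFeatures
                                               : it->second.optimalTilingFeatures;
    // All wanted feature bits have to be present, not just some of them.
    return (supported & wanted_usage) == wanted_usage;
}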
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 6a02403c1..b540b838d 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -2,11 +2,13 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <array>
6#include <cstring>
5#include <vector> 7#include <vector>
8
6#include "common/assert.h" 9#include "common/assert.h"
7#include "common/common_types.h" 10#include "common/common_types.h"
8#include "common/microprofile.h" 11#include "common/microprofile.h"
9#include "video_core/renderer_vulkan/declarations.h"
10#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 12#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
11#include "video_core/renderer_vulkan/maxwell_to_vk.h" 13#include "video_core/renderer_vulkan/maxwell_to_vk.h"
12#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 14#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
@@ -16,6 +18,7 @@
16#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 18#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
17#include "video_core/renderer_vulkan/vk_scheduler.h" 19#include "video_core/renderer_vulkan/vk_scheduler.h"
18#include "video_core/renderer_vulkan/vk_update_descriptor.h" 20#include "video_core/renderer_vulkan/vk_update_descriptor.h"
21#include "video_core/renderer_vulkan/wrapper.h"
19 22
20namespace Vulkan { 23namespace Vulkan {
21 24
@@ -23,21 +26,26 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
23 26
24namespace { 27namespace {
25 28
26vk::StencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) { 29VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) {
27 return vk::StencilOpState(MaxwellToVK::StencilOp(face.action_stencil_fail), 30 VkStencilOpState state;
28 MaxwellToVK::StencilOp(face.action_depth_pass), 31 state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail);
29 MaxwellToVK::StencilOp(face.action_depth_fail), 32 state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass);
30 MaxwellToVK::ComparisonOp(face.test_func), 0, 0, 0); 33 state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail);
34 state.compareOp = MaxwellToVK::ComparisonOp(face.test_func);
35 state.compareMask = 0;
36 state.writeMask = 0;
37 state.reference = 0;
38 return state;
31} 39}
32 40
33bool SupportsPrimitiveRestart(vk::PrimitiveTopology topology) { 41bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
34 static constexpr std::array unsupported_topologies = { 42 static constexpr std::array unsupported_topologies = {
35 vk::PrimitiveTopology::ePointList, 43 VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
36 vk::PrimitiveTopology::eLineList, 44 VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
37 vk::PrimitiveTopology::eTriangleList, 45 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
38 vk::PrimitiveTopology::eLineListWithAdjacency, 46 VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
39 vk::PrimitiveTopology::eTriangleListWithAdjacency, 47 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
40 vk::PrimitiveTopology::ePatchList}; 48 VK_PRIMITIVE_TOPOLOGY_PATCH_LIST};
41 return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies), 49 return std::find(std::begin(unsupported_topologies), std::end(unsupported_topologies),
42 topology) == std::end(unsupported_topologies); 50 topology) == std::end(unsupported_topologies);
43} 51}
@@ -49,7 +57,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche
49 VKUpdateDescriptorQueue& update_descriptor_queue, 57 VKUpdateDescriptorQueue& update_descriptor_queue,
50 VKRenderPassCache& renderpass_cache, 58 VKRenderPassCache& renderpass_cache,
51 const GraphicsPipelineCacheKey& key, 59 const GraphicsPipelineCacheKey& key,
52 const std::vector<vk::DescriptorSetLayoutBinding>& bindings, 60 vk::Span<VkDescriptorSetLayoutBinding> bindings,
53 const SPIRVProgram& program) 61 const SPIRVProgram& program)
54 : device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()}, 62 : device{device}, scheduler{scheduler}, fixed_state{key.fixed_state}, hash{key.Hash()},
55 descriptor_set_layout{CreateDescriptorSetLayout(bindings)}, 63 descriptor_set_layout{CreateDescriptorSetLayout(bindings)},
@@ -63,7 +71,7 @@ VKGraphicsPipeline::VKGraphicsPipeline(const VKDevice& device, VKScheduler& sche
63 71
64VKGraphicsPipeline::~VKGraphicsPipeline() = default; 72VKGraphicsPipeline::~VKGraphicsPipeline() = default;
65 73
66vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() { 74VkDescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
67 if (!descriptor_template) { 75 if (!descriptor_template) {
68 return {}; 76 return {};
69 } 77 }
@@ -72,27 +80,32 @@ vk::DescriptorSet VKGraphicsPipeline::CommitDescriptorSet() {
72 return set; 80 return set;
73} 81}
74 82
75UniqueDescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout( 83vk::DescriptorSetLayout VKGraphicsPipeline::CreateDescriptorSetLayout(
76 const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const { 84 vk::Span<VkDescriptorSetLayoutBinding> bindings) const {
77 const vk::DescriptorSetLayoutCreateInfo descriptor_set_layout_ci( 85 VkDescriptorSetLayoutCreateInfo ci;
78 {}, static_cast<u32>(bindings.size()), bindings.data()); 86 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
79 87 ci.pNext = nullptr;
80 const auto dev = device.GetLogical(); 88 ci.flags = 0;
81 const auto& dld = device.GetDispatchLoader(); 89 ci.bindingCount = bindings.size();
82 return dev.createDescriptorSetLayoutUnique(descriptor_set_layout_ci, nullptr, dld); 90 ci.pBindings = bindings.data();
91 return device.GetLogical().CreateDescriptorSetLayout(ci);
83} 92}
84 93
85UniquePipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const { 94vk::PipelineLayout VKGraphicsPipeline::CreatePipelineLayout() const {
86 const vk::PipelineLayoutCreateInfo pipeline_layout_ci({}, 1, &*descriptor_set_layout, 0, 95 VkPipelineLayoutCreateInfo ci;
87 nullptr); 96 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
88 const auto dev = device.GetLogical(); 97 ci.pNext = nullptr;
89 const auto& dld = device.GetDispatchLoader(); 98 ci.flags = 0;
90 return dev.createPipelineLayoutUnique(pipeline_layout_ci, nullptr, dld); 99 ci.setLayoutCount = 1;
100 ci.pSetLayouts = descriptor_set_layout.address();
101 ci.pushConstantRangeCount = 0;
102 ci.pPushConstantRanges = nullptr;
103 return device.GetLogical().CreatePipelineLayout(ci);
91} 104}
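Both helpers reduce to a plain create call once the structure is filled. A sketch of the pipeline-layout half against the raw C API, with an explicit VkResult check standing in for whatever the wrapper does internally; the helper name MakePipelineLayout is illustrative:

#include <vulkan/vulkan.h>

VkPipelineLayout MakePipelineLayout(VkDevice device, VkDescriptorSetLayout set_layout) {
    VkPipelineLayoutCreateInfo ci{}; // zero-init: pNext = nullptr, flags = 0, no push constants
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    ci.setLayoutCount = 1;
    ci.pSetLayouts = &set_layout;
    VkPipelineLayout layout = VK_NULL_HANDLE;
    if (vkCreatePipelineLayout(device, &ci, nullptr, &layout) != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return layout;
}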
92 105
93UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplate( 106vk::DescriptorUpdateTemplateKHR VKGraphicsPipeline::CreateDescriptorUpdateTemplate(
94 const SPIRVProgram& program) const { 107 const SPIRVProgram& program) const {
95 std::vector<vk::DescriptorUpdateTemplateEntry> template_entries; 108 std::vector<VkDescriptorUpdateTemplateEntry> template_entries;
96 u32 binding = 0; 109 u32 binding = 0;
97 u32 offset = 0; 110 u32 offset = 0;
98 for (const auto& stage : program) { 111 for (const auto& stage : program) {
@@ -102,38 +115,47 @@ UniqueDescriptorUpdateTemplate VKGraphicsPipeline::CreateDescriptorUpdateTemplat
102 } 115 }
103 if (template_entries.empty()) { 116 if (template_entries.empty()) {
104 // If the shader doesn't use descriptor sets, skip template creation. 117 // If the shader doesn't use descriptor sets, skip template creation.
105 return UniqueDescriptorUpdateTemplate{}; 118 return {};
106 } 119 }
107 120
108 const vk::DescriptorUpdateTemplateCreateInfo template_ci( 121 VkDescriptorUpdateTemplateCreateInfoKHR ci;
109 {}, static_cast<u32>(template_entries.size()), template_entries.data(), 122 ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
110 vk::DescriptorUpdateTemplateType::eDescriptorSet, *descriptor_set_layout, 123 ci.pNext = nullptr;
111 vk::PipelineBindPoint::eGraphics, *layout, DESCRIPTOR_SET); 124 ci.flags = 0;
112 125 ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
113 const auto dev = device.GetLogical(); 126 ci.pDescriptorUpdateEntries = template_entries.data();
114 const auto& dld = device.GetDispatchLoader(); 127 ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
115 return dev.createDescriptorUpdateTemplateUnique(template_ci, nullptr, dld); 128 ci.descriptorSetLayout = *descriptor_set_layout;
129 ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
130 ci.pipelineLayout = *layout;
131 ci.set = DESCRIPTOR_SET;
132 return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
116} 133}
117 134
118std::vector<UniqueShaderModule> VKGraphicsPipeline::CreateShaderModules( 135std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
119 const SPIRVProgram& program) const { 136 const SPIRVProgram& program) const {
120 std::vector<UniqueShaderModule> modules; 137 VkShaderModuleCreateInfo ci;
121 const auto dev = device.GetLogical(); 138 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
122 const auto& dld = device.GetDispatchLoader(); 139 ci.pNext = nullptr;
140 ci.flags = 0;
141
142 std::vector<vk::ShaderModule> modules;
143 modules.reserve(Maxwell::MaxShaderStage);
123 for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) { 144 for (std::size_t i = 0; i < Maxwell::MaxShaderStage; ++i) {
124 const auto& stage = program[i]; 145 const auto& stage = program[i];
125 if (!stage) { 146 if (!stage) {
126 continue; 147 continue;
127 } 148 }
128 const vk::ShaderModuleCreateInfo module_ci({}, stage->code.size() * sizeof(u32), 149
129 stage->code.data()); 150 ci.codeSize = stage->code.size() * sizeof(u32);
130 modules.emplace_back(dev.createShaderModuleUnique(module_ci, nullptr, dld)); 151 ci.pCode = stage->code.data();
152 modules.push_back(device.GetLogical().CreateShaderModule(ci));
131 } 153 }
132 return modules; 154 return modules;
133} 155}
134 156
135UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params, 157vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
136 const SPIRVProgram& program) const { 158 const SPIRVProgram& program) const {
137 const auto& vi = fixed_state.vertex_input; 159 const auto& vi = fixed_state.vertex_input;
138 const auto& ia = fixed_state.input_assembly; 160 const auto& ia = fixed_state.input_assembly;
139 const auto& ds = fixed_state.depth_stencil; 161 const auto& ds = fixed_state.depth_stencil;
@@ -141,19 +163,26 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
141 const auto& ts = fixed_state.tessellation; 163 const auto& ts = fixed_state.tessellation;
142 const auto& rs = fixed_state.rasterizer; 164 const auto& rs = fixed_state.rasterizer;
143 165
144 std::vector<vk::VertexInputBindingDescription> vertex_bindings; 166 std::vector<VkVertexInputBindingDescription> vertex_bindings;
145 std::vector<vk::VertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors; 167 std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
146 for (std::size_t i = 0; i < vi.num_bindings; ++i) { 168 for (std::size_t i = 0; i < vi.num_bindings; ++i) {
147 const auto& binding = vi.bindings[i]; 169 const auto& binding = vi.bindings[i];
148 const bool instanced = binding.divisor != 0; 170 const bool instanced = binding.divisor != 0;
149 const auto rate = instanced ? vk::VertexInputRate::eInstance : vk::VertexInputRate::eVertex; 171 const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
150 vertex_bindings.emplace_back(binding.index, binding.stride, rate); 172
173 auto& vertex_binding = vertex_bindings.emplace_back();
174 vertex_binding.binding = binding.index;
175 vertex_binding.stride = binding.stride;
176 vertex_binding.inputRate = rate;
177
151 if (instanced) { 178 if (instanced) {
152 vertex_binding_divisors.emplace_back(binding.index, binding.divisor); 179 auto& binding_divisor = vertex_binding_divisors.emplace_back();
180 binding_divisor.binding = binding.index;
181 binding_divisor.divisor = binding.divisor;
153 } 182 }
154 } 183 }
155 184
156 std::vector<vk::VertexInputAttributeDescription> vertex_attributes; 185 std::vector<VkVertexInputAttributeDescription> vertex_attributes;
157 const auto& input_attributes = program[0]->entries.attributes; 186 const auto& input_attributes = program[0]->entries.attributes;
158 for (std::size_t i = 0; i < vi.num_attributes; ++i) { 187 for (std::size_t i = 0; i < vi.num_attributes; ++i) {
159 const auto& attribute = vi.attributes[i]; 188 const auto& attribute = vi.attributes[i];
@@ -161,109 +190,194 @@ UniquePipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& render
161 // Skip attributes not used by the vertex shaders. 190 // Skip attributes not used by the vertex shaders.
162 continue; 191 continue;
163 } 192 }
164 vertex_attributes.emplace_back(attribute.index, attribute.buffer, 193 auto& vertex_attribute = vertex_attributes.emplace_back();
165 MaxwellToVK::VertexFormat(attribute.type, attribute.size), 194 vertex_attribute.location = attribute.index;
166 attribute.offset); 195 vertex_attribute.binding = attribute.buffer;
196 vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size);
197 vertex_attribute.offset = attribute.offset;
167 } 198 }
168 199
169 vk::PipelineVertexInputStateCreateInfo vertex_input_ci( 200 VkPipelineVertexInputStateCreateInfo vertex_input_ci;
170 {}, static_cast<u32>(vertex_bindings.size()), vertex_bindings.data(), 201 vertex_input_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
171 static_cast<u32>(vertex_attributes.size()), vertex_attributes.data()); 202 vertex_input_ci.pNext = nullptr;
172 203 vertex_input_ci.flags = 0;
173 const vk::PipelineVertexInputDivisorStateCreateInfoEXT vertex_input_divisor_ci( 204 vertex_input_ci.vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size());
174 static_cast<u32>(vertex_binding_divisors.size()), vertex_binding_divisors.data()); 205 vertex_input_ci.pVertexBindingDescriptions = vertex_bindings.data();
206 vertex_input_ci.vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size());
207 vertex_input_ci.pVertexAttributeDescriptions = vertex_attributes.data();
208
209 VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci;
210 input_divisor_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT;
211 input_divisor_ci.pNext = nullptr;
212 input_divisor_ci.vertexBindingDivisorCount = static_cast<u32>(vertex_binding_divisors.size());
213 input_divisor_ci.pVertexBindingDivisors = vertex_binding_divisors.data();
175 if (!vertex_binding_divisors.empty()) { 214 if (!vertex_binding_divisors.empty()) {
176 vertex_input_ci.pNext = &vertex_input_divisor_ci; 215 vertex_input_ci.pNext = &input_divisor_ci;
177 } 216 }
178 217
179 const auto primitive_topology = MaxwellToVK::PrimitiveTopology(device, ia.topology); 218 VkPipelineInputAssemblyStateCreateInfo input_assembly_ci;
180 const vk::PipelineInputAssemblyStateCreateInfo input_assembly_ci( 219 input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
181 {}, primitive_topology, 220 input_assembly_ci.pNext = nullptr;
182 ia.primitive_restart_enable && SupportsPrimitiveRestart(primitive_topology)); 221 input_assembly_ci.flags = 0;
183 222 input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology);
184 const vk::PipelineTessellationStateCreateInfo tessellation_ci({}, ts.patch_control_points); 223 input_assembly_ci.primitiveRestartEnable =
185 224 ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology);
186 const vk::PipelineViewportStateCreateInfo viewport_ci({}, Maxwell::NumViewports, nullptr, 225
187 Maxwell::NumViewports, nullptr); 226 VkPipelineTessellationStateCreateInfo tessellation_ci;
188 227 tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
189 // TODO(Rodrigo): Find out what's the default register value for front face 228 tessellation_ci.pNext = nullptr;
190 const vk::PipelineRasterizationStateCreateInfo rasterizer_ci( 229 tessellation_ci.flags = 0;
191 {}, rs.depth_clamp_enable, false, vk::PolygonMode::eFill, 230 tessellation_ci.patchControlPoints = ts.patch_control_points;
192 rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : vk::CullModeFlagBits::eNone, 231
193 MaxwellToVK::FrontFace(rs.front_face), rs.depth_bias_enable, 0.0f, 0.0f, 0.0f, 1.0f); 232 VkPipelineViewportStateCreateInfo viewport_ci;
194 233 viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
195 const vk::PipelineMultisampleStateCreateInfo multisampling_ci( 234 viewport_ci.pNext = nullptr;
196 {}, vk::SampleCountFlagBits::e1, false, 0.0f, nullptr, false, false); 235 viewport_ci.flags = 0;
197 236 viewport_ci.viewportCount = Maxwell::NumViewports;
198 const vk::CompareOp depth_test_compare = ds.depth_test_enable 237 viewport_ci.pViewports = nullptr;
199 ? MaxwellToVK::ComparisonOp(ds.depth_test_function) 238 viewport_ci.scissorCount = Maxwell::NumViewports;
200 : vk::CompareOp::eAlways; 239 viewport_ci.pScissors = nullptr;
201 240
202 const vk::PipelineDepthStencilStateCreateInfo depth_stencil_ci( 241 VkPipelineRasterizationStateCreateInfo rasterization_ci;
203 {}, ds.depth_test_enable, ds.depth_write_enable, depth_test_compare, ds.depth_bounds_enable, 242 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
204 ds.stencil_enable, GetStencilFaceState(ds.front_stencil), 243 rasterization_ci.pNext = nullptr;
205 GetStencilFaceState(ds.back_stencil), 0.0f, 0.0f); 244 rasterization_ci.flags = 0;
206 245 rasterization_ci.depthClampEnable = rs.depth_clamp_enable;
207 std::array<vk::PipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments; 246 rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
247 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
248 rasterization_ci.cullMode =
249 rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE;
250 rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face);
251 rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
252 rasterization_ci.depthBiasConstantFactor = 0.0f;
253 rasterization_ci.depthBiasClamp = 0.0f;
254 rasterization_ci.depthBiasSlopeFactor = 0.0f;
255 rasterization_ci.lineWidth = 1.0f;
256
257 VkPipelineMultisampleStateCreateInfo multisample_ci;
258 multisample_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
259 multisample_ci.pNext = nullptr;
260 multisample_ci.flags = 0;
261 multisample_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
262 multisample_ci.sampleShadingEnable = VK_FALSE;
263 multisample_ci.minSampleShading = 0.0f;
264 multisample_ci.pSampleMask = nullptr;
265 multisample_ci.alphaToCoverageEnable = VK_FALSE;
266 multisample_ci.alphaToOneEnable = VK_FALSE;
267
268 VkPipelineDepthStencilStateCreateInfo depth_stencil_ci;
269 depth_stencil_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
270 depth_stencil_ci.pNext = nullptr;
271 depth_stencil_ci.flags = 0;
272 depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
273 depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
274 depth_stencil_ci.depthCompareOp = ds.depth_test_enable
275 ? MaxwellToVK::ComparisonOp(ds.depth_test_function)
276 : VK_COMPARE_OP_ALWAYS;
277 depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
278 depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
279 depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil);
280 depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil);
281 depth_stencil_ci.minDepthBounds = 0.0f;
282 depth_stencil_ci.maxDepthBounds = 0.0f;
283
284 std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
208 const std::size_t num_attachments = 285 const std::size_t num_attachments =
209 std::min(cd.attachments_count, renderpass_params.color_attachments.size()); 286 std::min(cd.attachments_count, renderpass_params.color_attachments.size());
210 for (std::size_t i = 0; i < num_attachments; ++i) { 287 for (std::size_t i = 0; i < num_attachments; ++i) {
211 constexpr std::array component_table{ 288 static constexpr std::array component_table = {
212 vk::ColorComponentFlagBits::eR, vk::ColorComponentFlagBits::eG, 289 VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
213 vk::ColorComponentFlagBits::eB, vk::ColorComponentFlagBits::eA}; 290 VK_COLOR_COMPONENT_A_BIT};
214 const auto& blend = cd.attachments[i]; 291 const auto& blend = cd.attachments[i];
215 292
216 vk::ColorComponentFlags color_components{}; 293 VkColorComponentFlags color_components = 0;
217 for (std::size_t j = 0; j < component_table.size(); ++j) { 294 for (std::size_t j = 0; j < component_table.size(); ++j) {
218 if (blend.components[j]) 295 if (blend.components[j]) {
219 color_components |= component_table[j]; 296 color_components |= component_table[j];
297 }
220 } 298 }
221 299
222 cb_attachments[i] = vk::PipelineColorBlendAttachmentState( 300 VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i];
223 blend.enable, MaxwellToVK::BlendFactor(blend.src_rgb_func), 301 attachment.blendEnable = blend.enable;
224 MaxwellToVK::BlendFactor(blend.dst_rgb_func), 302 attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func);
225 MaxwellToVK::BlendEquation(blend.rgb_equation), 303 attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func);
226 MaxwellToVK::BlendFactor(blend.src_a_func), MaxwellToVK::BlendFactor(blend.dst_a_func), 304 attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation);
227 MaxwellToVK::BlendEquation(blend.a_equation), color_components); 305 attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func);
306 attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func);
307 attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation);
308 attachment.colorWriteMask = color_components;
228 } 309 }
229 const vk::PipelineColorBlendStateCreateInfo color_blending_ci({}, false, vk::LogicOp::eCopy, 310
230 static_cast<u32>(num_attachments), 311 VkPipelineColorBlendStateCreateInfo color_blend_ci;
231 cb_attachments.data(), {}); 312 color_blend_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
232 313 color_blend_ci.pNext = nullptr;
233 constexpr std::array dynamic_states = { 314 color_blend_ci.flags = 0;
234 vk::DynamicState::eViewport, vk::DynamicState::eScissor, 315 color_blend_ci.logicOpEnable = VK_FALSE;
235 vk::DynamicState::eDepthBias, vk::DynamicState::eBlendConstants, 316 color_blend_ci.logicOp = VK_LOGIC_OP_COPY;
236 vk::DynamicState::eDepthBounds, vk::DynamicState::eStencilCompareMask, 317 color_blend_ci.attachmentCount = static_cast<u32>(num_attachments);
237 vk::DynamicState::eStencilWriteMask, vk::DynamicState::eStencilReference}; 318 color_blend_ci.pAttachments = cb_attachments.data();
238 const vk::PipelineDynamicStateCreateInfo dynamic_state_ci( 319 std::memset(color_blend_ci.blendConstants, 0, sizeof(color_blend_ci.blendConstants));
239 {}, static_cast<u32>(dynamic_states.size()), dynamic_states.data()); 320
240 321 static constexpr std::array dynamic_states = {
241 vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci; 322 VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
323 VK_DYNAMIC_STATE_DEPTH_BIAS, VK_DYNAMIC_STATE_BLEND_CONSTANTS,
324 VK_DYNAMIC_STATE_DEPTH_BOUNDS, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
325 VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE};
326
327 VkPipelineDynamicStateCreateInfo dynamic_state_ci;
328 dynamic_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
329 dynamic_state_ci.pNext = nullptr;
330 dynamic_state_ci.flags = 0;
331 dynamic_state_ci.dynamicStateCount = static_cast<u32>(dynamic_states.size());
332 dynamic_state_ci.pDynamicStates = dynamic_states.data();
333
334 VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
335 subgroup_size_ci.sType =
336 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
337 subgroup_size_ci.pNext = nullptr;
242 subgroup_size_ci.requiredSubgroupSize = GuestWarpSize; 338 subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
243 339
244 std::vector<vk::PipelineShaderStageCreateInfo> shader_stages; 340 std::vector<VkPipelineShaderStageCreateInfo> shader_stages;
245 std::size_t module_index = 0; 341 std::size_t module_index = 0;
246 for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) { 342 for (std::size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
247 if (!program[stage]) { 343 if (!program[stage]) {
248 continue; 344 continue;
249 } 345 }
250 const auto stage_enum = static_cast<Tegra::Engines::ShaderType>(stage); 346 VkPipelineShaderStageCreateInfo& stage_ci = shader_stages.emplace_back();
251 const auto vk_stage = MaxwellToVK::ShaderStage(stage_enum); 347 stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
252 auto& stage_ci = shader_stages.emplace_back(vk::PipelineShaderStageCreateFlags{}, vk_stage, 348 stage_ci.pNext = nullptr;
253 *modules[module_index++], "main", nullptr); 349 stage_ci.flags = 0;
254 if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(vk_stage)) { 350 stage_ci.stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage));
351 stage_ci.module = *modules[module_index++];
352 stage_ci.pName = "main";
353 stage_ci.pSpecializationInfo = nullptr;
354
355 if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
255 stage_ci.pNext = &subgroup_size_ci; 356 stage_ci.pNext = &subgroup_size_ci;
256 } 357 }
257 } 358 }
258 359
259 const vk::GraphicsPipelineCreateInfo create_info( 360 VkGraphicsPipelineCreateInfo ci;
260 {}, static_cast<u32>(shader_stages.size()), shader_stages.data(), &vertex_input_ci, 361 ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
261 &input_assembly_ci, &tessellation_ci, &viewport_ci, &rasterizer_ci, &multisampling_ci, 362 ci.pNext = nullptr;
262 &depth_stencil_ci, &color_blending_ci, &dynamic_state_ci, *layout, renderpass, 0, {}, 0); 363 ci.flags = 0;
263 364 ci.stageCount = static_cast<u32>(shader_stages.size());
264 const auto dev = device.GetLogical(); 365 ci.pStages = shader_stages.data();
265 const auto& dld = device.GetDispatchLoader(); 366 ci.pVertexInputState = &vertex_input_ci;
266 return dev.createGraphicsPipelineUnique(nullptr, create_info, nullptr, dld); 367 ci.pInputAssemblyState = &input_assembly_ci;
368 ci.pTessellationState = &tessellation_ci;
369 ci.pViewportState = &viewport_ci;
370 ci.pRasterizationState = &rasterization_ci;
371 ci.pMultisampleState = &multisample_ci;
372 ci.pDepthStencilState = &depth_stencil_ci;
373 ci.pColorBlendState = &color_blend_ci;
374 ci.pDynamicState = &dynamic_state_ci;
375 ci.layout = *layout;
376 ci.renderPass = renderpass;
377 ci.subpass = 0;
378 ci.basePipelineHandle = nullptr;
379 ci.basePipelineIndex = 0;
380 return device.GetLogical().CreateGraphicsPipeline(ci);
267} 381}
268 382
269} // namespace Vulkan 383} // namespace Vulkan
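All of the state structures above feed one creation call; the wrapper's CreateGraphicsPipeline presumably dispatches to vkCreateGraphicsPipelines with no pipeline cache and the default allocator. A sketch of that raw call, not the wrapper's actual body:

#include <vulkan/vulkan.h>

VkPipeline MakeGraphicsPipeline(VkDevice device, const VkGraphicsPipelineCreateInfo& ci) {
    VkPipeline pipeline = VK_NULL_HANDLE;
    vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, /*createInfoCount=*/1, &ci, nullptr,
                              &pipeline);
    return pipeline; // VK_NULL_HANDLE if creation failed
}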
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 4f5e4ea2d..7aba70960 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -11,12 +11,12 @@
11#include <vector> 11#include <vector>
12 12
13#include "video_core/engines/maxwell_3d.h" 13#include "video_core/engines/maxwell_3d.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 14#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
16#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 15#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
17#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 16#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
18#include "video_core/renderer_vulkan/vk_resource_manager.h" 17#include "video_core/renderer_vulkan/vk_resource_manager.h"
19#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 18#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
19#include "video_core/renderer_vulkan/wrapper.h"
20 20
21namespace Vulkan { 21namespace Vulkan {
22 22
@@ -39,52 +39,52 @@ public:
39 VKUpdateDescriptorQueue& update_descriptor_queue, 39 VKUpdateDescriptorQueue& update_descriptor_queue,
40 VKRenderPassCache& renderpass_cache, 40 VKRenderPassCache& renderpass_cache,
41 const GraphicsPipelineCacheKey& key, 41 const GraphicsPipelineCacheKey& key,
42 const std::vector<vk::DescriptorSetLayoutBinding>& bindings, 42 vk::Span<VkDescriptorSetLayoutBinding> bindings,
43 const SPIRVProgram& program); 43 const SPIRVProgram& program);
44 ~VKGraphicsPipeline(); 44 ~VKGraphicsPipeline();
45 45
46 vk::DescriptorSet CommitDescriptorSet(); 46 VkDescriptorSet CommitDescriptorSet();
47 47
48 vk::Pipeline GetHandle() const { 48 VkPipeline GetHandle() const {
49 return *pipeline; 49 return *pipeline;
50 } 50 }
51 51
52 vk::PipelineLayout GetLayout() const { 52 VkPipelineLayout GetLayout() const {
53 return *layout; 53 return *layout;
54 } 54 }
55 55
56 vk::RenderPass GetRenderPass() const { 56 VkRenderPass GetRenderPass() const {
57 return renderpass; 57 return renderpass;
58 } 58 }
59 59
60private: 60private:
61 UniqueDescriptorSetLayout CreateDescriptorSetLayout( 61 vk::DescriptorSetLayout CreateDescriptorSetLayout(
62 const std::vector<vk::DescriptorSetLayoutBinding>& bindings) const; 62 vk::Span<VkDescriptorSetLayoutBinding> bindings) const;
63 63
64 UniquePipelineLayout CreatePipelineLayout() const; 64 vk::PipelineLayout CreatePipelineLayout() const;
65 65
66 UniqueDescriptorUpdateTemplate CreateDescriptorUpdateTemplate( 66 vk::DescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplate(
67 const SPIRVProgram& program) const; 67 const SPIRVProgram& program) const;
68 68
69 std::vector<UniqueShaderModule> CreateShaderModules(const SPIRVProgram& program) const; 69 std::vector<vk::ShaderModule> CreateShaderModules(const SPIRVProgram& program) const;
70 70
71 UniquePipeline CreatePipeline(const RenderPassParams& renderpass_params, 71 vk::Pipeline CreatePipeline(const RenderPassParams& renderpass_params,
72 const SPIRVProgram& program) const; 72 const SPIRVProgram& program) const;
73 73
74 const VKDevice& device; 74 const VKDevice& device;
75 VKScheduler& scheduler; 75 VKScheduler& scheduler;
76 const FixedPipelineState fixed_state; 76 const FixedPipelineState fixed_state;
77 const u64 hash; 77 const u64 hash;
78 78
79 UniqueDescriptorSetLayout descriptor_set_layout; 79 vk::DescriptorSetLayout descriptor_set_layout;
80 DescriptorAllocator descriptor_allocator; 80 DescriptorAllocator descriptor_allocator;
81 VKUpdateDescriptorQueue& update_descriptor_queue; 81 VKUpdateDescriptorQueue& update_descriptor_queue;
82 UniquePipelineLayout layout; 82 vk::PipelineLayout layout;
83 UniqueDescriptorUpdateTemplate descriptor_template; 83 vk::DescriptorUpdateTemplateKHR descriptor_template;
84 std::vector<UniqueShaderModule> modules; 84 std::vector<vk::ShaderModule> modules;
85 85
86 vk::RenderPass renderpass; 86 VkRenderPass renderpass;
87 UniquePipeline pipeline; 87 vk::Pipeline pipeline;
88}; 88};
89 89
90} // namespace Vulkan 90} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp
index 4bcbef959..9bceb3861 100644
--- a/src/video_core/renderer_vulkan/vk_image.cpp
+++ b/src/video_core/renderer_vulkan/vk_image.cpp
@@ -6,22 +6,21 @@
6#include <vector> 6#include <vector>
7 7
8#include "common/assert.h" 8#include "common/assert.h"
9#include "video_core/renderer_vulkan/declarations.h"
10#include "video_core/renderer_vulkan/vk_device.h" 9#include "video_core/renderer_vulkan/vk_device.h"
11#include "video_core/renderer_vulkan/vk_image.h" 10#include "video_core/renderer_vulkan/vk_image.h"
12#include "video_core/renderer_vulkan/vk_scheduler.h" 11#include "video_core/renderer_vulkan/vk_scheduler.h"
12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
16VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, 16VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler, const VkImageCreateInfo& image_ci,
17 const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask) 17 VkImageAspectFlags aspect_mask)
18 : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask}, 18 : device{device}, scheduler{scheduler}, format{image_ci.format}, aspect_mask{aspect_mask},
19 image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} { 19 image_num_layers{image_ci.arrayLayers}, image_num_levels{image_ci.mipLevels} {
20 UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0, 20 UNIMPLEMENTED_IF_MSG(image_ci.queueFamilyIndexCount != 0,
21 "Queue family tracking is not implemented"); 21 "Queue family tracking is not implemented");
22 22
23 const auto dev = device.GetLogical(); 23 image = device.GetLogical().CreateImage(image_ci);
24 image = dev.createImageUnique(image_ci, nullptr, device.GetDispatchLoader());
25 24
26 const u32 num_ranges = image_num_layers * image_num_levels; 25 const u32 num_ranges = image_num_layers * image_num_levels;
27 barriers.resize(num_ranges); 26 barriers.resize(num_ranges);
@@ -31,8 +30,8 @@ VKImage::VKImage(const VKDevice& device, VKScheduler& scheduler,
31VKImage::~VKImage() = default; 30VKImage::~VKImage() = default;
32 31
33void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, 32void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
34 vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, 33 VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
35 vk::ImageLayout new_layout) { 34 VkImageLayout new_layout) {
36 if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) { 35 if (!HasChanged(base_layer, num_layers, base_level, num_levels, new_access, new_layout)) {
37 return; 36 return;
38 } 37 }
@@ -43,9 +42,21 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
43 const u32 layer = base_layer + layer_it; 42 const u32 layer = base_layer + layer_it;
44 const u32 level = base_level + level_it; 43 const u32 level = base_level + level_it;
45 auto& state = GetSubrangeState(layer, level); 44 auto& state = GetSubrangeState(layer, level);
46 barriers[cursor] = vk::ImageMemoryBarrier( 45 auto& barrier = barriers[cursor];
47 state.access, new_access, state.layout, new_layout, VK_QUEUE_FAMILY_IGNORED, 46 barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
48 VK_QUEUE_FAMILY_IGNORED, *image, {aspect_mask, level, 1, layer, 1}); 47 barrier.pNext = nullptr;
48 barrier.srcAccessMask = state.access;
49 barrier.dstAccessMask = new_access;
50 barrier.oldLayout = state.layout;
51 barrier.newLayout = new_layout;
52 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
53 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
54 barrier.image = *image;
55 barrier.subresourceRange.aspectMask = aspect_mask;
56 barrier.subresourceRange.baseMipLevel = level;
57 barrier.subresourceRange.levelCount = 1;
58 barrier.subresourceRange.baseArrayLayer = layer;
59 barrier.subresourceRange.layerCount = 1;
49 state.access = new_access; 60 state.access = new_access;
50 state.layout = new_layout; 61 state.layout = new_layout;
51 } 62 }
@@ -53,16 +64,16 @@ void VKImage::Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num
53 64
54 scheduler.RequestOutsideRenderPassOperationContext(); 65 scheduler.RequestOutsideRenderPassOperationContext();
55 66
56 scheduler.Record([barriers = barriers, cursor](auto cmdbuf, auto& dld) { 67 scheduler.Record([barriers = barriers, cursor](vk::CommandBuffer cmdbuf) {
57 // TODO(Rodrigo): Implement a way to use the latest stage across subresources. 68 // TODO(Rodrigo): Implement a way to use the latest stage across subresources.
58 constexpr auto stage_stub = vk::PipelineStageFlagBits::eAllCommands; 69 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
59 cmdbuf.pipelineBarrier(stage_stub, stage_stub, {}, 0, nullptr, 0, nullptr, 70 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, {}, {},
60 static_cast<u32>(cursor), barriers.data(), dld); 71 vk::Span(barriers.data(), cursor));
61 }); 72 });
62} 73}
63 74
64bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, 75bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
65 vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept { 76 VkAccessFlags new_access, VkImageLayout new_layout) noexcept {
66 const bool is_full_range = base_layer == 0 && num_layers == image_num_layers && 77 const bool is_full_range = base_layer == 0 && num_layers == image_num_layers &&
67 base_level == 0 && num_levels == image_num_levels; 78 base_level == 0 && num_levels == image_num_levels;
68 if (!is_full_range) { 79 if (!is_full_range) {
@@ -91,11 +102,21 @@ bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num
91 102
92void VKImage::CreatePresentView() { 103void VKImage::CreatePresentView() {
93 // Image type has to be 2D to be presented. 104 // Image type has to be 2D to be presented.
94 const vk::ImageViewCreateInfo image_view_ci({}, *image, vk::ImageViewType::e2D, format, {}, 105 VkImageViewCreateInfo image_view_ci;
95 {aspect_mask, 0, 1, 0, 1}); 106 image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
96 const auto dev = device.GetLogical(); 107 image_view_ci.pNext = nullptr;
97 const auto& dld = device.GetDispatchLoader(); 108 image_view_ci.flags = 0;
98 present_view = dev.createImageViewUnique(image_view_ci, nullptr, dld); 109 image_view_ci.image = *image;
110 image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
111 image_view_ci.format = format;
112 image_view_ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
113 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
114 image_view_ci.subresourceRange.aspectMask = aspect_mask;
115 image_view_ci.subresourceRange.baseMipLevel = 0;
116 image_view_ci.subresourceRange.levelCount = 1;
117 image_view_ci.subresourceRange.baseArrayLayer = 0;
118 image_view_ci.subresourceRange.layerCount = 1;
119 present_view = device.GetLogical().CreateImageView(image_view_ci);
99} 120}
100 121
101VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept { 122VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
diff --git a/src/video_core/renderer_vulkan/vk_image.h b/src/video_core/renderer_vulkan/vk_image.h
index b78242512..b4d7229e5 100644
--- a/src/video_core/renderer_vulkan/vk_image.h
+++ b/src/video_core/renderer_vulkan/vk_image.h
@@ -8,7 +8,7 @@
8#include <vector> 8#include <vector>
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h" 11#include "video_core/renderer_vulkan/wrapper.h"
12 12
13namespace Vulkan { 13namespace Vulkan {
14 14
@@ -18,16 +18,16 @@ class VKScheduler;
18class VKImage { 18class VKImage {
19public: 19public:
20 explicit VKImage(const VKDevice& device, VKScheduler& scheduler, 20 explicit VKImage(const VKDevice& device, VKScheduler& scheduler,
21 const vk::ImageCreateInfo& image_ci, vk::ImageAspectFlags aspect_mask); 21 const VkImageCreateInfo& image_ci, VkImageAspectFlags aspect_mask);
22 ~VKImage(); 22 ~VKImage();
23 23
24 /// Records in the passed command buffer an image transition and updates the state of the image. 24 /// Records in the passed command buffer an image transition and updates the state of the image.
25 void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, 25 void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
26 vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, 26 VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
27 vk::ImageLayout new_layout); 27 VkImageLayout new_layout);
28 28
29 /// Returns a view compatible with presentation, the image has to be 2D. 29 /// Returns a view compatible with presentation, the image has to be 2D.
30 vk::ImageView GetPresentView() { 30 VkImageView GetPresentView() {
31 if (!present_view) { 31 if (!present_view) {
32 CreatePresentView(); 32 CreatePresentView();
33 } 33 }
@@ -35,28 +35,28 @@ public:
35 } 35 }
36 36
37 /// Returns the Vulkan image handle. 37 /// Returns the Vulkan image handle.
38 vk::Image GetHandle() const { 38 const vk::Image& GetHandle() const {
39 return *image; 39 return image;
40 } 40 }
41 41
42 /// Returns the Vulkan format for this image. 42 /// Returns the Vulkan format for this image.
43 vk::Format GetFormat() const { 43 VkFormat GetFormat() const {
44 return format; 44 return format;
45 } 45 }
46 46
47 /// Returns the Vulkan aspect mask. 47 /// Returns the Vulkan aspect mask.
48 vk::ImageAspectFlags GetAspectMask() const { 48 VkImageAspectFlags GetAspectMask() const {
49 return aspect_mask; 49 return aspect_mask;
50 } 50 }
51 51
52private: 52private:
53 struct SubrangeState final { 53 struct SubrangeState final {
54 vk::AccessFlags access{}; ///< Current access bits. 54 VkAccessFlags access = 0; ///< Current access bits.
55 vk::ImageLayout layout = vk::ImageLayout::eUndefined; ///< Current image layout. 55 VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED; ///< Current image layout.
56 }; 56 };
57 57
58 bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, 58 bool HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
59 vk::AccessFlags new_access, vk::ImageLayout new_layout) noexcept; 59 VkAccessFlags new_access, VkImageLayout new_layout) noexcept;
60 60
61 /// Creates a presentation view. 61 /// Creates a presentation view.
62 void CreatePresentView(); 62 void CreatePresentView();
@@ -67,16 +67,16 @@ private:
67 const VKDevice& device; ///< Device handler. 67 const VKDevice& device; ///< Device handler.
68 VKScheduler& scheduler; ///< Device scheduler. 68 VKScheduler& scheduler; ///< Device scheduler.
69 69
70 const vk::Format format; ///< Vulkan format. 70 const VkFormat format; ///< Vulkan format.
71 const vk::ImageAspectFlags aspect_mask; ///< Vulkan aspect mask. 71 const VkImageAspectFlags aspect_mask; ///< Vulkan aspect mask.
72 const u32 image_num_layers; ///< Number of layers. 72 const u32 image_num_layers; ///< Number of layers.
73 const u32 image_num_levels; ///< Number of mipmap levels. 73 const u32 image_num_levels; ///< Number of mipmap levels.
74 74
75 UniqueImage image; ///< Image handle. 75 vk::Image image; ///< Image handle.
76 UniqueImageView present_view; ///< Image view compatible with presentation. 76 vk::ImageView present_view; ///< Image view compatible with presentation.
77 77
78 std::vector<vk::ImageMemoryBarrier> barriers; ///< Pool of barriers. 78 std::vector<VkImageMemoryBarrier> barriers; ///< Pool of barriers.
79 std::vector<SubrangeState> subrange_states; ///< Current subrange state. 79 std::vector<SubrangeState> subrange_states; ///< Current subrange state.
80 80
81 bool state_diverged = false; ///< True when subresources mismatch in layout. 81 bool state_diverged = false; ///< True when subresources mismatch in layout.
82}; 82};
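A hypothetical caller's view of Transition, assuming a constructed VKImage named image with a single subresource; the stage, access and layout values are illustrative of preparing the image for a transfer write:

// Hypothetical usage: move layer 0 / level 0 into a layout suitable for vkCmdCopy*.
image.Transition(/*base_layer=*/0, /*num_layers=*/1, /*base_level=*/0, /*num_levels=*/1,
                 VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);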
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 9cc9979d0..6a9e658bf 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -11,9 +11,9 @@
11#include "common/assert.h" 11#include "common/assert.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "common/logging/log.h" 13#include "common/logging/log.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_device.h" 14#include "video_core/renderer_vulkan/vk_device.h"
16#include "video_core/renderer_vulkan/vk_memory_manager.h" 15#include "video_core/renderer_vulkan/vk_memory_manager.h"
16#include "video_core/renderer_vulkan/wrapper.h"
17 17
18namespace Vulkan { 18namespace Vulkan {
19 19
@@ -30,17 +30,11 @@ u64 GetAllocationChunkSize(u64 required_size) {
30class VKMemoryAllocation final { 30class VKMemoryAllocation final {
31public: 31public:
32 explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory, 32 explicit VKMemoryAllocation(const VKDevice& device, vk::DeviceMemory memory,
33 vk::MemoryPropertyFlags properties, u64 allocation_size, u32 type) 33 VkMemoryPropertyFlags properties, u64 allocation_size, u32 type)
34 : device{device}, memory{memory}, properties{properties}, allocation_size{allocation_size}, 34 : device{device}, memory{std::move(memory)}, properties{properties},
35 shifted_type{ShiftType(type)} {} 35 allocation_size{allocation_size}, shifted_type{ShiftType(type)} {}
36
37 ~VKMemoryAllocation() {
38 const auto dev = device.GetLogical();
39 const auto& dld = device.GetDispatchLoader();
40 dev.free(memory, nullptr, dld);
41 }
42 36
43 VKMemoryCommit Commit(vk::DeviceSize commit_size, vk::DeviceSize alignment) { 37 VKMemoryCommit Commit(VkDeviceSize commit_size, VkDeviceSize alignment) {
44 auto found = TryFindFreeSection(free_iterator, allocation_size, 38 auto found = TryFindFreeSection(free_iterator, allocation_size,
45 static_cast<u64>(commit_size), static_cast<u64>(alignment)); 39 static_cast<u64>(commit_size), static_cast<u64>(alignment));
46 if (!found) { 40 if (!found) {
@@ -73,9 +67,8 @@ public:
73 } 67 }
74 68
75 /// Returns whether this allocation is compatible with the arguments. 69 /// Returns whether this allocation is compatible with the arguments.
76 bool IsCompatible(vk::MemoryPropertyFlags wanted_properties, u32 type_mask) const { 70 bool IsCompatible(VkMemoryPropertyFlags wanted_properties, u32 type_mask) const {
77 return (wanted_properties & properties) != vk::MemoryPropertyFlagBits(0) && 71 return (wanted_properties & properties) && (type_mask & shifted_type) != 0;
78 (type_mask & shifted_type) != 0;
79 } 72 }
80 73
81private: 74private:
@@ -111,11 +104,11 @@ private:
111 return std::nullopt; 104 return std::nullopt;
112 } 105 }
113 106
114 const VKDevice& device; ///< Vulkan device. 107 const VKDevice& device; ///< Vulkan device.
115 const vk::DeviceMemory memory; ///< Vulkan memory allocation handler. 108 const vk::DeviceMemory memory; ///< Vulkan memory allocation handler.
116 const vk::MemoryPropertyFlags properties; ///< Vulkan properties. 109 const VkMemoryPropertyFlags properties; ///< Vulkan properties.
117 const u64 allocation_size; ///< Size of this allocation. 110 const u64 allocation_size; ///< Size of this allocation.
118 const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted. 111 const u32 shifted_type; ///< Stored Vulkan type of this allocation, shifted.
119 112
120 /// Hints where the next free region is likely going to be. 113 /// Hints where the next free region is likely going to be.
121 u64 free_iterator{}; 114 u64 free_iterator{};
@@ -125,22 +118,20 @@ private:
125}; 118};
126 119
127VKMemoryManager::VKMemoryManager(const VKDevice& device) 120VKMemoryManager::VKMemoryManager(const VKDevice& device)
128 : device{device}, properties{device.GetPhysical().getMemoryProperties( 121 : device{device}, properties{device.GetPhysical().GetMemoryProperties()},
129 device.GetDispatchLoader())},
130 is_memory_unified{GetMemoryUnified(properties)} {} 122 is_memory_unified{GetMemoryUnified(properties)} {}
131 123
132VKMemoryManager::~VKMemoryManager() = default; 124VKMemoryManager::~VKMemoryManager() = default;
133 125
134VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirements, 126VKMemoryCommit VKMemoryManager::Commit(const VkMemoryRequirements& requirements,
135 bool host_visible) { 127 bool host_visible) {
136 const u64 chunk_size = GetAllocationChunkSize(requirements.size); 128 const u64 chunk_size = GetAllocationChunkSize(requirements.size);
137 129
138 // When a host-visible commit is requested, search for a host-visible and coherent type; 130 // When a host-visible commit is requested, search for a host-visible and coherent type;
139 // otherwise search for a fast device-local type. 131 // otherwise search for a fast device-local type.
140 const vk::MemoryPropertyFlags wanted_properties = 132 const VkMemoryPropertyFlags wanted_properties =
141 host_visible 133 host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
142 ? vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent 134 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
143 : vk::MemoryPropertyFlagBits::eDeviceLocal;
144 135
145 if (auto commit = TryAllocCommit(requirements, wanted_properties)) { 136 if (auto commit = TryAllocCommit(requirements, wanted_properties)) {
146 return commit; 137 return commit;
@@ -161,23 +152,19 @@ VKMemoryCommit VKMemoryManager::Commit(const vk::MemoryRequirements& requirement
161 return commit; 152 return commit;
162} 153}
163 154
164VKMemoryCommit VKMemoryManager::Commit(vk::Buffer buffer, bool host_visible) { 155VKMemoryCommit VKMemoryManager::Commit(const vk::Buffer& buffer, bool host_visible) {
165 const auto dev = device.GetLogical(); 156 auto commit = Commit(device.GetLogical().GetBufferMemoryRequirements(*buffer), host_visible);
166 const auto& dld = device.GetDispatchLoader(); 157 buffer.BindMemory(commit->GetMemory(), commit->GetOffset());
167 auto commit = Commit(dev.getBufferMemoryRequirements(buffer, dld), host_visible);
168 dev.bindBufferMemory(buffer, commit->GetMemory(), commit->GetOffset(), dld);
169 return commit; 158 return commit;
170} 159}
171 160
172VKMemoryCommit VKMemoryManager::Commit(vk::Image image, bool host_visible) { 161VKMemoryCommit VKMemoryManager::Commit(const vk::Image& image, bool host_visible) {
173 const auto dev = device.GetLogical(); 162 auto commit = Commit(device.GetLogical().GetImageMemoryRequirements(*image), host_visible);
174 const auto& dld = device.GetDispatchLoader(); 163 image.BindMemory(commit->GetMemory(), commit->GetOffset());
175 auto commit = Commit(dev.getImageMemoryRequirements(image, dld), host_visible);
176 dev.bindImageMemory(image, commit->GetMemory(), commit->GetOffset(), dld);
177 return commit; 164 return commit;
178} 165}
179 166
180bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, 167bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask,
181 u64 size) { 168 u64 size) {
182 const u32 type = [&] { 169 const u32 type = [&] {
183 for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) { 170 for (u32 type_index = 0; type_index < properties.memoryTypeCount; ++type_index) {
@@ -191,24 +178,26 @@ bool VKMemoryManager::AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32
191 return 0U; 178 return 0U;
192 }(); 179 }();
193 180
194 const auto dev = device.GetLogical();
195 const auto& dld = device.GetDispatchLoader();
196
197 // Try to allocate found type. 181 // Try to allocate found type.
198 const vk::MemoryAllocateInfo memory_ai(size, type); 182 VkMemoryAllocateInfo memory_ai;
199 vk::DeviceMemory memory; 183 memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
200 if (const auto res = dev.allocateMemory(&memory_ai, nullptr, &memory, dld); 184 memory_ai.pNext = nullptr;
201 res != vk::Result::eSuccess) { 185 memory_ai.allocationSize = size;
202 LOG_CRITICAL(Render_Vulkan, "Device allocation failed with code {}!", vk::to_string(res)); 186 memory_ai.memoryTypeIndex = type;
187
188 vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
189 if (!memory) {
190 LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
203 return false; 191 return false;
204 } 192 }
205 allocations.push_back( 193
206 std::make_unique<VKMemoryAllocation>(device, memory, wanted_properties, size, type)); 194 allocations.push_back(std::make_unique<VKMemoryAllocation>(device, std::move(memory),
195 wanted_properties, size, type));
207 return true; 196 return true;
208} 197}
209 198
210VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& requirements, 199VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requirements,
211 vk::MemoryPropertyFlags wanted_properties) { 200 VkMemoryPropertyFlags wanted_properties) {
212 for (auto& allocation : allocations) { 201 for (auto& allocation : allocations) {
213 if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) { 202 if (!allocation->IsCompatible(wanted_properties, requirements.memoryTypeBits)) {
214 continue; 203 continue;
@@ -220,10 +209,9 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
220 return {}; 209 return {};
221} 210}
222 211
223/*static*/ bool VKMemoryManager::GetMemoryUnified( 212bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
224 const vk::PhysicalDeviceMemoryProperties& properties) {
225 for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) { 213 for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
226 if (!(properties.memoryHeaps[heap_index].flags & vk::MemoryHeapFlagBits::eDeviceLocal)) { 214 if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
227 // Memory is considered unified only when every heap is device local. 215 // Memory is considered unified only when every heap is device local.
228 return false; 216 return false;
229 } 217 }
@@ -232,23 +220,19 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const vk::MemoryRequirements& req
232} 220}
233 221
234VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation, 222VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
235 vk::DeviceMemory memory, u64 begin, u64 end) 223 const vk::DeviceMemory& memory, u64 begin, u64 end)
236 : device{device}, interval{begin, end}, memory{memory}, allocation{allocation} {} 224 : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
237 225
238VKMemoryCommitImpl::~VKMemoryCommitImpl() { 226VKMemoryCommitImpl::~VKMemoryCommitImpl() {
239 allocation->Free(this); 227 allocation->Free(this);
240} 228}
241 229
242MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const { 230MemoryMap VKMemoryCommitImpl::Map(u64 size, u64 offset_) const {
243 const auto dev = device.GetLogical(); 231 return MemoryMap{this, memory.Map(interval.first + offset_, size)};
244 const auto address = reinterpret_cast<u8*>(
245 dev.mapMemory(memory, interval.first + offset_, size, {}, device.GetDispatchLoader()));
246 return MemoryMap{this, address};
247} 232}
248 233
249void VKMemoryCommitImpl::Unmap() const { 234void VKMemoryCommitImpl::Unmap() const {
250 const auto dev = device.GetLogical(); 235 memory.Unmap();
251 dev.unmapMemory(memory, device.GetDispatchLoader());
252} 236}
253 237
254MemoryMap VKMemoryCommitImpl::Map() const { 238MemoryMap VKMemoryCommitImpl::Map() const {
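
In caller terms, the two Commit overloads collapse the old three-step sequence (query requirements, suballocate, bind) into a single call. A hypothetical usage sketch against this interface; memory_manager, buffer, data, size and the GetAddress() accessor are assumptions for illustration:

    // Hypothetical caller: commit host-visible memory for an existing buffer,
    // then upload through a mapped pointer. Error handling elided.
    VKMemoryCommit commit = memory_manager.Commit(buffer, /*host_visible=*/true);
    {
        MemoryMap map = commit->Map(size); // Unmapped when `map` leaves scope.
        std::memcpy(map.GetAddress(), data, size);
    }
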
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index cd00bb91b..35ee54d30 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -8,7 +8,7 @@
8#include <utility> 8#include <utility>
9#include <vector> 9#include <vector>
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h" 11#include "video_core/renderer_vulkan/wrapper.h"
12 12
13namespace Vulkan { 13namespace Vulkan {
14 14
@@ -32,13 +32,13 @@ public:
32 * memory. When passing false, it will try to allocate device local memory. 32 * memory. When passing false, it will try to allocate device local memory.
33 * @returns A memory commit. 33 * @returns A memory commit.
34 */ 34 */
35 VKMemoryCommit Commit(const vk::MemoryRequirements& reqs, bool host_visible); 35 VKMemoryCommit Commit(const VkMemoryRequirements& reqs, bool host_visible);
36 36
37 /// Commits memory required by the buffer and binds it. 37 /// Commits memory required by the buffer and binds it.
38 VKMemoryCommit Commit(vk::Buffer buffer, bool host_visible); 38 VKMemoryCommit Commit(const vk::Buffer& buffer, bool host_visible);
39 39
40 /// Commits memory required by the image and binds it. 40 /// Commits memory required by the image and binds it.
41 VKMemoryCommit Commit(vk::Image image, bool host_visible); 41 VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
42 42
43 /// Returns true if memory allocations are always done in host visible and coherent memory. 43 /// Returns true if memory allocations are always done in host visible and coherent memory.
44 bool IsMemoryUnified() const { 44 bool IsMemoryUnified() const {
@@ -47,18 +47,18 @@ public:
47 47
48private: 48private:
49 /// Allocates a chunk of memory. 49 /// Allocates a chunk of memory.
50 bool AllocMemory(vk::MemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); 50 bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
51 51
52 /// Tries to allocate a memory commit. 52 /// Tries to allocate a memory commit.
53 VKMemoryCommit TryAllocCommit(const vk::MemoryRequirements& requirements, 53 VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
54 vk::MemoryPropertyFlags wanted_properties); 54 VkMemoryPropertyFlags wanted_properties);
55 55
56 /// Returns true if the device uses a unified memory model. 56 /// Returns true if the device uses a unified memory model.
57 static bool GetMemoryUnified(const vk::PhysicalDeviceMemoryProperties& properties); 57 static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties);
58 58
59 const VKDevice& device; ///< Device handler. 59 const VKDevice& device; ///< Device handler.
60 const vk::PhysicalDeviceMemoryProperties properties; ///< Physical device properties. 60 const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
61 const bool is_memory_unified; ///< True if memory model is unified. 61 const bool is_memory_unified; ///< True if memory model is unified.
62 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations. 62 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
63}; 63};
64 64
@@ -68,7 +68,7 @@ class VKMemoryCommitImpl final {
68 68
69public: 69public:
70 explicit VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation, 70 explicit VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
71 vk::DeviceMemory memory, u64 begin, u64 end); 71 const vk::DeviceMemory& memory, u64 begin, u64 end);
72 ~VKMemoryCommitImpl(); 72 ~VKMemoryCommitImpl();
73 73
74 /// Maps a memory region and returns a pointer to it. 74 /// Maps a memory region and returns a pointer to it.
@@ -80,13 +80,13 @@ public:
80 MemoryMap Map() const; 80 MemoryMap Map() const;
81 81
82 /// Returns the Vulkan memory handler. 82 /// Returns the Vulkan memory handler.
83 vk::DeviceMemory GetMemory() const { 83 VkDeviceMemory GetMemory() const {
84 return memory; 84 return *memory;
85 } 85 }
86 86
87 /// Returns the start position of the commit relative to the allocation. 87 /// Returns the start position of the commit relative to the allocation.
88 vk::DeviceSize GetOffset() const { 88 VkDeviceSize GetOffset() const {
89 return static_cast<vk::DeviceSize>(interval.first); 89 return static_cast<VkDeviceSize>(interval.first);
90 } 90 }
91 91
92private: 92private:
@@ -94,8 +94,8 @@ private:
94 void Unmap() const; 94 void Unmap() const;
95 95
96 const VKDevice& device; ///< Vulkan device. 96 const VKDevice& device; ///< Vulkan device.
97 const vk::DeviceMemory& memory; ///< Vulkan device memory handler.
97 std::pair<u64, u64> interval{}; ///< Interval where the commit exists. 98 std::pair<u64, u64> interval{}; ///< Interval where the commit exists.
98 vk::DeviceMemory memory; ///< Vulkan device memory handler.
99 VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation. 99 VKMemoryAllocation* allocation{}; ///< Pointer to the large memory allocation.
100}; 100};
101 101
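
Behind AllocMemory sits the standard Vulkan memory-type search: a resource reports a memoryTypeBits mask, and the chosen type index must both be allowed by that mask and expose every wanted property flag. A self-contained sketch of that selection:

    #include <cstdint>
    #include <optional>
    #include <vulkan/vulkan.h>

    // Picks the first memory type allowed by the resource's type mask that
    // exposes all wanted property flags; returns std::nullopt on failure so
    // the caller can fall back or abort, as VKMemoryManager does.
    std::optional<std::uint32_t> FindMemoryType(
        const VkPhysicalDeviceMemoryProperties& props, std::uint32_t type_mask,
        VkMemoryPropertyFlags wanted) {
        for (std::uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            const bool allowed = (type_mask & (1U << i)) != 0;
            const bool compatible =
                (props.memoryTypes[i].propertyFlags & wanted) == wanted;
            if (allowed && compatible) {
                return i;
            }
        }
        return std::nullopt;
    }
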
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index c2a426aeb..90e3a8edd 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -13,7 +13,6 @@
13#include "video_core/engines/kepler_compute.h" 13#include "video_core/engines/kepler_compute.h"
14#include "video_core/engines/maxwell_3d.h" 14#include "video_core/engines/maxwell_3d.h"
15#include "video_core/memory_manager.h" 15#include "video_core/memory_manager.h"
16#include "video_core/renderer_vulkan/declarations.h"
17#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 16#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
18#include "video_core/renderer_vulkan/maxwell_to_vk.h" 17#include "video_core/renderer_vulkan/maxwell_to_vk.h"
19#include "video_core/renderer_vulkan/vk_compute_pipeline.h" 18#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
@@ -26,6 +25,7 @@
26#include "video_core/renderer_vulkan/vk_resource_manager.h" 25#include "video_core/renderer_vulkan/vk_resource_manager.h"
27#include "video_core/renderer_vulkan/vk_scheduler.h" 26#include "video_core/renderer_vulkan/vk_scheduler.h"
28#include "video_core/renderer_vulkan/vk_update_descriptor.h" 27#include "video_core/renderer_vulkan/vk_update_descriptor.h"
28#include "video_core/renderer_vulkan/wrapper.h"
29#include "video_core/shader/compiler_settings.h" 29#include "video_core/shader/compiler_settings.h"
30 30
31namespace Vulkan { 31namespace Vulkan {
@@ -36,12 +36,11 @@ using Tegra::Engines::ShaderType;
36 36
37namespace { 37namespace {
38 38
39// C++20's using enum 39constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
40constexpr auto eUniformBuffer = vk::DescriptorType::eUniformBuffer; 40constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
41constexpr auto eStorageBuffer = vk::DescriptorType::eStorageBuffer; 41constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
42constexpr auto eUniformTexelBuffer = vk::DescriptorType::eUniformTexelBuffer; 42constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
43constexpr auto eCombinedImageSampler = vk::DescriptorType::eCombinedImageSampler; 43constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
44constexpr auto eStorageImage = vk::DescriptorType::eStorageImage;
45 44
46constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ 45constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
47 VideoCommon::Shader::CompileDepth::FullDecompile}; 46 VideoCommon::Shader::CompileDepth::FullDecompile};
@@ -126,32 +125,37 @@ ShaderType GetShaderType(Maxwell::ShaderProgram program) {
126 } 125 }
127} 126}
128 127
129template <vk::DescriptorType descriptor_type, class Container> 128template <VkDescriptorType descriptor_type, class Container>
130void AddBindings(std::vector<vk::DescriptorSetLayoutBinding>& bindings, u32& binding, 129void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& binding,
131 vk::ShaderStageFlags stage_flags, const Container& container) { 130 VkShaderStageFlags stage_flags, const Container& container) {
132 const u32 num_entries = static_cast<u32>(std::size(container)); 131 const u32 num_entries = static_cast<u32>(std::size(container));
133 for (std::size_t i = 0; i < num_entries; ++i) { 132 for (std::size_t i = 0; i < num_entries; ++i) {
134 u32 count = 1; 133 u32 count = 1;
135 if constexpr (descriptor_type == eCombinedImageSampler) { 134 if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
136 // Combined image samplers can be arrayed. 135 // Combined image samplers can be arrayed.
137 count = container[i].Size(); 136 count = container[i].Size();
138 } 137 }
139 bindings.emplace_back(binding++, descriptor_type, count, stage_flags, nullptr); 138 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
139 entry.binding = binding++;
140 entry.descriptorType = descriptor_type;
141 entry.descriptorCount = count;
142 entry.stageFlags = stage_flags;
143 entry.pImmutableSamplers = nullptr;
140 } 144 }
141} 145}
142 146
143u32 FillDescriptorLayout(const ShaderEntries& entries, 147u32 FillDescriptorLayout(const ShaderEntries& entries,
144 std::vector<vk::DescriptorSetLayoutBinding>& bindings, 148 std::vector<VkDescriptorSetLayoutBinding>& bindings,
145 Maxwell::ShaderProgram program_type, u32 base_binding) { 149 Maxwell::ShaderProgram program_type, u32 base_binding) {
146 const ShaderType stage = GetStageFromProgram(program_type); 150 const ShaderType stage = GetStageFromProgram(program_type);
147 const vk::ShaderStageFlags flags = MaxwellToVK::ShaderStage(stage); 151 const VkShaderStageFlags flags = MaxwellToVK::ShaderStage(stage);
148 152
149 u32 binding = base_binding; 153 u32 binding = base_binding;
150 AddBindings<eUniformBuffer>(bindings, binding, flags, entries.const_buffers); 154 AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
151 AddBindings<eStorageBuffer>(bindings, binding, flags, entries.global_buffers); 155 AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
152 AddBindings<eUniformTexelBuffer>(bindings, binding, flags, entries.texel_buffers); 156 AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers);
153 AddBindings<eCombinedImageSampler>(bindings, binding, flags, entries.samplers); 157 AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
154 AddBindings<eStorageImage>(bindings, binding, flags, entries.images); 158 AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
155 return binding; 159 return binding;
156} 160}
157 161
@@ -318,7 +322,7 @@ void VKPipelineCache::Unregister(const Shader& shader) {
318 RasterizerCache::Unregister(shader); 322 RasterizerCache::Unregister(shader);
319} 323}
320 324
321std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>> 325std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>>
322VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) { 326VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
323 const auto& fixed_state = key.fixed_state; 327 const auto& fixed_state = key.fixed_state;
324 auto& memory_manager = system.GPU().MemoryManager(); 328 auto& memory_manager = system.GPU().MemoryManager();
@@ -335,7 +339,7 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
335 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one; 339 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
336 340
337 SPIRVProgram program; 341 SPIRVProgram program;
338 std::vector<vk::DescriptorSetLayoutBinding> bindings; 342 std::vector<VkDescriptorSetLayoutBinding> bindings;
339 343
340 for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) { 344 for (std::size_t index = 0; index < Maxwell::MaxShaderProgram; ++index) {
341 const auto program_enum = static_cast<Maxwell::ShaderProgram>(index); 345 const auto program_enum = static_cast<Maxwell::ShaderProgram>(index);
@@ -371,32 +375,49 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
371 return {std::move(program), std::move(bindings)}; 375 return {std::move(program), std::move(bindings)};
372} 376}
373 377
374template <vk::DescriptorType descriptor_type, class Container> 378template <VkDescriptorType descriptor_type, class Container>
375void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries, u32& binding, 379void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32& binding,
376 u32& offset, const Container& container) { 380 u32& offset, const Container& container) {
377 static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry)); 381 static constexpr u32 entry_size = static_cast<u32>(sizeof(DescriptorUpdateEntry));
378 const u32 count = static_cast<u32>(std::size(container)); 382 const u32 count = static_cast<u32>(std::size(container));
379 383
380 if constexpr (descriptor_type == eCombinedImageSampler) { 384 if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
381 for (u32 i = 0; i < count; ++i) { 385 for (u32 i = 0; i < count; ++i) {
382 const u32 num_samplers = container[i].Size(); 386 const u32 num_samplers = container[i].Size();
383 template_entries.emplace_back(binding, 0, num_samplers, descriptor_type, offset, 387 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
384 entry_size); 388 entry.dstBinding = binding;
389 entry.dstArrayElement = 0;
390 entry.descriptorCount = num_samplers;
391 entry.descriptorType = descriptor_type;
392 entry.offset = offset;
393 entry.stride = entry_size;
394
385 ++binding; 395 ++binding;
386 offset += num_samplers * entry_size; 396 offset += num_samplers * entry_size;
387 } 397 }
388 return; 398 return;
389 } 399 }
390 400
391 if constexpr (descriptor_type == eUniformTexelBuffer) { 401 if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) {
392 // Nvidia has a bug where updating multiple uniform texels at once causes the driver to 402 // Nvidia has a bug where updating multiple uniform texels at once causes the driver to
393 // crash. 403 // crash.
394 for (u32 i = 0; i < count; ++i) { 404 for (u32 i = 0; i < count; ++i) {
395 template_entries.emplace_back(binding + i, 0, 1, descriptor_type, 405 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
396 offset + i * entry_size, entry_size); 406 entry.dstBinding = binding + i;
407 entry.dstArrayElement = 0;
408 entry.descriptorCount = 1;
409 entry.descriptorType = descriptor_type;
410 entry.offset = offset + i * entry_size;
411 entry.stride = entry_size;
397 } 412 }
398 } else if (count > 0) { 413 } else if (count > 0) {
399 template_entries.emplace_back(binding, 0, count, descriptor_type, offset, entry_size); 414 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
415 entry.dstBinding = binding;
416 entry.dstArrayElement = 0;
417 entry.descriptorCount = count;
418 entry.descriptorType = descriptor_type;
419 entry.offset = offset;
420 entry.stride = entry_size;
400 } 421 }
401 offset += count * entry_size; 422 offset += count * entry_size;
402 binding += count; 423 binding += count;
@@ -404,12 +425,12 @@ void AddEntry(std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries,
404 425
405void FillDescriptorUpdateTemplateEntries( 426void FillDescriptorUpdateTemplateEntries(
406 const ShaderEntries& entries, u32& binding, u32& offset, 427 const ShaderEntries& entries, u32& binding, u32& offset,
407 std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries) { 428 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
408 AddEntry<eUniformBuffer>(template_entries, offset, binding, entries.const_buffers); 429 AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
409 AddEntry<eStorageBuffer>(template_entries, offset, binding, entries.global_buffers); 430 AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
410 AddEntry<eUniformTexelBuffer>(template_entries, offset, binding, entries.texel_buffers); 431 AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers);
411 AddEntry<eCombinedImageSampler>(template_entries, offset, binding, entries.samplers); 432 AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
412 AddEntry<eStorageImage>(template_entries, offset, binding, entries.images); 433 AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
413} 434}
414 435
415} // namespace Vulkan 436} // namespace Vulkan
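
With Vulkan-Hpp gone, AddBindings and AddEntry fill the C structs field by field instead of passing constructor arguments. For reference, the same pattern with raw Vulkan calls, building a one-binding descriptor set layout (a minimal sketch; result checking elided):

    #include <cstdint>
    #include <vector>
    #include <vulkan/vulkan.h>

    VkDescriptorSetLayout MakeLayout(VkDevice device) {
        std::vector<VkDescriptorSetLayoutBinding> bindings;
        VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
        entry.binding = 0;
        entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        entry.descriptorCount = 1;
        entry.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
        entry.pImmutableSamplers = nullptr;

        VkDescriptorSetLayoutCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
        ci.bindingCount = static_cast<std::uint32_t>(bindings.size());
        ci.pBindings = bindings.data();

        VkDescriptorSetLayout layout = VK_NULL_HANDLE;
        vkCreateDescriptorSetLayout(device, &ci, nullptr, &layout);
        return layout;
    }
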
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 27c01732f..7ccdb7083 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -19,12 +19,12 @@
19#include "video_core/engines/const_buffer_engine_interface.h" 19#include "video_core/engines/const_buffer_engine_interface.h"
20#include "video_core/engines/maxwell_3d.h" 20#include "video_core/engines/maxwell_3d.h"
21#include "video_core/rasterizer_cache.h" 21#include "video_core/rasterizer_cache.h"
22#include "video_core/renderer_vulkan/declarations.h"
23#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 22#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
24#include "video_core/renderer_vulkan/vk_graphics_pipeline.h" 23#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
25#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 24#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
26#include "video_core/renderer_vulkan/vk_resource_manager.h" 25#include "video_core/renderer_vulkan/vk_resource_manager.h"
27#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 26#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
27#include "video_core/renderer_vulkan/wrapper.h"
28#include "video_core/shader/registry.h" 28#include "video_core/shader/registry.h"
29#include "video_core/shader/shader_ir.h" 29#include "video_core/shader/shader_ir.h"
30#include "video_core/surface.h" 30#include "video_core/surface.h"
@@ -172,7 +172,7 @@ protected:
172 void FlushObjectInner(const Shader& object) override {} 172 void FlushObjectInner(const Shader& object) override {}
173 173
174private: 174private:
175 std::pair<SPIRVProgram, std::vector<vk::DescriptorSetLayoutBinding>> DecompileShaders( 175 std::pair<SPIRVProgram, std::vector<VkDescriptorSetLayoutBinding>> DecompileShaders(
176 const GraphicsPipelineCacheKey& key); 176 const GraphicsPipelineCacheKey& key);
177 177
178 Core::System& system; 178 Core::System& system;
@@ -194,6 +194,6 @@ private:
194 194
195void FillDescriptorUpdateTemplateEntries( 195void FillDescriptorUpdateTemplateEntries(
196 const ShaderEntries& entries, u32& binding, u32& offset, 196 const ShaderEntries& entries, u32& binding, u32& offset,
197 std::vector<vk::DescriptorUpdateTemplateEntry>& template_entries); 197 std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries);
198 198
199} // namespace Vulkan 199} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index ffbf60dda..0966c7ff7 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -8,19 +8,19 @@
8#include <utility> 8#include <utility>
9#include <vector> 9#include <vector>
10 10
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_device.h" 11#include "video_core/renderer_vulkan/vk_device.h"
13#include "video_core/renderer_vulkan/vk_query_cache.h" 12#include "video_core/renderer_vulkan/vk_query_cache.h"
14#include "video_core/renderer_vulkan/vk_resource_manager.h" 13#include "video_core/renderer_vulkan/vk_resource_manager.h"
15#include "video_core/renderer_vulkan/vk_scheduler.h" 14#include "video_core/renderer_vulkan/vk_scheduler.h"
15#include "video_core/renderer_vulkan/wrapper.h"
16 16
17namespace Vulkan { 17namespace Vulkan {
18 18
19namespace { 19namespace {
20 20
21constexpr std::array QUERY_TARGETS = {vk::QueryType::eOcclusion}; 21constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};
22 22
23constexpr vk::QueryType GetTarget(VideoCore::QueryType type) { 23constexpr VkQueryType GetTarget(VideoCore::QueryType type) {
24 return QUERY_TARGETS[static_cast<std::size_t>(type)]; 24 return QUERY_TARGETS[static_cast<std::size_t>(type)];
25} 25}
26 26
@@ -35,29 +35,34 @@ void QueryPool::Initialize(const VKDevice& device_, VideoCore::QueryType type_)
35 type = type_; 35 type = type_;
36} 36}
37 37
38std::pair<vk::QueryPool, std::uint32_t> QueryPool::Commit(VKFence& fence) { 38std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
39 std::size_t index; 39 std::size_t index;
40 do { 40 do {
41 index = CommitResource(fence); 41 index = CommitResource(fence);
42 } while (usage[index]); 42 } while (usage[index]);
43 usage[index] = true; 43 usage[index] = true;
44 44
45 return {*pools[index / GROW_STEP], static_cast<std::uint32_t>(index % GROW_STEP)}; 45 return {*pools[index / GROW_STEP], static_cast<u32>(index % GROW_STEP)};
46} 46}
47 47
48void QueryPool::Allocate(std::size_t begin, std::size_t end) { 48void QueryPool::Allocate(std::size_t begin, std::size_t end) {
49 usage.resize(end); 49 usage.resize(end);
50 50
51 const auto dev = device->GetLogical(); 51 VkQueryPoolCreateInfo query_pool_ci;
52 const u32 size = static_cast<u32>(end - begin); 52 query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
53 const vk::QueryPoolCreateInfo query_pool_ci({}, GetTarget(type), size, {}); 53 query_pool_ci.pNext = nullptr;
54 pools.push_back(dev.createQueryPoolUnique(query_pool_ci, nullptr, device->GetDispatchLoader())); 54 query_pool_ci.flags = 0;
55 query_pool_ci.queryType = GetTarget(type);
56 query_pool_ci.queryCount = static_cast<u32>(end - begin);
57 query_pool_ci.pipelineStatistics = 0;
58 pools.push_back(device->GetLogical().CreateQueryPool(query_pool_ci));
55} 59}
56 60
57void QueryPool::Reserve(std::pair<vk::QueryPool, std::uint32_t> query) { 61void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
58 const auto it = 62 const auto it =
59 std::find_if(std::begin(pools), std::end(pools), 63 std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
60 [query_pool = query.first](auto& pool) { return query_pool == *pool; }); 64 return query_pool == *pool;
65 });
61 ASSERT(it != std::end(pools)); 66 ASSERT(it != std::end(pools));
62 67
63 const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it); 68 const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
@@ -76,12 +81,11 @@ VKQueryCache::VKQueryCache(Core::System& system, VideoCore::RasterizerInterface&
76 81
77VKQueryCache::~VKQueryCache() = default; 82VKQueryCache::~VKQueryCache() = default;
78 83
79std::pair<vk::QueryPool, std::uint32_t> VKQueryCache::AllocateQuery(VideoCore::QueryType type) { 84std::pair<VkQueryPool, u32> VKQueryCache::AllocateQuery(VideoCore::QueryType type) {
80 return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence()); 85 return query_pools[static_cast<std::size_t>(type)].Commit(scheduler.GetFence());
81} 86}
82 87
83void VKQueryCache::Reserve(VideoCore::QueryType type, 88void VKQueryCache::Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query) {
84 std::pair<vk::QueryPool, std::uint32_t> query) {
85 query_pools[static_cast<std::size_t>(type)].Reserve(query); 89 query_pools[static_cast<std::size_t>(type)].Reserve(query);
86} 90}
87 91
@@ -89,10 +93,10 @@ HostCounter::HostCounter(VKQueryCache& cache, std::shared_ptr<HostCounter> depen
89 VideoCore::QueryType type) 93 VideoCore::QueryType type)
90 : VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache}, 94 : VideoCommon::HostCounterBase<VKQueryCache, HostCounter>{std::move(dependency)}, cache{cache},
91 type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} { 95 type{type}, query{cache.AllocateQuery(type)}, ticks{cache.Scheduler().Ticks()} {
92 const auto dev = cache.Device().GetLogical(); 96 const vk::Device* logical = &cache.Device().GetLogical();
93 cache.Scheduler().Record([dev, query = query](vk::CommandBuffer cmdbuf, auto& dld) { 97 cache.Scheduler().Record([logical, query = query](vk::CommandBuffer cmdbuf) {
94 dev.resetQueryPoolEXT(query.first, query.second, 1, dld); 98 logical->ResetQueryPoolEXT(query.first, query.second, 1);
95 cmdbuf.beginQuery(query.first, query.second, vk::QueryControlFlagBits::ePrecise, dld); 99 cmdbuf.BeginQuery(query.first, query.second, VK_QUERY_CONTROL_PRECISE_BIT);
96 }); 100 });
97} 101}
98 102
@@ -101,22 +105,16 @@ HostCounter::~HostCounter() {
101} 105}
102 106
103void HostCounter::EndQuery() { 107void HostCounter::EndQuery() {
104 cache.Scheduler().Record([query = query](auto cmdbuf, auto& dld) { 108 cache.Scheduler().Record(
105 cmdbuf.endQuery(query.first, query.second, dld); 109 [query = query](vk::CommandBuffer cmdbuf) { cmdbuf.EndQuery(query.first, query.second); });
106 });
107} 110}
108 111
109u64 HostCounter::BlockingQuery() const { 112u64 HostCounter::BlockingQuery() const {
110 if (ticks >= cache.Scheduler().Ticks()) { 113 if (ticks >= cache.Scheduler().Ticks()) {
111 cache.Scheduler().Flush(); 114 cache.Scheduler().Flush();
112 } 115 }
113 116 return cache.Device().GetLogical().GetQueryResult<u64>(
114 const auto dev = cache.Device().GetLogical(); 117 query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
115 const auto& dld = cache.Device().GetDispatchLoader();
116 u64 value;
117 dev.getQueryPoolResults(query.first, query.second, 1, sizeof(value), &value, sizeof(value),
118 vk::QueryResultFlagBits::e64 | vk::QueryResultFlagBits::eWait, dld);
119 return value;
120} 118}
121 119
122} // namespace Vulkan 120} // namespace Vulkan
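
BlockingQuery above delegates to the wrapper's GetQueryResult<u64>, a thin layer over vkGetQueryPoolResults. Roughly what that call expands to in raw Vulkan (result check elided):

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // Reads back one 64-bit query result, blocking until it is available.
    std::uint64_t ReadQuery(VkDevice device, VkQueryPool pool, std::uint32_t query) {
        std::uint64_t value = 0;
        vkGetQueryPoolResults(device, pool, query, /*queryCount=*/1, sizeof(value),
                              &value, /*stride=*/sizeof(value),
                              VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        return value;
    }
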
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index c3092ee96..b63784f4b 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -12,8 +12,8 @@
12 12
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "video_core/query_cache.h" 14#include "video_core/query_cache.h"
15#include "video_core/renderer_vulkan/declarations.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h" 15#include "video_core/renderer_vulkan/vk_resource_manager.h"
16#include "video_core/renderer_vulkan/wrapper.h"
17 17
18namespace VideoCore { 18namespace VideoCore {
19class RasterizerInterface; 19class RasterizerInterface;
@@ -36,9 +36,9 @@ public:
36 36
37 void Initialize(const VKDevice& device, VideoCore::QueryType type); 37 void Initialize(const VKDevice& device, VideoCore::QueryType type);
38 38
39 std::pair<vk::QueryPool, std::uint32_t> Commit(VKFence& fence); 39 std::pair<VkQueryPool, u32> Commit(VKFence& fence);
40 40
41 void Reserve(std::pair<vk::QueryPool, std::uint32_t> query); 41 void Reserve(std::pair<VkQueryPool, u32> query);
42 42
43protected: 43protected:
44 void Allocate(std::size_t begin, std::size_t end) override; 44 void Allocate(std::size_t begin, std::size_t end) override;
@@ -49,7 +49,7 @@ private:
49 const VKDevice* device = nullptr; 49 const VKDevice* device = nullptr;
50 VideoCore::QueryType type = {}; 50 VideoCore::QueryType type = {};
51 51
52 std::vector<UniqueQueryPool> pools; 52 std::vector<vk::QueryPool> pools;
53 std::vector<bool> usage; 53 std::vector<bool> usage;
54}; 54};
55 55
@@ -61,9 +61,9 @@ public:
61 const VKDevice& device, VKScheduler& scheduler); 61 const VKDevice& device, VKScheduler& scheduler);
62 ~VKQueryCache(); 62 ~VKQueryCache();
63 63
64 std::pair<vk::QueryPool, std::uint32_t> AllocateQuery(VideoCore::QueryType type); 64 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
65 65
66 void Reserve(VideoCore::QueryType type, std::pair<vk::QueryPool, std::uint32_t> query); 66 void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);
67 67
68 const VKDevice& Device() const noexcept { 68 const VKDevice& Device() const noexcept {
69 return device; 69 return device;
@@ -91,7 +91,7 @@ private:
91 91
92 VKQueryCache& cache; 92 VKQueryCache& cache;
93 const VideoCore::QueryType type; 93 const VideoCore::QueryType type;
94 const std::pair<vk::QueryPool, std::uint32_t> query; 94 const std::pair<VkQueryPool, u32> query;
95 const u64 ticks; 95 const u64 ticks;
96}; 96};
97 97
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 6b99cbbbc..33cbc0bb6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -19,7 +19,6 @@
19#include "core/memory.h" 19#include "core/memory.h"
20#include "video_core/engines/kepler_compute.h" 20#include "video_core/engines/kepler_compute.h"
21#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
22#include "video_core/renderer_vulkan/declarations.h"
23#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 22#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
24#include "video_core/renderer_vulkan/maxwell_to_vk.h" 23#include "video_core/renderer_vulkan/maxwell_to_vk.h"
25#include "video_core/renderer_vulkan/renderer_vulkan.h" 24#include "video_core/renderer_vulkan/renderer_vulkan.h"
@@ -39,6 +38,7 @@
39#include "video_core/renderer_vulkan/vk_state_tracker.h" 38#include "video_core/renderer_vulkan/vk_state_tracker.h"
40#include "video_core/renderer_vulkan/vk_texture_cache.h" 39#include "video_core/renderer_vulkan/vk_texture_cache.h"
41#include "video_core/renderer_vulkan/vk_update_descriptor.h" 40#include "video_core/renderer_vulkan/vk_update_descriptor.h"
41#include "video_core/renderer_vulkan/wrapper.h"
42 42
43namespace Vulkan { 43namespace Vulkan {
44 44
@@ -60,32 +60,39 @@ namespace {
60 60
61constexpr auto ComputeShaderIndex = static_cast<std::size_t>(Tegra::Engines::ShaderType::Compute); 61constexpr auto ComputeShaderIndex = static_cast<std::size_t>(Tegra::Engines::ShaderType::Compute);
62 62
63vk::Viewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) { 63VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::size_t index) {
64 const auto& viewport = regs.viewport_transform[index]; 64 const auto& src = regs.viewport_transform[index];
65 const float x = viewport.translate_x - viewport.scale_x; 65 VkViewport viewport;
66 const float y = viewport.translate_y - viewport.scale_y; 66 viewport.x = src.translate_x - src.scale_x;
67 const float width = viewport.scale_x * 2.0f; 67 viewport.y = src.translate_y - src.scale_y;
68 const float height = viewport.scale_y * 2.0f; 68 viewport.width = src.scale_x * 2.0f;
69 viewport.height = src.scale_y * 2.0f;
69 70
70 const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; 71 const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne;
71 float near = viewport.translate_z - viewport.scale_z * reduce_z; 72 viewport.minDepth = src.translate_z - src.scale_z * reduce_z;
72 float far = viewport.translate_z + viewport.scale_z; 73 viewport.maxDepth = src.translate_z + src.scale_z;
73 if (!device.IsExtDepthRangeUnrestrictedSupported()) { 74 if (!device.IsExtDepthRangeUnrestrictedSupported()) {
74 near = std::clamp(near, 0.0f, 1.0f); 75 viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
75 far = std::clamp(far, 0.0f, 1.0f); 76 viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
76 } 77 }
77 78 return viewport;
78 return vk::Viewport(x, y, width != 0 ? width : 1.0f, height != 0 ? height : 1.0f, near, far);
79} 79}
80 80
81constexpr vk::Rect2D GetScissorState(const Maxwell& regs, std::size_t index) { 81VkRect2D GetScissorState(const Maxwell& regs, std::size_t index) {
82 const auto& scissor = regs.scissor_test[index]; 82 const auto& src = regs.scissor_test[index];
83 if (!scissor.enable) { 83 VkRect2D scissor;
84 return {{0, 0}, {INT32_MAX, INT32_MAX}}; 84 if (src.enable) {
85 scissor.offset.x = static_cast<s32>(src.min_x);
86 scissor.offset.y = static_cast<s32>(src.min_y);
87 scissor.extent.width = src.max_x - src.min_x;
88 scissor.extent.height = src.max_y - src.min_y;
89 } else {
90 scissor.offset.x = 0;
91 scissor.offset.y = 0;
92 scissor.extent.width = std::numeric_limits<s32>::max();
93 scissor.extent.height = std::numeric_limits<s32>::max();
85 } 94 }
86 const u32 width = scissor.max_x - scissor.min_x; 95 return scissor;
87 const u32 height = scissor.max_y - scissor.min_y;
88 return {{static_cast<s32>(scissor.min_x), static_cast<s32>(scissor.min_y)}, {width, height}};
89} 96}
90 97
91std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses( 98std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
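
GetViewportState above converts Maxwell's center/half-extent transform into Vulkan's corner/extent convention. As a worked example with made-up values: translate_x = 320 and scale_x = 320 give x = 0 and width = 640; with MinusOneToOne depth mode, translate_z = 0.5 and scale_z = 0.5 give minDepth = 0.0 and maxDepth = 1.0. A self-contained sketch of the same math:

    #include <algorithm>
    #include <vulkan/vulkan.h>

    // Maps a center/half-extent viewport transform to a VkViewport. When the
    // [-1, 1] depth mode is active the near plane is pushed down by scale_z;
    // without VK_EXT_depth_range_unrestricted, depths are clamped to [0, 1].
    VkViewport MakeViewport(float tx, float ty, float tz, float sx, float sy,
                            float sz, bool minus_one_to_one, bool unrestricted) {
        VkViewport viewport;
        viewport.x = tx - sx;
        viewport.y = ty - sy;
        viewport.width = sx * 2.0f;
        viewport.height = sy * 2.0f;
        const float reduce_z = minus_one_to_one ? 1.0f : 0.0f;
        viewport.minDepth = tz - sz * reduce_z;
        viewport.maxDepth = tz + sz;
        if (!unrestricted) {
            viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
            viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
        }
        return viewport;
    }
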
@@ -97,8 +104,8 @@ std::array<GPUVAddr, Maxwell::MaxShaderProgram> GetShaderAddresses(
97 return addresses; 104 return addresses;
98} 105}
99 106
100void TransitionImages(const std::vector<ImageView>& views, vk::PipelineStageFlags pipeline_stage, 107void TransitionImages(const std::vector<ImageView>& views, VkPipelineStageFlags pipeline_stage,
101 vk::AccessFlags access) { 108 VkAccessFlags access) {
102 for (auto& [view, layout] : views) { 109 for (auto& [view, layout] : views) {
103 view->Transition(*layout, pipeline_stage, access); 110 view->Transition(*layout, pipeline_stage, access);
104 } 111 }
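
TransitionImages forwards each view to Transition, which ends up recording a VkImageMemoryBarrier once the tracked state diverges. A self-contained sketch of such a barrier for a single color subresource (the subresource range and stage parameters here are illustrative assumptions):

    #include <vulkan/vulkan.h>

    // Records a layout/access transition for one image; the raw form of the
    // Transition calls issued above.
    void TransitionImage(VkCommandBuffer cmdbuf, VkImage image,
                         VkImageLayout old_layout, VkImageLayout new_layout,
                         VkAccessFlags src_access, VkAccessFlags dst_access,
                         VkPipelineStageFlags src_stage, VkPipelineStageFlags dst_stage) {
        VkImageMemoryBarrier barrier{};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = src_access;
        barrier.dstAccessMask = dst_access;
        barrier.oldLayout = old_layout;
        barrier.newLayout = new_layout;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
        vkCmdPipelineBarrier(cmdbuf, src_stage, dst_stage, 0, 0, nullptr,
                             0, nullptr, 1, &barrier);
    }
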
@@ -127,13 +134,13 @@ Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry
127 134
128class BufferBindings final { 135class BufferBindings final {
129public: 136public:
130 void AddVertexBinding(const vk::Buffer* buffer, vk::DeviceSize offset) { 137 void AddVertexBinding(const VkBuffer* buffer, VkDeviceSize offset) {
131 vertex.buffer_ptrs[vertex.num_buffers] = buffer; 138 vertex.buffer_ptrs[vertex.num_buffers] = buffer;
132 vertex.offsets[vertex.num_buffers] = offset; 139 vertex.offsets[vertex.num_buffers] = offset;
133 ++vertex.num_buffers; 140 ++vertex.num_buffers;
134 } 141 }
135 142
136 void SetIndexBinding(const vk::Buffer* buffer, vk::DeviceSize offset, vk::IndexType type) { 143 void SetIndexBinding(const VkBuffer* buffer, VkDeviceSize offset, VkIndexType type) {
137 index.buffer = buffer; 144 index.buffer = buffer;
138 index.offset = offset; 145 index.offset = offset;
139 index.type = type; 146 index.type = type;
@@ -217,14 +224,14 @@ private:
217 // Some of these fields are intentionally left uninitialized to avoid initializing them twice. 224 // Some of these fields are intentionally left uninitialized to avoid initializing them twice.
218 struct { 225 struct {
219 std::size_t num_buffers = 0; 226 std::size_t num_buffers = 0;
220 std::array<const vk::Buffer*, Maxwell::NumVertexArrays> buffer_ptrs; 227 std::array<const VkBuffer*, Maxwell::NumVertexArrays> buffer_ptrs;
221 std::array<vk::DeviceSize, Maxwell::NumVertexArrays> offsets; 228 std::array<VkDeviceSize, Maxwell::NumVertexArrays> offsets;
222 } vertex; 229 } vertex;
223 230
224 struct { 231 struct {
225 const vk::Buffer* buffer = nullptr; 232 const VkBuffer* buffer = nullptr;
226 vk::DeviceSize offset; 233 VkDeviceSize offset;
227 vk::IndexType type; 234 VkIndexType type;
228 } index; 235 } index;
229 236
230 template <std::size_t N> 237 template <std::size_t N>
@@ -243,38 +250,35 @@ private:
243 return; 250 return;
244 } 251 }
245 252
246 std::array<vk::Buffer, N> buffers; 253 std::array<VkBuffer, N> buffers;
247 std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(), 254 std::transform(vertex.buffer_ptrs.begin(), vertex.buffer_ptrs.begin() + N, buffers.begin(),
248 [](const auto ptr) { return *ptr; }); 255 [](const auto ptr) { return *ptr; });
249 256
250 std::array<vk::DeviceSize, N> offsets; 257 std::array<VkDeviceSize, N> offsets;
251 std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin()); 258 std::copy(vertex.offsets.begin(), vertex.offsets.begin() + N, offsets.begin());
252 259
253 if constexpr (is_indexed) { 260 if constexpr (is_indexed) {
254 // Indexed draw 261 // Indexed draw
255 scheduler.Record([buffers, offsets, index_buffer = *index.buffer, 262 scheduler.Record([buffers, offsets, index_buffer = *index.buffer,
256 index_offset = index.offset, 263 index_offset = index.offset,
257 index_type = index.type](auto cmdbuf, auto& dld) { 264 index_type = index.type](vk::CommandBuffer cmdbuf) {
258 cmdbuf.bindIndexBuffer(index_buffer, index_offset, index_type, dld); 265 cmdbuf.BindIndexBuffer(index_buffer, index_offset, index_type);
259 cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(), 266 cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
260 dld);
261 }); 267 });
262 } else { 268 } else {
263 // Array draw 269 // Array draw
264 scheduler.Record([buffers, offsets](auto cmdbuf, auto& dld) { 270 scheduler.Record([buffers, offsets](vk::CommandBuffer cmdbuf) {
265 cmdbuf.bindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data(), 271 cmdbuf.BindVertexBuffers(0, static_cast<u32>(N), buffers.data(), offsets.data());
266 dld);
267 }); 272 });
268 } 273 }
269 } 274 }
270}; 275};
271 276
272void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf, 277void RasterizerVulkan::DrawParameters::Draw(vk::CommandBuffer cmdbuf) const {
273 const vk::DispatchLoaderDynamic& dld) const {
274 if (is_indexed) { 278 if (is_indexed) {
275 cmdbuf.drawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance, dld); 279 cmdbuf.DrawIndexed(num_vertices, num_instances, 0, base_vertex, base_instance);
276 } else { 280 } else {
277 cmdbuf.draw(num_vertices, num_instances, base_vertex, base_instance, dld); 281 cmdbuf.Draw(num_vertices, num_instances, base_vertex, base_instance);
278 } 282 }
279} 283}
280 284
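
Everything from Draw onwards records GPU work through value-capturing lambdas that now receive the wrapper's vk::CommandBuffer directly, with no dispatch-loader argument. A toy model of that deferred-recording pattern, assuming nothing about the real VKScheduler beyond what the call sites show:

    #include <functional>
    #include <utility>
    #include <vector>

    struct CommandBuffer {}; // Stand-in for the wrapper's vk::CommandBuffer.

    // Lambdas capture their inputs by value at Record() time and are replayed
    // later against whichever command buffer is active, as VKScheduler does.
    class Scheduler {
    public:
        template <typename F>
        void Record(F&& func) {
            queue.emplace_back(std::forward<F>(func));
        }

        void Flush(CommandBuffer cmdbuf) {
            for (auto& func : queue) {
                func(cmdbuf);
            }
            queue.clear();
        }

    private:
        std::vector<std::function<void(CommandBuffer)>> queue;
    };
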
@@ -337,7 +341,7 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
337 341
338 const auto renderpass = pipeline.GetRenderPass(); 342 const auto renderpass = pipeline.GetRenderPass();
339 const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass); 343 const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
340 scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr}); 344 scheduler.RequestRenderpass(renderpass, framebuffer, render_area);
341 345
342 UpdateDynamicStates(); 346 UpdateDynamicStates();
343 347
@@ -345,19 +349,19 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
345 349
346 if (device.IsNvDeviceDiagnosticCheckpoints()) { 350 if (device.IsNvDeviceDiagnosticCheckpoints()) {
347 scheduler.Record( 351 scheduler.Record(
348 [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(&pipeline, dld); }); 352 [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
349 } 353 }
350 354
351 BeginTransformFeedback(); 355 BeginTransformFeedback();
352 356
353 const auto pipeline_layout = pipeline.GetLayout(); 357 const auto pipeline_layout = pipeline.GetLayout();
354 const auto descriptor_set = pipeline.CommitDescriptorSet(); 358 const auto descriptor_set = pipeline.CommitDescriptorSet();
355 scheduler.Record([pipeline_layout, descriptor_set, draw_params](auto cmdbuf, auto& dld) { 359 scheduler.Record([pipeline_layout, descriptor_set, draw_params](vk::CommandBuffer cmdbuf) {
356 if (descriptor_set) { 360 if (descriptor_set) {
357 cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipeline_layout, 361 cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
358 DESCRIPTOR_SET, 1, &descriptor_set, 0, nullptr, dld); 362 DESCRIPTOR_SET, descriptor_set, {});
359 } 363 }
360 draw_params.Draw(cmdbuf, dld); 364 draw_params.Draw(cmdbuf);
361 }); 365 });
362 366
363 EndTransformFeedback(); 367 EndTransformFeedback();
@@ -389,48 +393,54 @@ void RasterizerVulkan::Clear() {
389 DEBUG_ASSERT(texceptions.none()); 393 DEBUG_ASSERT(texceptions.none());
390 SetupImageTransitions(0, color_attachments, zeta_attachment); 394 SetupImageTransitions(0, color_attachments, zeta_attachment);
391 395
392 const vk::RenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0)); 396 const VkRenderPass renderpass = renderpass_cache.GetRenderPass(GetRenderPassParams(0));
393 const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass); 397 const auto [framebuffer, render_area] = ConfigureFramebuffers(renderpass);
394 scheduler.RequestRenderpass({renderpass, framebuffer, {{0, 0}, render_area}, 0, nullptr}); 398 scheduler.RequestRenderpass(renderpass, framebuffer, render_area);
395
396 const auto& scissor = regs.scissor_test[0];
397 const vk::Offset2D scissor_offset(scissor.min_x, scissor.min_y);
398 vk::Extent2D scissor_extent{scissor.max_x - scissor.min_x, scissor.max_y - scissor.min_y};
399 scissor_extent.width = std::min(scissor_extent.width, render_area.width);
400 scissor_extent.height = std::min(scissor_extent.height, render_area.height);
401 399
402 const u32 layer = regs.clear_buffers.layer; 400 VkClearRect clear_rect;
403 const vk::ClearRect clear_rect({scissor_offset, scissor_extent}, layer, 1); 401 clear_rect.baseArrayLayer = regs.clear_buffers.layer;
402 clear_rect.layerCount = 1;
403 clear_rect.rect = GetScissorState(regs, 0);
404 clear_rect.rect.extent.width = std::min(clear_rect.rect.extent.width, render_area.width);
405 clear_rect.rect.extent.height = std::min(clear_rect.rect.extent.height, render_area.height);
404 406
405 if (use_color) { 407 if (use_color) {
406 const std::array clear_color = {regs.clear_color[0], regs.clear_color[1], 408 VkClearValue clear_value;
407 regs.clear_color[2], regs.clear_color[3]}; 409 std::memcpy(clear_value.color.float32, regs.clear_color, sizeof(regs.clear_color));
408 const vk::ClearValue clear_value{clear_color}; 410
409 const u32 color_attachment = regs.clear_buffers.RT; 411 const u32 color_attachment = regs.clear_buffers.RT;
410 scheduler.Record([color_attachment, clear_value, clear_rect](auto cmdbuf, auto& dld) { 412 scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
411 const vk::ClearAttachment attachment(vk::ImageAspectFlagBits::eColor, color_attachment, 413 VkClearAttachment attachment;
412 clear_value); 414 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
413 cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld); 415 attachment.colorAttachment = color_attachment;
416 attachment.clearValue = clear_value;
417 cmdbuf.ClearAttachments(attachment, clear_rect);
414 }); 418 });
415 } 419 }
416 420
417 if (!use_depth && !use_stencil) { 421 if (!use_depth && !use_stencil) {
418 return; 422 return;
419 } 423 }
420 vk::ImageAspectFlags aspect_flags; 424 VkImageAspectFlags aspect_flags = 0;
421 if (use_depth) { 425 if (use_depth) {
422 aspect_flags |= vk::ImageAspectFlagBits::eDepth; 426 aspect_flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
423 } 427 }
424 if (use_stencil) { 428 if (use_stencil) {
425 aspect_flags |= vk::ImageAspectFlagBits::eStencil; 429 aspect_flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
426 } 430 }
427 431
428 scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil, 432 scheduler.Record([clear_depth = regs.clear_depth, clear_stencil = regs.clear_stencil,
429 clear_rect, aspect_flags](auto cmdbuf, auto& dld) { 433 clear_rect, aspect_flags](vk::CommandBuffer cmdbuf) {
430 const vk::ClearDepthStencilValue clear_zeta(clear_depth, clear_stencil); 434 VkClearValue clear_value;
431 const vk::ClearValue clear_value{clear_zeta}; 435 clear_value.depthStencil.depth = clear_depth;
432 const vk::ClearAttachment attachment(aspect_flags, 0, clear_value); 436 clear_value.depthStencil.stencil = clear_stencil;
433 cmdbuf.clearAttachments(1, &attachment, 1, &clear_rect, dld); 437
438 VkClearAttachment attachment;
439 attachment.aspectMask = aspect_flags;
440 attachment.colorAttachment = 0;
441 attachment.clearValue.depthStencil.depth = clear_depth;
442 attachment.clearValue.depthStencil.stencil = clear_stencil;
443 cmdbuf.ClearAttachments(attachment, clear_rect);
434 }); 444 });
435} 445}
436 446
@@ -463,24 +473,24 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
 
     buffer_cache.Unmap();
 
-    TransitionImages(sampled_views, vk::PipelineStageFlagBits::eComputeShader,
-                     vk::AccessFlagBits::eShaderRead);
-    TransitionImages(image_views, vk::PipelineStageFlagBits::eComputeShader,
-                     vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+    TransitionImages(sampled_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                     VK_ACCESS_SHADER_READ_BIT);
+    TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                     VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
 
     if (device.IsNvDeviceDiagnosticCheckpoints()) {
         scheduler.Record(
-            [&pipeline](auto cmdbuf, auto& dld) { cmdbuf.setCheckpointNV(nullptr, dld); });
+            [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
     }
 
     scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
                       grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
                       layout = pipeline.GetLayout(),
-                      descriptor_set = pipeline.CommitDescriptorSet()](auto cmdbuf, auto& dld) {
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eCompute, pipeline_handle, dld);
-        cmdbuf.bindDescriptorSets(vk::PipelineBindPoint::eCompute, layout, DESCRIPTOR_SET, 1,
-                                  &descriptor_set, 0, nullptr, dld);
-        cmdbuf.dispatch(grid_x, grid_y, grid_z, dld);
+                      descriptor_set = pipeline.CommitDescriptorSet()](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, pipeline_handle);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, layout, DESCRIPTOR_SET,
+                                  descriptor_set, {});
+        cmdbuf.Dispatch(grid_x, grid_y, grid_z);
     });
 }
 
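The compute path above is the same three raw commands every Vulkan dispatch needs: bind pipeline, bind descriptor sets, dispatch. A self-contained sketch with hypothetical parameter names (the wrapper methods BindPipeline/BindDescriptorSets/Dispatch forward to these entry points):

    #include <vulkan/vulkan.h>

    void RecordCompute(VkCommandBuffer cmdbuf, VkPipeline pipeline, VkPipelineLayout layout,
                       VkDescriptorSet set, uint32_t x, uint32_t y, uint32_t z) {
        vkCmdBindPipeline(cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE, pipeline);
        // First set index 0, one set, no dynamic offsets
        vkCmdBindDescriptorSets(cmdbuf, VK_PIPELINE_BIND_POINT_COMPUTE, layout,
                                0, 1, &set, 0, nullptr);
        vkCmdDispatch(cmdbuf, x, y, z); // Workgroup counts from the launch descriptor
    }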
@@ -625,13 +635,13 @@ bool RasterizerVulkan::WalkAttachmentOverlaps(const CachedSurfaceView& attachmen
             continue;
         }
         overlap = true;
-        *layout = vk::ImageLayout::eGeneral;
+        *layout = VK_IMAGE_LAYOUT_GENERAL;
     }
     return overlap;
 }
 
-std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffers(
-    vk::RenderPass renderpass) {
+std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
+    VkRenderPass renderpass) {
     FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
                             std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
 
@@ -658,15 +668,20 @@ std::tuple<vk::Framebuffer, vk::Extent2D> RasterizerVulkan::ConfigureFramebuffer
     const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
     auto& framebuffer = fbentry->second;
     if (is_cache_miss) {
-        const vk::FramebufferCreateInfo framebuffer_ci(
-            {}, key.renderpass, static_cast<u32>(key.views.size()), key.views.data(), key.width,
-            key.height, key.layers);
-        const auto dev = device.GetLogical();
-        const auto& dld = device.GetDispatchLoader();
-        framebuffer = dev.createFramebufferUnique(framebuffer_ci, nullptr, dld);
-    }
-
-    return {*framebuffer, vk::Extent2D{key.width, key.height}};
+        VkFramebufferCreateInfo framebuffer_ci;
+        framebuffer_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+        framebuffer_ci.pNext = nullptr;
+        framebuffer_ci.flags = 0;
+        framebuffer_ci.renderPass = key.renderpass;
+        framebuffer_ci.attachmentCount = static_cast<u32>(key.views.size());
+        framebuffer_ci.pAttachments = key.views.data();
+        framebuffer_ci.width = key.width;
+        framebuffer_ci.height = key.height;
+        framebuffer_ci.layers = key.layers;
+        framebuffer = device.GetLogical().CreateFramebuffer(framebuffer_ci);
+    }
+
+    return {*framebuffer, VkExtent2D{key.width, key.height}};
 }
 
 RasterizerVulkan::DrawParameters RasterizerVulkan::SetupGeometry(FixedPipelineState& fixed_state,
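The cache-miss branch above is a plain vkCreateFramebuffer under the wrapper. A self-contained sketch of that call (error handling elided; a real caller should check the VkResult):

    #include <vulkan/vulkan.h>

    VkFramebuffer MakeFramebuffer(VkDevice device, VkRenderPass renderpass,
                                  const VkImageView* views, uint32_t num_views,
                                  uint32_t width, uint32_t height, uint32_t layers) {
        VkFramebufferCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
        ci.renderPass = renderpass; // Must be compatible with the pass used to render
        ci.attachmentCount = num_views;
        ci.pAttachments = views;
        ci.width = width;
        ci.height = height;
        ci.layers = layers;
        VkFramebuffer framebuffer = VK_NULL_HANDLE;
        vkCreateFramebuffer(device, &ci, nullptr, &framebuffer);
        return framebuffer;
    }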
@@ -714,10 +729,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
 void RasterizerVulkan::SetupImageTransitions(
     Texceptions texceptions, const std::array<View, Maxwell::NumRenderTargets>& color_attachments,
     const View& zeta_attachment) {
-    TransitionImages(sampled_views, vk::PipelineStageFlagBits::eAllGraphics,
-                     vk::AccessFlagBits::eShaderRead);
-    TransitionImages(image_views, vk::PipelineStageFlagBits::eAllGraphics,
-                     vk::AccessFlagBits::eShaderRead | vk::AccessFlagBits::eShaderWrite);
+    TransitionImages(sampled_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_ACCESS_SHADER_READ_BIT);
+    TransitionImages(image_views, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+                     VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
 
     for (std::size_t rt = 0; rt < std::size(color_attachments); ++rt) {
         const auto color_attachment = color_attachments[rt];
@@ -725,19 +739,19 @@ void RasterizerVulkan::SetupImageTransitions(
             continue;
         }
         const auto image_layout =
-            texceptions[rt] ? vk::ImageLayout::eGeneral : vk::ImageLayout::eColorAttachmentOptimal;
-        color_attachment->Transition(
-            image_layout, vk::PipelineStageFlagBits::eColorAttachmentOutput,
-            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite);
+            texceptions[rt] ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        color_attachment->Transition(image_layout, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+                                     VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+                                         VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
     }
 
     if (zeta_attachment != nullptr) {
         const auto image_layout = texceptions[ZETA_TEXCEPTION_INDEX]
-                                      ? vk::ImageLayout::eGeneral
-                                      : vk::ImageLayout::eDepthStencilAttachmentOptimal;
-        zeta_attachment->Transition(image_layout, vk::PipelineStageFlagBits::eLateFragmentTests,
-                                    vk::AccessFlagBits::eDepthStencilAttachmentRead |
-                                        vk::AccessFlagBits::eDepthStencilAttachmentWrite);
+                                      ? VK_IMAGE_LAYOUT_GENERAL
+                                      : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+        zeta_attachment->Transition(image_layout, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
+                                    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                                        VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
     }
 }
 
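The Transition() calls above ultimately record image memory barriers. A rough sketch of one such layout transition in raw Vulkan; the stage, access, and layout values here are examples, not the patch's exact barriers:

    #include <vulkan/vulkan.h>

    void TransitionToColorAttachment(VkCommandBuffer cmdbuf, VkImage image) {
        VkImageMemoryBarrier barrier{};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = 0;
        barrier.dstAccessMask =
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
        vkCmdPipelineBarrier(cmdbuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0,
                             0, nullptr, 0, nullptr, 1, &barrier);
    }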
@@ -773,9 +787,9 @@ void RasterizerVulkan::BeginTransformFeedback() {
     const std::size_t size = binding.buffer_size;
     const auto [buffer, offset] = buffer_cache.UploadMemory(gpu_addr, size, 4, true);
 
-    scheduler.Record([buffer = *buffer, offset = offset, size](auto cmdbuf, auto& dld) {
-        cmdbuf.bindTransformFeedbackBuffersEXT(0, {buffer}, {offset}, {size}, dld);
-        cmdbuf.beginTransformFeedbackEXT(0, {}, {}, dld);
+    scheduler.Record([buffer = *buffer, offset = offset, size](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindTransformFeedbackBuffersEXT(0, 1, &buffer, &offset, &size);
+        cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
     });
 }
 
@@ -786,7 +800,7 @@ void RasterizerVulkan::EndTransformFeedback() {
     }
 
     scheduler.Record(
-        [](auto cmdbuf, auto& dld) { cmdbuf.endTransformFeedbackEXT(0, {}, {}, dld); });
+        [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
 }
 
 void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex_input,
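The transform feedback commands in the two hunks above come from VK_EXT_transform_feedback; the entry points are extension functions and must be loaded dynamically. An illustrative sketch using pre-loaded function pointers (names of the helper and its parameters are hypothetical):

    #include <vulkan/vulkan.h>

    void BeginXfb(VkCommandBuffer cmdbuf, PFN_vkCmdBindTransformFeedbackBuffersEXT bind,
                  PFN_vkCmdBeginTransformFeedbackEXT begin, VkBuffer buffer,
                  VkDeviceSize offset, VkDeviceSize size) {
        bind(cmdbuf, 0, 1, &buffer, &offset, &size); // Binding 0, one buffer
        begin(cmdbuf, 0, 0, nullptr, nullptr);       // No counter buffers
    }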
@@ -837,7 +851,7 @@ void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawPar
     } else {
         const auto [buffer, offset] =
             quad_array_pass.Assemble(params.num_vertices, params.base_vertex);
-        buffer_bindings.SetIndexBinding(&buffer, offset, vk::IndexType::eUint32);
+        buffer_bindings.SetIndexBinding(buffer, offset, VK_INDEX_TYPE_UINT32);
         params.base_vertex = 0;
         params.num_vertices = params.num_vertices * 6 / 4;
         params.is_indexed = true;
@@ -1022,7 +1036,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
     update_descriptor_queue.AddSampledImage(sampler, image_view);
 
     const auto image_layout = update_descriptor_queue.GetLastImageLayout();
-    *image_layout = vk::ImageLayout::eShaderReadOnlyOptimal;
+    *image_layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
     sampled_views.push_back(ImageView{std::move(view), image_layout});
 }
 
@@ -1039,7 +1053,7 @@ void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const Ima
     update_descriptor_queue.AddImage(image_view);
 
     const auto image_layout = update_descriptor_queue.GetLastImageLayout();
-    *image_layout = vk::ImageLayout::eGeneral;
+    *image_layout = VK_IMAGE_LAYOUT_GENERAL;
     image_views.push_back(ImageView{std::move(view), image_layout});
 }
 
@@ -1056,9 +1070,7 @@ void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& reg
         GetViewportState(device, regs, 10), GetViewportState(device, regs, 11),
         GetViewportState(device, regs, 12), GetViewportState(device, regs, 13),
         GetViewportState(device, regs, 14), GetViewportState(device, regs, 15)};
-    scheduler.Record([viewports](auto cmdbuf, auto& dld) {
-        cmdbuf.setViewport(0, static_cast<u32>(viewports.size()), viewports.data(), dld);
-    });
+    scheduler.Record([viewports](vk::CommandBuffer cmdbuf) { cmdbuf.SetViewport(0, viewports); });
 }
 
 void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1072,9 +1084,7 @@ void RasterizerVulkan::UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs
         GetScissorState(regs, 9), GetScissorState(regs, 10), GetScissorState(regs, 11),
         GetScissorState(regs, 12), GetScissorState(regs, 13), GetScissorState(regs, 14),
         GetScissorState(regs, 15)};
-    scheduler.Record([scissors](auto cmdbuf, auto& dld) {
-        cmdbuf.setScissor(0, static_cast<u32>(scissors.size()), scissors.data(), dld);
-    });
+    scheduler.Record([scissors](vk::CommandBuffer cmdbuf) { cmdbuf.SetScissor(0, scissors); });
 }
 
 void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1082,8 +1092,8 @@ void RasterizerVulkan::UpdateDepthBias(Tegra::Engines::Maxwell3D::Regs& regs) {
         return;
     }
     scheduler.Record([constant = regs.polygon_offset_units, clamp = regs.polygon_offset_clamp,
-                      factor = regs.polygon_offset_factor](auto cmdbuf, auto& dld) {
-        cmdbuf.setDepthBias(constant, clamp, factor / 2.0f, dld);
+                      factor = regs.polygon_offset_factor](vk::CommandBuffer cmdbuf) {
+        cmdbuf.SetDepthBias(constant, clamp, factor / 2.0f);
     });
 }
 
@@ -1093,9 +1103,8 @@ void RasterizerVulkan::UpdateBlendConstants(Tegra::Engines::Maxwell3D::Regs& reg
     }
     const std::array blend_color = {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b,
                                     regs.blend_color.a};
-    scheduler.Record([blend_color](auto cmdbuf, auto& dld) {
-        cmdbuf.setBlendConstants(blend_color.data(), dld);
-    });
+    scheduler.Record(
+        [blend_color](vk::CommandBuffer cmdbuf) { cmdbuf.SetBlendConstants(blend_color.data()); });
 }
 
 void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1103,7 +1112,7 @@ void RasterizerVulkan::UpdateDepthBounds(Tegra::Engines::Maxwell3D::Regs& regs)
         return;
     }
     scheduler.Record([min = regs.depth_bounds[0], max = regs.depth_bounds[1]](
-                         auto cmdbuf, auto& dld) { cmdbuf.setDepthBounds(min, max, dld); });
+                         vk::CommandBuffer cmdbuf) { cmdbuf.SetDepthBounds(min, max); });
 }
 
 void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs) {
@@ -1116,24 +1125,24 @@ void RasterizerVulkan::UpdateStencilFaces(Tegra::Engines::Maxwell3D::Regs& regs)
             [front_ref = regs.stencil_front_func_ref, front_write_mask = regs.stencil_front_mask,
              front_test_mask = regs.stencil_front_func_mask, back_ref = regs.stencil_back_func_ref,
              back_write_mask = regs.stencil_back_mask,
-             back_test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
+             back_test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
                 // Front face
-                cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFront, front_ref, dld);
-                cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFront, front_write_mask, dld);
-                cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFront, front_test_mask, dld);
+                cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_BIT, front_ref);
+                cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_BIT, front_write_mask);
+                cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_BIT, front_test_mask);
 
                 // Back face
-                cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eBack, back_ref, dld);
-                cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eBack, back_write_mask, dld);
-                cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eBack, back_test_mask, dld);
+                cmdbuf.SetStencilReference(VK_STENCIL_FACE_BACK_BIT, back_ref);
+                cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_BACK_BIT, back_write_mask);
+                cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_BACK_BIT, back_test_mask);
             });
     } else {
         // Front face defines both faces
         scheduler.Record([ref = regs.stencil_back_func_ref, write_mask = regs.stencil_back_mask,
-                          test_mask = regs.stencil_back_func_mask](auto cmdbuf, auto& dld) {
-            cmdbuf.setStencilReference(vk::StencilFaceFlagBits::eFrontAndBack, ref, dld);
-            cmdbuf.setStencilWriteMask(vk::StencilFaceFlagBits::eFrontAndBack, write_mask, dld);
-            cmdbuf.setStencilCompareMask(vk::StencilFaceFlagBits::eFrontAndBack, test_mask, dld);
+                          test_mask = regs.stencil_back_func_mask](vk::CommandBuffer cmdbuf) {
+            cmdbuf.SetStencilReference(VK_STENCIL_FACE_FRONT_AND_BACK, ref);
+            cmdbuf.SetStencilWriteMask(VK_STENCIL_FACE_FRONT_AND_BACK, write_mask);
+            cmdbuf.SetStencilCompareMask(VK_STENCIL_FACE_FRONT_AND_BACK, test_mask);
         });
     }
 }
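All the dynamic-state hunks above share one shape: capture the Maxwell register values in the lambda, then record one raw command per piece of state. For the stencil case, the underlying core-1.0 calls look roughly like this (values are placeholders):

    #include <vulkan/vulkan.h>

    void SetFrontStencilState(VkCommandBuffer cmdbuf, uint32_t ref, uint32_t write_mask,
                              uint32_t test_mask) {
        vkCmdSetStencilReference(cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ref);
        vkCmdSetStencilWriteMask(cmdbuf, VK_STENCIL_FACE_FRONT_BIT, write_mask);
        vkCmdSetStencilCompareMask(cmdbuf, VK_STENCIL_FACE_FRONT_BIT, test_mask);
    }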
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index f642dde76..46037860a 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -17,7 +17,6 @@
 #include "video_core/memory_manager.h"
 #include "video_core/rasterizer_accelerated.h"
 #include "video_core/rasterizer_interface.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/fixed_pipeline_state.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_compute_pass.h"
@@ -32,6 +31,7 @@
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
 #include "video_core/renderer_vulkan/vk_update_descriptor.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Core {
 class System;
@@ -49,11 +49,10 @@ namespace Vulkan {
 
 struct VKScreenInfo;
 
-using ImageViewsPack =
-    boost::container::static_vector<vk::ImageView, Maxwell::NumRenderTargets + 1>;
+using ImageViewsPack = boost::container::static_vector<VkImageView, Maxwell::NumRenderTargets + 1>;
 
 struct FramebufferCacheKey {
-    vk::RenderPass renderpass{};
+    VkRenderPass renderpass{};
     u32 width = 0;
     u32 height = 0;
     u32 layers = 0;
@@ -101,7 +100,7 @@ class BufferBindings;
 
 struct ImageView {
     View view;
-    vk::ImageLayout* layout = nullptr;
+    VkImageLayout* layout = nullptr;
 };
 
 class RasterizerVulkan final : public VideoCore::RasterizerAccelerated {
@@ -137,7 +136,7 @@ public:
 
 private:
     struct DrawParameters {
-        void Draw(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld) const;
+        void Draw(vk::CommandBuffer cmdbuf) const;
 
         u32 base_instance = 0;
         u32 num_instances = 0;
@@ -154,7 +153,7 @@ private:
 
     Texceptions UpdateAttachments();
 
-    std::tuple<vk::Framebuffer, vk::Extent2D> ConfigureFramebuffers(vk::RenderPass renderpass);
+    std::tuple<VkFramebuffer, VkExtent2D> ConfigureFramebuffers(VkRenderPass renderpass);
 
     /// Setups geometry buffers and state.
     DrawParameters SetupGeometry(FixedPipelineState& fixed_state, BufferBindings& buffer_bindings,
@@ -272,7 +271,7 @@ private:
     u32 draw_counter = 0;
 
     // TODO(Rodrigo): Invalidate on image destruction
-    std::unordered_map<FramebufferCacheKey, UniqueFramebuffer> framebuffer_cache;
+    std::unordered_map<FramebufferCacheKey, vk::Framebuffer> framebuffer_cache;
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 93f5d7ba0..4e5286a69 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -6,10 +6,10 @@
 #include <vector>
 
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_renderpass_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Vulkan {
 
@@ -17,7 +17,7 @@ VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
 
 VKRenderPassCache::~VKRenderPassCache() = default;
 
-vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
+VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
     const auto [pair, is_cache_miss] = cache.try_emplace(params);
     auto& entry = pair->second;
     if (is_cache_miss) {
@@ -26,9 +26,9 @@ vk::RenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params)
     return *entry;
 }
 
-UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
-    std::vector<vk::AttachmentDescription> descriptors;
-    std::vector<vk::AttachmentReference> color_references;
+vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
+    std::vector<VkAttachmentDescription> descriptors;
+    std::vector<VkAttachmentReference> color_references;
 
     for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) {
         const auto attachment = params.color_attachments[rt];
@@ -39,16 +39,25 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
 
         // TODO(Rodrigo): Add eMayAlias when it's needed.
         const auto color_layout = attachment.is_texception
-                                      ? vk::ImageLayout::eGeneral
-                                      : vk::ImageLayout::eColorAttachmentOptimal;
-        descriptors.emplace_back(vk::AttachmentDescriptionFlagBits::eMayAlias, format.format,
-                                 vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eDontCare,
-                                 vk::AttachmentStoreOp::eDontCare, color_layout, color_layout);
-        color_references.emplace_back(static_cast<u32>(rt), color_layout);
+                                      ? VK_IMAGE_LAYOUT_GENERAL
+                                      : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+        VkAttachmentDescription& descriptor = descriptors.emplace_back();
+        descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
+        descriptor.format = format.format;
+        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+        descriptor.initialLayout = color_layout;
+        descriptor.finalLayout = color_layout;
+
+        VkAttachmentReference& reference = color_references.emplace_back();
+        reference.attachment = static_cast<u32>(rt);
+        reference.layout = color_layout;
     }
 
-    vk::AttachmentReference zeta_attachment_ref;
+    VkAttachmentReference zeta_attachment_ref;
     if (params.has_zeta) {
         const auto format =
             MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format);
@@ -56,45 +65,68 @@ UniqueRenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& par
             static_cast<u32>(params.zeta_pixel_format));
 
         const auto zeta_layout = params.zeta_texception
-                                     ? vk::ImageLayout::eGeneral
-                                     : vk::ImageLayout::eDepthStencilAttachmentOptimal;
-        descriptors.emplace_back(vk::AttachmentDescriptionFlags{}, format.format,
-                                 vk::SampleCountFlagBits::e1, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, vk::AttachmentLoadOp::eLoad,
-                                 vk::AttachmentStoreOp::eStore, zeta_layout, zeta_layout);
-        zeta_attachment_ref =
-            vk::AttachmentReference(static_cast<u32>(params.color_attachments.size()), zeta_layout);
+                                     ? VK_IMAGE_LAYOUT_GENERAL
+                                     : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+        VkAttachmentDescription& descriptor = descriptors.emplace_back();
+        descriptor.flags = 0;
+        descriptor.format = format.format;
+        descriptor.samples = VK_SAMPLE_COUNT_1_BIT;
+        descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+        descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+        descriptor.initialLayout = zeta_layout;
+        descriptor.finalLayout = zeta_layout;
+
+        zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size());
+        zeta_attachment_ref.layout = zeta_layout;
     }
 
-    const vk::SubpassDescription subpass_description(
-        {}, vk::PipelineBindPoint::eGraphics, 0, nullptr, static_cast<u32>(color_references.size()),
-        color_references.data(), nullptr, params.has_zeta ? &zeta_attachment_ref : nullptr, 0,
-        nullptr);
-
-    vk::AccessFlags access;
-    vk::PipelineStageFlags stage;
+    VkSubpassDescription subpass_description;
+    subpass_description.flags = 0;
+    subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+    subpass_description.inputAttachmentCount = 0;
+    subpass_description.pInputAttachments = nullptr;
+    subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
+    subpass_description.pColorAttachments = color_references.data();
+    subpass_description.pResolveAttachments = nullptr;
+    subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr;
+    subpass_description.preserveAttachmentCount = 0;
+    subpass_description.pPreserveAttachments = nullptr;
+
+    VkAccessFlags access = 0;
+    VkPipelineStageFlags stage = 0;
     if (!color_references.empty()) {
-        access |=
-            vk::AccessFlagBits::eColorAttachmentRead | vk::AccessFlagBits::eColorAttachmentWrite;
-        stage |= vk::PipelineStageFlagBits::eColorAttachmentOutput;
+        access |= VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+        stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
     }
 
     if (params.has_zeta) {
-        access |= vk::AccessFlagBits::eDepthStencilAttachmentRead |
-                  vk::AccessFlagBits::eDepthStencilAttachmentWrite;
-        stage |= vk::PipelineStageFlagBits::eLateFragmentTests;
+        access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+                  VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+        stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
     }
 
-    const vk::SubpassDependency subpass_dependency(VK_SUBPASS_EXTERNAL, 0, stage, stage, {}, access,
-                                                   {});
-
-    const vk::RenderPassCreateInfo create_info({}, static_cast<u32>(descriptors.size()),
-                                               descriptors.data(), 1, &subpass_description, 1,
-                                               &subpass_dependency);
-
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    return dev.createRenderPassUnique(create_info, nullptr, dld);
+    VkSubpassDependency subpass_dependency;
+    subpass_dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
+    subpass_dependency.dstSubpass = 0;
+    subpass_dependency.srcStageMask = stage;
+    subpass_dependency.dstStageMask = stage;
+    subpass_dependency.srcAccessMask = 0;
+    subpass_dependency.dstAccessMask = access;
+    subpass_dependency.dependencyFlags = 0;
+
+    VkRenderPassCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.attachmentCount = static_cast<u32>(descriptors.size());
+    ci.pAttachments = descriptors.data();
+    ci.subpassCount = 1;
+    ci.pSubpasses = &subpass_description;
+    ci.dependencyCount = 1;
+    ci.pDependencies = &subpass_dependency;
+    return device.GetLogical().CreateRenderPass(ci);
 }
 
 } // namespace Vulkan
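GetRenderPass above (and ConfigureFramebuffers earlier) both rely on the same try_emplace caching idiom: default-construct the slot, and only pay the creation cost on a miss. A generic sketch of the pattern, independent of the patch (names are illustrative):

    #include <unordered_map>

    // Return the cached handle for key, invoking create(key) exactly once
    // the first time the key is seen.
    template <typename Map, typename Key, typename Create>
    auto& GetOrCreate(Map& cache, const Key& key, Create&& create) {
        const auto [it, is_miss] = cache.try_emplace(key);
        if (is_miss) {
            it->second = create(key);
        }
        return it->second;
    }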
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index b49b2db48..921b6efb5 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -12,7 +12,7 @@
 #include <boost/functional/hash.hpp>
 
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/surface.h"
 
 namespace Vulkan {
@@ -85,13 +85,13 @@ public:
     explicit VKRenderPassCache(const VKDevice& device);
     ~VKRenderPassCache();
 
-    vk::RenderPass GetRenderPass(const RenderPassParams& params);
+    VkRenderPass GetRenderPass(const RenderPassParams& params);
 
 private:
-    UniqueRenderPass CreateRenderPass(const RenderPassParams& params) const;
+    vk::RenderPass CreateRenderPass(const RenderPassParams& params) const;
 
     const VKDevice& device;
-    std::unordered_map<RenderPassParams, UniqueRenderPass> cache;
+    std::unordered_map<RenderPassParams, vk::RenderPass> cache;
 };
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index 525b4bb46..dc06f545a 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -6,83 +6,83 @@
 #include <optional>
 #include "common/assert.h"
 #include "common/logging/log.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Vulkan {
 
+namespace {
+
 // TODO(Rodrigo): Fine tune these numbers.
 constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
 constexpr std::size_t FENCES_GROW_STEP = 0x40;
 
+VkFenceCreateInfo BuildFenceCreateInfo() {
+    VkFenceCreateInfo fence_ci;
+    fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+    fence_ci.pNext = nullptr;
+    fence_ci.flags = 0;
+    return fence_ci;
+}
+
+} // Anonymous namespace
+
 class CommandBufferPool final : public VKFencedPool {
 public:
     CommandBufferPool(const VKDevice& device)
         : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
 
     void Allocate(std::size_t begin, std::size_t end) override {
-        const auto dev = device.GetLogical();
-        const auto& dld = device.GetDispatchLoader();
-        const u32 graphics_family = device.GetGraphicsFamily();
-
-        auto pool = std::make_unique<Pool>();
-
         // Command buffers are going to be commited, recorded, executed every single usage cycle.
         // They are also going to be reseted when commited.
-        const auto pool_flags = vk::CommandPoolCreateFlagBits::eTransient |
-                                vk::CommandPoolCreateFlagBits::eResetCommandBuffer;
-        const vk::CommandPoolCreateInfo cmdbuf_pool_ci(pool_flags, graphics_family);
-        pool->handle = dev.createCommandPoolUnique(cmdbuf_pool_ci, nullptr, dld);
-
-        const vk::CommandBufferAllocateInfo cmdbuf_ai(*pool->handle,
-                                                      vk::CommandBufferLevel::ePrimary,
-                                                      static_cast<u32>(COMMAND_BUFFER_POOL_SIZE));
-        pool->cmdbufs =
-            dev.allocateCommandBuffersUnique<std::allocator<UniqueCommandBuffer>>(cmdbuf_ai, dld);
-
-        pools.push_back(std::move(pool));
+        VkCommandPoolCreateInfo command_pool_ci;
+        command_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+        command_pool_ci.pNext = nullptr;
+        command_pool_ci.flags =
+            VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+        command_pool_ci.queueFamilyIndex = device.GetGraphicsFamily();
+
+        Pool& pool = pools.emplace_back();
+        pool.handle = device.GetLogical().CreateCommandPool(command_pool_ci);
+        pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
     }
 
-    vk::CommandBuffer Commit(VKFence& fence) {
+    VkCommandBuffer Commit(VKFence& fence) {
         const std::size_t index = CommitResource(fence);
         const auto pool_index = index / COMMAND_BUFFER_POOL_SIZE;
         const auto sub_index = index % COMMAND_BUFFER_POOL_SIZE;
-        return *pools[pool_index]->cmdbufs[sub_index];
+        return pools[pool_index].cmdbufs[sub_index];
     }
 
 private:
     struct Pool {
-        UniqueCommandPool handle;
-        std::vector<UniqueCommandBuffer> cmdbufs;
+        vk::CommandPool handle;
+        vk::CommandBuffers cmdbufs;
     };
 
     const VKDevice& device;
-
-    std::vector<std::unique_ptr<Pool>> pools;
+    std::vector<Pool> pools;
 };
 
 VKResource::VKResource() = default;
 
 VKResource::~VKResource() = default;
 
-VKFence::VKFence(const VKDevice& device, UniqueFence handle)
-    : device{device}, handle{std::move(handle)} {}
+VKFence::VKFence(const VKDevice& device)
+    : device{device}, handle{device.GetLogical().CreateFence(BuildFenceCreateInfo())} {}
 
 VKFence::~VKFence() = default;
 
 void VKFence::Wait() {
-    static constexpr u64 timeout = std::numeric_limits<u64>::max();
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    switch (const auto result = dev.waitForFences(1, &*handle, true, timeout, dld)) {
-    case vk::Result::eSuccess:
+    switch (const VkResult result = handle.Wait()) {
+    case VK_SUCCESS:
         return;
-    case vk::Result::eErrorDeviceLost:
+    case VK_ERROR_DEVICE_LOST:
         device.ReportLoss();
         [[fallthrough]];
     default:
-        vk::throwResultException(result, "vk::waitForFences");
+        throw vk::Exception(result);
     }
 }
 
@@ -107,13 +107,11 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
         return false;
     }
 
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
     if (gpu_wait) {
         // Wait for the fence if it has been requested.
-        dev.waitForFences({*handle}, true, std::numeric_limits<u64>::max(), dld);
+        (void)handle.Wait();
     } else {
-        if (dev.getFenceStatus(*handle, dld) != vk::Result::eSuccess) {
+        if (handle.GetStatus() != VK_SUCCESS) {
             // Vulkan fence is not ready, not much it can do here
             return false;
         }
@@ -126,7 +124,7 @@ bool VKFence::Tick(bool gpu_wait, bool owner_wait) {
     protected_resources.clear();
 
     // Prepare fence for reusage.
-    dev.resetFences({*handle}, dld);
+    handle.Reset();
     is_used = false;
     return true;
 }
@@ -299,21 +297,16 @@ VKFence& VKResourceManager::CommitFence() {
     return *found_fence;
 }
 
-vk::CommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
+VkCommandBuffer VKResourceManager::CommitCommandBuffer(VKFence& fence) {
     return command_buffer_pool->Commit(fence);
 }
 
 void VKResourceManager::GrowFences(std::size_t new_fences_count) {
-    const auto dev = device.GetLogical();
-    const auto& dld = device.GetDispatchLoader();
-    const vk::FenceCreateInfo fence_ci;
-
     const std::size_t previous_size = fences.size();
     fences.resize(previous_size + new_fences_count);
 
-    std::generate(fences.begin() + previous_size, fences.end(), [&]() {
-        return std::make_unique<VKFence>(device, dev.createFenceUnique(fence_ci, nullptr, dld));
-    });
+    std::generate(fences.begin() + previous_size, fences.end(),
+                  [this] { return std::make_unique<VKFence>(device); });
 }
 
 } // namespace Vulkan
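A worked example of the pool indexing in CommandBufferPool::Commit above: with a pool size of 0x1000, the flat index returned by CommitResource splits into which pool to use and which command buffer inside it. Standalone arithmetic check:

    #include <cassert>
    #include <cstddef>

    int main() {
        constexpr std::size_t POOL_SIZE = 0x1000;
        const std::size_t index = 0x1234;                 // Flat index from CommitResource
        const std::size_t pool_index = index / POOL_SIZE; // -> 1 (second pool)
        const std::size_t sub_index = index % POOL_SIZE;  // -> 0x234
        assert(pool_index == 1 && sub_index == 0x234);
        return 0;
    }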
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.h b/src/video_core/renderer_vulkan/vk_resource_manager.h
index d4cbc95a5..f683d2276 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.h
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.h
@@ -7,7 +7,7 @@
 #include <cstddef>
 #include <memory>
 #include <vector>
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Vulkan {
 
@@ -42,7 +42,7 @@ class VKFence {
     friend class VKResourceManager;
 
 public:
-    explicit VKFence(const VKDevice& device, UniqueFence handle);
+    explicit VKFence(const VKDevice& device);
     ~VKFence();
 
     /**
@@ -69,7 +69,7 @@ public:
     void RedirectProtection(VKResource* old_resource, VKResource* new_resource) noexcept;
 
     /// Retreives the fence.
-    operator vk::Fence() const {
+    operator VkFence() const {
         return *handle;
     }
 
@@ -87,7 +87,7 @@ private:
     bool Tick(bool gpu_wait, bool owner_wait);
 
     const VKDevice& device;                       ///< Device handler
-    UniqueFence handle;                           ///< Vulkan fence
+    vk::Fence handle;                             ///< Vulkan fence
     std::vector<VKResource*> protected_resources; ///< List of resources protected by this fence
     bool is_owned = false; ///< The fence has been commited but not released yet.
     bool is_used = false;  ///< The fence has been commited but it has not been checked to be free.
@@ -181,7 +181,7 @@ public:
     VKFence& CommitFence();
 
     /// Commits an unused command buffer and protects it with a fence.
-    vk::CommandBuffer CommitCommandBuffer(VKFence& fence);
+    VkCommandBuffer CommitCommandBuffer(VKFence& fence);
 
 private:
     /// Allocates new fences.
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 204b7c39c..07bbcf520 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -7,64 +7,64 @@
 #include <unordered_map>
 
 #include "common/assert.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/maxwell_to_vk.h"
 #include "video_core/renderer_vulkan/vk_sampler_cache.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/textures/texture.h"
 
 namespace Vulkan {
 
-static std::optional<vk::BorderColor> TryConvertBorderColor(std::array<float, 4> color) {
+namespace {
+
+VkBorderColor ConvertBorderColor(std::array<float, 4> color) {
     // TODO(Rodrigo): Manage integer border colors
     if (color == std::array<float, 4>{0, 0, 0, 0}) {
-        return vk::BorderColor::eFloatTransparentBlack;
+        return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
     } else if (color == std::array<float, 4>{0, 0, 0, 1}) {
-        return vk::BorderColor::eFloatOpaqueBlack;
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
     } else if (color == std::array<float, 4>{1, 1, 1, 1}) {
-        return vk::BorderColor::eFloatOpaqueWhite;
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+    }
+    if (color[0] + color[1] + color[2] > 1.35f) {
+        // If color elements are brighter than roughly 0.5 average, use white border
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
+    } else if (color[3] > 0.5f) {
+        return VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
     } else {
-        if (color[0] + color[1] + color[2] > 1.35f) {
-            // If color elements are brighter than roughly 0.5 average, use white border
-            return vk::BorderColor::eFloatOpaqueWhite;
-        }
-        if (color[3] > 0.5f) {
-            return vk::BorderColor::eFloatOpaqueBlack;
-        }
-        return vk::BorderColor::eFloatTransparentBlack;
+        return VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
     }
 }
 
+} // Anonymous namespace
+
 VKSamplerCache::VKSamplerCache(const VKDevice& device) : device{device} {}
 
 VKSamplerCache::~VKSamplerCache() = default;
 
-UniqueSampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
-    const float max_anisotropy{tsc.GetMaxAnisotropy()};
-    const bool has_anisotropy{max_anisotropy > 1.0f};
-
-    const auto border_color{tsc.GetBorderColor()};
-    const auto vk_border_color{TryConvertBorderColor(border_color)};
-
-    constexpr bool unnormalized_coords{false};
-
-    const vk::SamplerCreateInfo sampler_ci(
-        {}, MaxwellToVK::Sampler::Filter(tsc.mag_filter),
-        MaxwellToVK::Sampler::Filter(tsc.min_filter),
-        MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter),
-        MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter), tsc.GetLodBias(),
-        has_anisotropy, max_anisotropy, tsc.depth_compare_enabled,
-        MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func), tsc.GetMinLod(),
-        tsc.GetMaxLod(), vk_border_color.value_or(vk::BorderColor::eFloatTransparentBlack),
-        unnormalized_coords);
-
-    const auto& dld{device.GetDispatchLoader()};
-    const auto dev{device.GetLogical()};
-    return dev.createSamplerUnique(sampler_ci, nullptr, dld);
+vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) const {
+    VkSamplerCreateInfo ci;
+    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+    ci.pNext = nullptr;
+    ci.flags = 0;
+    ci.magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter);
+    ci.minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter);
+    ci.mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter);
+    ci.addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter);
+    ci.addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter);
+    ci.addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter);
+    ci.mipLodBias = tsc.GetLodBias();
+    ci.anisotropyEnable = tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE;
+    ci.maxAnisotropy = tsc.GetMaxAnisotropy();
+    ci.compareEnable = tsc.depth_compare_enabled;
+    ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func);
+    ci.minLod = tsc.GetMinLod();
+    ci.maxLod = tsc.GetMaxLod();
+    ci.borderColor = ConvertBorderColor(tsc.GetBorderColor());
+    ci.unnormalizedCoordinates = VK_FALSE;
+    return device.GetLogical().CreateSampler(ci);
 }
 
-vk::Sampler VKSamplerCache::ToSamplerType(const UniqueSampler& sampler) const {
+VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const {
     return *sampler;
 }
 
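A worked example of the border-color fallback heuristic above: a border of (0.8, 0.7, 0.2, 1.0) matches none of the exact colors, but its RGB sum is 1.7 > 1.35, so ConvertBorderColor falls back to opaque white. Standalone check:

    #include <array>
    #include <cassert>

    int main() {
        const std::array<float, 4> color{0.8f, 0.7f, 0.2f, 1.0f};
        const bool bright = color[0] + color[1] + color[2] > 1.35f;
        assert(bright); // -> VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE in ConvertBorderColor
        return 0;
    }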
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.h b/src/video_core/renderer_vulkan/vk_sampler_cache.h
index 1f73b716b..a33d1c0ee 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.h
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.h
@@ -4,7 +4,7 @@
 
 #pragma once
 
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 #include "video_core/sampler_cache.h"
 #include "video_core/textures/texture.h"
 
@@ -12,15 +12,15 @@ namespace Vulkan {
 
 class VKDevice;
 
-class VKSamplerCache final : public VideoCommon::SamplerCache<vk::Sampler, UniqueSampler> {
+class VKSamplerCache final : public VideoCommon::SamplerCache<VkSampler, vk::Sampler> {
 public:
     explicit VKSamplerCache(const VKDevice& device);
     ~VKSamplerCache();
 
 protected:
-    UniqueSampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
+    vk::Sampler CreateSampler(const Tegra::Texture::TSCEntry& tsc) const override;
 
-    vk::Sampler ToSamplerType(const UniqueSampler& sampler) const override;
+    VkSampler ToSamplerType(const vk::Sampler& sampler) const override;
 
 private:
     const VKDevice& device;
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index b61d4fe63..900f551b3 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -10,23 +10,22 @@
 
 #include "common/assert.h"
 #include "common/microprofile.h"
-#include "video_core/renderer_vulkan/declarations.h"
 #include "video_core/renderer_vulkan/vk_device.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_manager.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_state_tracker.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Vulkan {
 
 MICROPROFILE_DECLARE(Vulkan_WaitForWorker);
 
-void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf,
-                                           const vk::DispatchLoaderDynamic& dld) {
+void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
     auto command = first;
     while (command != nullptr) {
         auto next = command->GetNext();
-        command->Execute(cmdbuf, dld);
+        command->Execute(cmdbuf);
         command->~Command();
         command = next;
     }
@@ -51,7 +50,7 @@ VKScheduler::~VKScheduler() {
     worker_thread.join();
 }
 
-void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Flush(bool release_fence, VkSemaphore semaphore) {
     SubmitExecution(semaphore);
     if (release_fence) {
         current_fence->Release();
@@ -59,7 +58,7 @@ void VKScheduler::Flush(bool release_fence, vk::Semaphore semaphore) {
     AllocateNewContext();
 }
 
-void VKScheduler::Finish(bool release_fence, vk::Semaphore semaphore) {
+void VKScheduler::Finish(bool release_fence, VkSemaphore semaphore) {
     SubmitExecution(semaphore);
     current_fence->Wait();
     if (release_fence) {
@@ -89,17 +88,34 @@ void VKScheduler::DispatchWork() {
     AcquireNewChunk();
 }
 
-void VKScheduler::RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi) {
-    if (state.renderpass && renderpass_bi == *state.renderpass) {
+void VKScheduler::RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
+                                    VkExtent2D render_area) {
+    if (renderpass == state.renderpass && framebuffer == state.framebuffer &&
+        render_area.width == state.render_area.width &&
+        render_area.height == state.render_area.height) {
         return;
     }
-    const bool end_renderpass = state.renderpass.has_value();
-    state.renderpass = renderpass_bi;
-    Record([renderpass_bi, end_renderpass](auto cmdbuf, auto& dld) {
+    const bool end_renderpass = state.renderpass != nullptr;
+    state.renderpass = renderpass;
+    state.framebuffer = framebuffer;
+    state.render_area = render_area;
+
+    VkRenderPassBeginInfo renderpass_bi;
+    renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+    renderpass_bi.pNext = nullptr;
+    renderpass_bi.renderPass = renderpass;
+    renderpass_bi.framebuffer = framebuffer;
+    renderpass_bi.renderArea.offset.x = 0;
+    renderpass_bi.renderArea.offset.y = 0;
+    renderpass_bi.renderArea.extent = render_area;
+    renderpass_bi.clearValueCount = 0;
+    renderpass_bi.pClearValues = nullptr;
+
+    Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) {
         if (end_renderpass) {
-            cmdbuf.endRenderPass(dld);
+            cmdbuf.EndRenderPass();
         }
-        cmdbuf.beginRenderPass(renderpass_bi, vk::SubpassContents::eInline, dld);
+        cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
     });
 }
 
@@ -107,13 +123,13 @@ void VKScheduler::RequestOutsideRenderPassOperationContext() {
     EndRenderPass();
 }
 
-void VKScheduler::BindGraphicsPipeline(vk::Pipeline pipeline) {
+void VKScheduler::BindGraphicsPipeline(VkPipeline pipeline) {
     if (state.graphics_pipeline == pipeline) {
         return;
     }
     state.graphics_pipeline = pipeline;
-    Record([pipeline](auto cmdbuf, auto& dld) {
-        cmdbuf.bindPipeline(vk::PipelineBindPoint::eGraphics, pipeline, dld);
+    Record([pipeline](vk::CommandBuffer cmdbuf) {
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
     });
 }
 
@@ -126,37 +142,50 @@ void VKScheduler::WorkerThread() {
         }
         auto extracted_chunk = std::move(chunk_queue.Front());
         chunk_queue.Pop();
-        extracted_chunk->ExecuteAll(current_cmdbuf, device.GetDispatchLoader());
+        extracted_chunk->ExecuteAll(current_cmdbuf);
         chunk_reserve.Push(std::move(extracted_chunk));
     } while (!quit);
 }
 
-void VKScheduler::SubmitExecution(vk::Semaphore semaphore) {
+void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
     EndPendingOperations();
     InvalidateState();
     WaitWorker();
 
     std::unique_lock lock{mutex};
 
-    const auto queue = device.GetGraphicsQueue();
-    const auto& dld = device.GetDispatchLoader();
-    current_cmdbuf.end(dld);
+    current_cmdbuf.End();
 
-    const vk::SubmitInfo submit_info(0, nullptr, nullptr, 1, &current_cmdbuf, semaphore ? 1U : 0U,
-                                     &semaphore);
-    queue.submit({submit_info}, static_cast<vk::Fence>(*current_fence), dld);
+    VkSubmitInfo submit_info;
+    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+    submit_info.pNext = nullptr;
+    submit_info.waitSemaphoreCount = 0;
+    submit_info.pWaitSemaphores = nullptr;
+    submit_info.pWaitDstStageMask = nullptr;
+    submit_info.commandBufferCount = 1;
+    submit_info.pCommandBuffers = current_cmdbuf.address();
+    submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
+    submit_info.pSignalSemaphores = &semaphore;
+    device.GetGraphicsQueue().Submit(submit_info, *current_fence);
 }
 
 void VKScheduler::AllocateNewContext() {
     ++ticks;
 
+    VkCommandBufferBeginInfo cmdbuf_bi;
+    cmdbuf_bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+    cmdbuf_bi.pNext = nullptr;
+    cmdbuf_bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+    cmdbuf_bi.pInheritanceInfo = nullptr;
+
     std::unique_lock lock{mutex};
     current_fence = next_fence;
     next_fence = &resource_manager.CommitFence();
 
-    current_cmdbuf = resource_manager.CommitCommandBuffer(*current_fence);
-    current_cmdbuf.begin({vk::CommandBufferUsageFlagBits::eOneTimeSubmit},
-                         device.GetDispatchLoader());
+    current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
+                                       device.GetDispatchLoader());
+    current_cmdbuf.Begin(cmdbuf_bi);
+
     // Enable counters once again. These are disabled when a command buffer is finished.
     if (query_cache) {
         query_cache->UpdateCounters();
@@ -177,8 +206,8 @@ void VKScheduler::EndRenderPass() {
     if (!state.renderpass) {
         return;
     }
-    state.renderpass = std::nullopt;
-    Record([](auto cmdbuf, auto& dld) { cmdbuf.endRenderPass(dld); });
+    state.renderpass = nullptr;
+    Record([](vk::CommandBuffer cmdbuf) { cmdbuf.EndRenderPass(); });
 }
 
 void VKScheduler::AcquireNewChunk() {
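The scheduler's Record()/ExecuteAll() pair above defers command lambdas into a chunk that the worker thread later replays against the real command buffer. A miniature stand-in for the pattern, using std::function instead of yuzu's placement-new chunk allocator (all names here are illustrative):

    #include <functional>
    #include <vector>

    struct FakeCmdbuf {}; // Stands in for vk::CommandBuffer

    class MiniChunk {
    public:
        template <typename F>
        void Record(F&& func) {
            commands.emplace_back(std::forward<F>(func));
        }
        void ExecuteAll(FakeCmdbuf cmdbuf) {
            for (auto& command : commands) {
                command(cmdbuf); // Replayed in recording order
            }
            commands.clear();
        }
    private:
        std::vector<std::function<void(FakeCmdbuf)>> commands;
    };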
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index c7cc291c3..82a8adc69 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -13,7 +13,7 @@
 #include <utility>
 #include "common/common_types.h"
 #include "common/threadsafe_queue.h"
-#include "video_core/renderer_vulkan/declarations.h"
+#include "video_core/renderer_vulkan/wrapper.h"
 
 namespace Vulkan {
 
@@ -49,10 +49,10 @@ public:
     ~VKScheduler();
 
     /// Sends the current execution context to the GPU.
-    void Flush(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+    void Flush(bool release_fence = true, VkSemaphore semaphore = nullptr);
 
     /// Sends the current execution context to the GPU and waits for it to complete.
-    void Finish(bool release_fence = true, vk::Semaphore semaphore = nullptr);
+    void Finish(bool release_fence = true, VkSemaphore semaphore = nullptr);
 
     /// Waits for the worker thread to finish executing everything. After this function returns it's
     /// safe to touch worker resources.
@@ -62,14 +62,15 @@ public:
62 void DispatchWork(); 62 void DispatchWork();
63 63
64 /// Requests to begin a renderpass. 64 /// Requests to begin a renderpass.
65 void RequestRenderpass(const vk::RenderPassBeginInfo& renderpass_bi); 65 void RequestRenderpass(VkRenderPass renderpass, VkFramebuffer framebuffer,
66 VkExtent2D render_area);
66 67
 67 /// Requests the current execution context to be able to execute operations only allowed outside 68 /// Requests the current execution context to be able to execute operations only allowed outside
68 /// of a renderpass. 69 /// of a renderpass.
69 void RequestOutsideRenderPassOperationContext(); 70 void RequestOutsideRenderPassOperationContext();
70 71
71 /// Binds a pipeline to the current execution context. 72 /// Binds a pipeline to the current execution context.
72 void BindGraphicsPipeline(vk::Pipeline pipeline); 73 void BindGraphicsPipeline(VkPipeline pipeline);
73 74
74 /// Assigns the query cache. 75 /// Assigns the query cache.
75 void SetQueryCache(VKQueryCache& query_cache_) { 76 void SetQueryCache(VKQueryCache& query_cache_) {
@@ -101,8 +102,7 @@ private:
101 public: 102 public:
102 virtual ~Command() = default; 103 virtual ~Command() = default;
103 104
104 virtual void Execute(vk::CommandBuffer cmdbuf, 105 virtual void Execute(vk::CommandBuffer cmdbuf) const = 0;
105 const vk::DispatchLoaderDynamic& dld) const = 0;
106 106
107 Command* GetNext() const { 107 Command* GetNext() const {
108 return next; 108 return next;
@@ -125,9 +125,8 @@ private:
125 TypedCommand(TypedCommand&&) = delete; 125 TypedCommand(TypedCommand&&) = delete;
126 TypedCommand& operator=(TypedCommand&&) = delete; 126 TypedCommand& operator=(TypedCommand&&) = delete;
127 127
128 void Execute(vk::CommandBuffer cmdbuf, 128 void Execute(vk::CommandBuffer cmdbuf) const override {
129 const vk::DispatchLoaderDynamic& dld) const override { 129 command(cmdbuf);
130 command(cmdbuf, dld);
131 } 130 }
132 131
133 private: 132 private:
@@ -136,7 +135,7 @@ private:
136 135
137 class CommandChunk final { 136 class CommandChunk final {
138 public: 137 public:
139 void ExecuteAll(vk::CommandBuffer cmdbuf, const vk::DispatchLoaderDynamic& dld); 138 void ExecuteAll(vk::CommandBuffer cmdbuf);
140 139
141 template <typename T> 140 template <typename T>
142 bool Record(T& command) { 141 bool Record(T& command) {
@@ -175,7 +174,7 @@ private:
175 174
176 void WorkerThread(); 175 void WorkerThread();
177 176
178 void SubmitExecution(vk::Semaphore semaphore); 177 void SubmitExecution(VkSemaphore semaphore);
179 178
180 void AllocateNewContext(); 179 void AllocateNewContext();
181 180
@@ -198,8 +197,10 @@ private:
198 VKFence* next_fence = nullptr; 197 VKFence* next_fence = nullptr;
199 198
200 struct State { 199 struct State {
201 std::optional<vk::RenderPassBeginInfo> renderpass; 200 VkRenderPass renderpass = nullptr;
202 vk::Pipeline graphics_pipeline; 201 VkFramebuffer framebuffer = nullptr;
202 VkExtent2D render_area = {0, 0};
203 VkPipeline graphics_pipeline = nullptr;
203 } state; 204 } state;
204 205
205 std::unique_ptr<CommandChunk> chunk; 206 std::unique_ptr<CommandChunk> chunk;
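The Command/TypedCommand machinery kept above is a type-erased deferred-call list; only the dispatch-loader parameter is dropped by this change. A simplified sketch of the idea, assuming a generic CmdBuf type standing in for vk::CommandBuffer (unlike the real CommandChunk, which packs commands into a fixed buffer chained through an intrusive next pointer, this version heap-allocates each command for brevity):

    #include <memory>
    #include <utility>
    #include <vector>

    template <typename CmdBuf>
    class CommandList {
        struct Command {
            virtual ~Command() = default;
            virtual void Execute(CmdBuf cmdbuf) const = 0;
        };

        template <typename T>
        struct TypedCommand final : Command {
            explicit TypedCommand(T command_) : command{std::move(command_)} {}
            void Execute(CmdBuf cmdbuf) const override {
                command(cmdbuf); // invoke the captured lambda on the command buffer
            }
            T command;
        };

        std::vector<std::unique_ptr<Command>> commands;

    public:
        template <typename T>
        void Record(T command) {
            commands.push_back(std::make_unique<TypedCommand<T>>(std::move(command)));
        }

        void ExecuteAll(CmdBuf cmdbuf) {
            for (const auto& command : commands) {
                command->Execute(cmdbuf);
            }
            commands.clear();
        }
    };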
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index b9f9e2714..62e4ca488 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -801,7 +801,7 @@ private:
801 if (IsOutputAttributeArray()) { 801 if (IsOutputAttributeArray()) {
802 const u32 num = GetNumOutputVertices(); 802 const u32 num = GetNumOutputVertices();
803 type = TypeArray(type, Constant(t_uint, num)); 803 type = TypeArray(type, Constant(t_uint, num));
804 if (device.GetDriverID() != vk::DriverIdKHR::eIntelProprietaryWindows) { 804 if (device.GetDriverID() != VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR) {
805 // Intel's proprietary driver fails to setup defaults for arrayed output 805 // Intel's proprietary driver fails to setup defaults for arrayed output
806 // attributes. 806 // attributes.
807 varying_default = ConstantComposite(type, std::vector(num, varying_default)); 807 varying_default = ConstantComposite(type, std::vector(num, varying_default));
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index b97c4cb3d..784839327 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -8,27 +8,25 @@
8#include "common/alignment.h" 8#include "common/alignment.h"
9#include "common/assert.h" 9#include "common/assert.h"
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/declarations.h"
12#include "video_core/renderer_vulkan/vk_device.h" 11#include "video_core/renderer_vulkan/vk_device.h"
13#include "video_core/renderer_vulkan/vk_shader_util.h" 12#include "video_core/renderer_vulkan/vk_shader_util.h"
13#include "video_core/renderer_vulkan/wrapper.h"
14 14
15namespace Vulkan { 15namespace Vulkan {
16 16
17UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) { 17vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data) {
18 // Avoid undefined behavior by copying to a staging allocation 18 // Avoid undefined behavior by copying to a staging allocation
19 ASSERT(code_size % sizeof(u32) == 0); 19 ASSERT(code_size % sizeof(u32) == 0);
20 const auto data = std::make_unique<u32[]>(code_size / sizeof(u32)); 20 const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
21 std::memcpy(data.get(), code_data, code_size); 21 std::memcpy(data.get(), code_data, code_size);
22 22
23 const auto dev = device.GetLogical(); 23 VkShaderModuleCreateInfo ci;
24 const auto& dld = device.GetDispatchLoader(); 24 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
25 const vk::ShaderModuleCreateInfo shader_ci({}, code_size, data.get()); 25 ci.pNext = nullptr;
26 vk::ShaderModule shader_module; 26 ci.flags = 0;
27 if (dev.createShaderModule(&shader_ci, nullptr, &shader_module, dld) != vk::Result::eSuccess) { 27 ci.codeSize = code_size;
28 UNREACHABLE_MSG("Shader module failed to build!"); 28 ci.pCode = data.get();
29 } 29 return device.GetLogical().CreateShaderModule(ci);
30
31 return UniqueShaderModule(shader_module, vk::ObjectDestroy(dev, nullptr, dld));
32} 30}
33 31
34} // namespace Vulkan 32} // namespace Vulkan
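A hypothetical call site for the new signature, assuming a LoadSpirv helper that is not part of this change:

    #include <cstdint>
    #include <vector>

    // LoadSpirv is an illustrative loader (assumption); VKDevice, BuildShader
    // and vk::ShaderModule come from the headers touched above.
    std::vector<std::uint8_t> LoadSpirv(const char* path); // provided elsewhere

    vk::ShaderModule CreateModule(const VKDevice& device) {
        const std::vector<std::uint8_t> code = LoadSpirv("shader.spv");
        // BuildShader asserts code.size() % sizeof(u32) == 0; the returned
        // vk::ShaderModule owns the handle and frees it on destruction.
        return BuildShader(device, code.size(), code.data());
    }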
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index c06d65970..be38d6697 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -6,12 +6,12 @@
6 6
7#include <vector> 7#include <vector>
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "video_core/renderer_vulkan/declarations.h" 9#include "video_core/renderer_vulkan/wrapper.h"
10 10
11namespace Vulkan { 11namespace Vulkan {
12 12
13class VKDevice; 13class VKDevice;
14 14
15UniqueShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data); 15vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, const u8* code_data);
16 16
17} // namespace Vulkan 17} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 374959f82..94d954d7a 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -13,6 +13,7 @@
13#include "video_core/renderer_vulkan/vk_resource_manager.h" 13#include "video_core/renderer_vulkan/vk_resource_manager.h"
14#include "video_core/renderer_vulkan/vk_scheduler.h" 14#include "video_core/renderer_vulkan/vk_scheduler.h"
15#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 15#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
16#include "video_core/renderer_vulkan/wrapper.h"
16 17
17namespace Vulkan { 18namespace Vulkan {
18 19
@@ -71,17 +72,23 @@ VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_
71} 72}
72 73
73VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { 74VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
74 const auto usage =
75 vk::BufferUsageFlagBits::eTransferSrc | vk::BufferUsageFlagBits::eTransferDst |
76 vk::BufferUsageFlagBits::eUniformBuffer | vk::BufferUsageFlagBits::eStorageBuffer |
77 vk::BufferUsageFlagBits::eIndexBuffer;
78 const u32 log2 = Common::Log2Ceil64(size); 75 const u32 log2 = Common::Log2Ceil64(size);
79 const vk::BufferCreateInfo buffer_ci({}, 1ULL << log2, usage, vk::SharingMode::eExclusive, 0, 76
80 nullptr); 77 VkBufferCreateInfo ci;
81 const auto dev = device.GetLogical(); 78 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
79 ci.pNext = nullptr;
80 ci.flags = 0;
81 ci.size = 1ULL << log2;
82 ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
83 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
84 VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
85 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
86 ci.queueFamilyIndexCount = 0;
87 ci.pQueueFamilyIndices = nullptr;
88
82 auto buffer = std::make_unique<VKBuffer>(); 89 auto buffer = std::make_unique<VKBuffer>();
83 buffer->handle = dev.createBufferUnique(buffer_ci, nullptr, device.GetDispatchLoader()); 90 buffer->handle = device.GetLogical().CreateBuffer(ci);
84 buffer->commit = memory_manager.Commit(*buffer->handle, host_visible); 91 buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
85 92
86 auto& entries = GetCache(host_visible)[log2].entries; 93 auto& entries = GetCache(host_visible)[log2].entries;
87 return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer; 94 return *entries.emplace_back(std::move(buffer), scheduler.GetFence(), epoch).buffer;
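The pool above buckets staging buffers by rounding every request up to the next power of two, so a lookup only needs the bucket index. An illustrative stand-in for Common::Log2Ceil64 (the project's helper; this reimplementation is for exposition only):

    #include <cstdint>

    // Exponent of the least power of two that is >= value. All buffers in
    // bucket `log2` are sized 1ULL << log2, so any request in the interval
    // (2^(n-1), 2^n] reuses the same bucket.
    constexpr std::uint32_t Log2Ceil64(std::uint64_t value) {
        std::uint32_t log2 = 0;
        while ((std::uint64_t{1} << log2) < value) {
            ++log2;
        }
        return log2;
    }

    static_assert(Log2Ceil64(4096) == 12); // a 4 KiB request lands in the 4 KiB bucket
    static_assert(Log2Ceil64(4097) == 13); // one byte more rounds up to the 8 KiB bucket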
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 4d9488f49..a0840ff8c 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -11,9 +11,9 @@
11 11
12#include "common/common_types.h" 12#include "common/common_types.h"
13 13
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_memory_manager.h" 14#include "video_core/renderer_vulkan/vk_memory_manager.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h" 15#include "video_core/renderer_vulkan/vk_resource_manager.h"
16#include "video_core/renderer_vulkan/wrapper.h"
17 17
18namespace Vulkan { 18namespace Vulkan {
19 19
@@ -22,7 +22,7 @@ class VKFenceWatch;
22class VKScheduler; 22class VKScheduler;
23 23
24struct VKBuffer final { 24struct VKBuffer final {
25 UniqueBuffer handle; 25 vk::Buffer handle;
26 VKMemoryCommit commit; 26 VKMemoryCommit commit;
27}; 27};
28 28
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index d48d3b44c..38a93a01a 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -9,11 +9,11 @@
9 9
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/assert.h" 11#include "common/assert.h"
12#include "video_core/renderer_vulkan/declarations.h"
13#include "video_core/renderer_vulkan/vk_device.h" 12#include "video_core/renderer_vulkan/vk_device.h"
14#include "video_core/renderer_vulkan/vk_resource_manager.h" 13#include "video_core/renderer_vulkan/vk_resource_manager.h"
15#include "video_core/renderer_vulkan/vk_scheduler.h" 14#include "video_core/renderer_vulkan/vk_scheduler.h"
16#include "video_core/renderer_vulkan/vk_stream_buffer.h" 15#include "video_core/renderer_vulkan/vk_stream_buffer.h"
16#include "video_core/renderer_vulkan/wrapper.h"
17 17
18namespace Vulkan { 18namespace Vulkan {
19 19
@@ -25,8 +25,8 @@ constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
25constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024; 25constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
26 26
27std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter, 27std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
28 vk::MemoryPropertyFlags wanted) { 28 VkMemoryPropertyFlags wanted) {
29 const auto properties = device.GetPhysical().getMemoryProperties(device.GetDispatchLoader()); 29 const auto properties = device.GetPhysical().GetMemoryProperties();
30 for (u32 i = 0; i < properties.memoryTypeCount; i++) { 30 for (u32 i = 0; i < properties.memoryTypeCount; i++) {
31 if (!(filter & (1 << i))) { 31 if (!(filter & (1 << i))) {
32 continue; 32 continue;
@@ -35,13 +35,13 @@ std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter,
35 return i; 35 return i;
36 } 36 }
37 } 37 }
38 return {}; 38 return std::nullopt;
39} 39}
40 40
41} // Anonymous namespace 41} // Anonymous namespace
42 42
43VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, 43VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
44 vk::BufferUsageFlags usage) 44 VkBufferUsageFlags usage)
45 : device{device}, scheduler{scheduler} { 45 : device{device}, scheduler{scheduler} {
46 CreateBuffers(usage); 46 CreateBuffers(usage);
47 ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE); 47 ReserveWatches(current_watches, WATCHES_INITIAL_RESERVE);
@@ -78,17 +78,13 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
78 invalidated = true; 78 invalidated = true;
79 } 79 }
80 80
81 const auto dev = device.GetLogical(); 81 return {memory.Map(offset, size), offset, invalidated};
82 const auto& dld = device.GetDispatchLoader();
83 const auto pointer = reinterpret_cast<u8*>(dev.mapMemory(*memory, offset, size, {}, dld));
84 return {pointer, offset, invalidated};
85} 82}
86 83
87void VKStreamBuffer::Unmap(u64 size) { 84void VKStreamBuffer::Unmap(u64 size) {
88 ASSERT_MSG(size <= mapped_size, "Reserved size is too small"); 85 ASSERT_MSG(size <= mapped_size, "Reserved size is too small");
89 86
90 const auto dev = device.GetLogical(); 87 memory.Unmap();
91 dev.unmapMemory(*memory, device.GetDispatchLoader());
92 88
93 offset += size; 89 offset += size;
94 90
@@ -101,30 +97,42 @@ void VKStreamBuffer::Unmap(u64 size) {
101 watch.fence.Watch(scheduler.GetFence()); 97 watch.fence.Watch(scheduler.GetFence());
102} 98}
103 99
104void VKStreamBuffer::CreateBuffers(vk::BufferUsageFlags usage) { 100void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
105 const vk::BufferCreateInfo buffer_ci({}, STREAM_BUFFER_SIZE, usage, vk::SharingMode::eExclusive, 101 VkBufferCreateInfo buffer_ci;
106 0, nullptr); 102 buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
107 const auto dev = device.GetLogical(); 103 buffer_ci.pNext = nullptr;
108 const auto& dld = device.GetDispatchLoader(); 104 buffer_ci.flags = 0;
109 buffer = dev.createBufferUnique(buffer_ci, nullptr, dld); 105 buffer_ci.size = STREAM_BUFFER_SIZE;
106 buffer_ci.usage = usage;
107 buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
108 buffer_ci.queueFamilyIndexCount = 0;
109 buffer_ci.pQueueFamilyIndices = nullptr;
110
111 const auto& dev = device.GetLogical();
112 buffer = dev.CreateBuffer(buffer_ci);
110 113
 111 const auto requirements = dev.getBufferMemoryRequirements(*buffer, dld); 114 // The wrapper carries its own dispatch table, so no dispatch loader is fetched here.
115 const auto requirements = dev.GetBufferMemoryRequirements(*buffer);
112 // Prefer device local host visible allocations (this should hit AMD's pinned memory). 116 // Prefer device local host visible allocations (this should hit AMD's pinned memory).
113 auto type = FindMemoryType(device, requirements.memoryTypeBits, 117 auto type =
114 vk::MemoryPropertyFlagBits::eHostVisible | 118 FindMemoryType(device, requirements.memoryTypeBits,
115 vk::MemoryPropertyFlagBits::eHostCoherent | 119 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
116 vk::MemoryPropertyFlagBits::eDeviceLocal); 120 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
117 if (!type) { 121 if (!type) {
118 // Otherwise search for a host visible allocation. 122 // Otherwise search for a host visible allocation.
119 type = FindMemoryType(device, requirements.memoryTypeBits, 123 type = FindMemoryType(device, requirements.memoryTypeBits,
120 vk::MemoryPropertyFlagBits::eHostVisible | 124 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
121 vk::MemoryPropertyFlagBits::eHostCoherent); 125 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
122 ASSERT_MSG(type, "No host visible and coherent memory type found"); 126 ASSERT_MSG(type, "No host visible and coherent memory type found");
123 } 127 }
124 const vk::MemoryAllocateInfo alloc_ci(requirements.size, *type); 128 VkMemoryAllocateInfo memory_ai;
125 memory = dev.allocateMemoryUnique(alloc_ci, nullptr, dld); 129 memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
126 130 memory_ai.pNext = nullptr;
127 dev.bindBufferMemory(*buffer, *memory, 0, dld); 131 memory_ai.allocationSize = requirements.size;
132 memory_ai.memoryTypeIndex = *type;
133
134 memory = dev.AllocateMemory(memory_ai);
135 buffer.BindMemory(*memory, 0);
128} 136}
129 137
130void VKStreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) { 138void VKStreamBuffer::ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size) {
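CreateBuffers first asks for device-local, host-visible, host-coherent memory and falls back to plain host-visible, host-coherent memory when no such type exists. A standalone restatement of the search it relies on, assuming only the C API:

    #include <cstdint>
    #include <optional>

    #include <vulkan/vulkan.h>

    // Pick the first memory type that is allowed by `filter` (the bitmask from
    // VkMemoryRequirements::memoryTypeBits) and exposes every flag in `wanted`.
    std::optional<std::uint32_t> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
                                                std::uint32_t filter,
                                                VkMemoryPropertyFlags wanted) {
        for (std::uint32_t i = 0; i < properties.memoryTypeCount; ++i) {
            if (!(filter & (1u << i))) {
                continue; // the resource cannot live in this memory type at all
            }
            if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) {
                return i;
            }
        }
        return std::nullopt;
    }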
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 187c0c612..58ce8b973 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -9,7 +9,7 @@
9#include <vector> 9#include <vector>
10 10
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "video_core/renderer_vulkan/declarations.h" 12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -21,7 +21,7 @@ class VKScheduler;
21class VKStreamBuffer final { 21class VKStreamBuffer final {
22public: 22public:
23 explicit VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, 23 explicit VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
24 vk::BufferUsageFlags usage); 24 VkBufferUsageFlags usage);
25 ~VKStreamBuffer(); 25 ~VKStreamBuffer();
26 26
27 /** 27 /**
@@ -35,7 +35,7 @@ public:
35 /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy. 35 /// Ensures that "size" bytes of memory are available to the GPU, potentially recording a copy.
36 void Unmap(u64 size); 36 void Unmap(u64 size);
37 37
38 vk::Buffer GetHandle() const { 38 VkBuffer GetHandle() const {
39 return *buffer; 39 return *buffer;
40 } 40 }
41 41
@@ -46,20 +46,18 @@ private:
46 }; 46 };
47 47
 48 /// Creates Vulkan buffer handles committing the required memory. 48 /// Creates Vulkan buffer handles committing the required memory.
49 void CreateBuffers(vk::BufferUsageFlags usage); 49 void CreateBuffers(VkBufferUsageFlags usage);
50 50
 51 /// Increases the number of watches available. 51 /// Increases the number of watches available.
52 void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size); 52 void ReserveWatches(std::vector<Watch>& watches, std::size_t grow_size);
53 53
54 void WaitPendingOperations(u64 requested_upper_bound); 54 void WaitPendingOperations(u64 requested_upper_bound);
55 55
56 const VKDevice& device; ///< Vulkan device manager. 56 const VKDevice& device; ///< Vulkan device manager.
57 VKScheduler& scheduler; ///< Command scheduler. 57 VKScheduler& scheduler; ///< Command scheduler.
58 const vk::AccessFlags access; ///< Access usage of this stream buffer.
59 const vk::PipelineStageFlags pipeline_stage; ///< Pipeline usage of this stream buffer.
60 58
61 UniqueBuffer buffer; ///< Mapped buffer. 59 vk::Buffer buffer; ///< Mapped buffer.
62 UniqueDeviceMemory memory; ///< Memory allocation. 60 vk::DeviceMemory memory; ///< Memory allocation.
63 61
64 u64 offset{}; ///< Buffer iterator. 62 u64 offset{}; ///< Buffer iterator.
65 u64 mapped_size{}; ///< Size reserved for the current copy. 63 u64 mapped_size{}; ///< Size reserved for the current copy.
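A hypothetical upload through this interface, assuming a caller that owns the data to copy (u64 is yuzu's alias from common/common_types.h):

    #include <cstring>

    #include "common/common_types.h"

    // Returns where the bytes landed inside GetHandle(); data, size and
    // alignment are assumptions of this sketch.
    u64 UploadToStream(VKStreamBuffer& stream_buffer, const void* data, u64 size, u64 alignment) {
        const auto [pointer, offset, invalidated] = stream_buffer.Map(size, alignment);
        std::memcpy(pointer, data, size);
        stream_buffer.Unmap(size); // make exactly `size` bytes available to the GPU
        // When `invalidated` is true the buffer was rewound, so offsets cached
        // from earlier maps must not be reused.
        return offset;
    }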
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index 9e73fa9cd..bffd8f32a 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -11,69 +11,64 @@
11#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "core/core.h" 12#include "core/core.h"
13#include "core/frontend/framebuffer_layout.h" 13#include "core/frontend/framebuffer_layout.h"
14#include "video_core/renderer_vulkan/declarations.h"
15#include "video_core/renderer_vulkan/vk_device.h" 14#include "video_core/renderer_vulkan/vk_device.h"
16#include "video_core/renderer_vulkan/vk_resource_manager.h" 15#include "video_core/renderer_vulkan/vk_resource_manager.h"
17#include "video_core/renderer_vulkan/vk_swapchain.h" 16#include "video_core/renderer_vulkan/vk_swapchain.h"
17#include "video_core/renderer_vulkan/wrapper.h"
18 18
19namespace Vulkan { 19namespace Vulkan {
20 20
21namespace { 21namespace {
22 22
23vk::SurfaceFormatKHR ChooseSwapSurfaceFormat(const std::vector<vk::SurfaceFormatKHR>& formats, 23VkSurfaceFormatKHR ChooseSwapSurfaceFormat(vk::Span<VkSurfaceFormatKHR> formats, bool srgb) {
24 bool srgb) { 24 if (formats.size() == 1 && formats[0].format == VK_FORMAT_UNDEFINED) {
25 if (formats.size() == 1 && formats[0].format == vk::Format::eUndefined) { 25 VkSurfaceFormatKHR format;
26 vk::SurfaceFormatKHR format; 26 format.format = VK_FORMAT_B8G8R8A8_UNORM;
27 format.format = vk::Format::eB8G8R8A8Unorm; 27 format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
28 format.colorSpace = vk::ColorSpaceKHR::eSrgbNonlinear;
29 return format; 28 return format;
30 } 29 }
31 const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) { 30 const auto& found = std::find_if(formats.begin(), formats.end(), [srgb](const auto& format) {
32 const auto request_format = srgb ? vk::Format::eB8G8R8A8Srgb : vk::Format::eB8G8R8A8Unorm; 31 const auto request_format = srgb ? VK_FORMAT_B8G8R8A8_SRGB : VK_FORMAT_B8G8R8A8_UNORM;
33 return format.format == request_format && 32 return format.format == request_format &&
34 format.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear; 33 format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
35 }); 34 });
36 return found != formats.end() ? *found : formats[0]; 35 return found != formats.end() ? *found : formats[0];
37} 36}
38 37
39vk::PresentModeKHR ChooseSwapPresentMode(const std::vector<vk::PresentModeKHR>& modes) { 38VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
 40 // Mailbox doesn't block the application the way FIFO (vsync) does, so prefer it 39 // Mailbox doesn't block the application the way FIFO (vsync) does, so prefer it
41 const auto& found = std::find_if(modes.begin(), modes.end(), [](const auto& mode) { 40 const auto found = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
42 return mode == vk::PresentModeKHR::eMailbox; 41 return found != modes.end() ? *found : VK_PRESENT_MODE_FIFO_KHR;
43 });
44 return found != modes.end() ? *found : vk::PresentModeKHR::eFifo;
45} 42}
46 43
47vk::Extent2D ChooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, 44VkExtent2D ChooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height) {
48 u32 height) {
49 constexpr auto undefined_size{std::numeric_limits<u32>::max()}; 45 constexpr auto undefined_size{std::numeric_limits<u32>::max()};
50 if (capabilities.currentExtent.width != undefined_size) { 46 if (capabilities.currentExtent.width != undefined_size) {
51 return capabilities.currentExtent; 47 return capabilities.currentExtent;
52 } 48 }
53 vk::Extent2D extent = {width, height}; 49 VkExtent2D extent;
54 extent.width = std::max(capabilities.minImageExtent.width, 50 extent.width = std::max(capabilities.minImageExtent.width,
55 std::min(capabilities.maxImageExtent.width, extent.width)); 51 std::min(capabilities.maxImageExtent.width, width));
56 extent.height = std::max(capabilities.minImageExtent.height, 52 extent.height = std::max(capabilities.minImageExtent.height,
57 std::min(capabilities.maxImageExtent.height, extent.height)); 53 std::min(capabilities.maxImageExtent.height, height));
58 return extent; 54 return extent;
59} 55}
60 56
61} // Anonymous namespace 57} // Anonymous namespace
62 58
63VKSwapchain::VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device) 59VKSwapchain::VKSwapchain(VkSurfaceKHR surface, const VKDevice& device)
64 : surface{surface}, device{device} {} 60 : surface{surface}, device{device} {}
65 61
66VKSwapchain::~VKSwapchain() = default; 62VKSwapchain::~VKSwapchain() = default;
67 63
68void VKSwapchain::Create(u32 width, u32 height, bool srgb) { 64void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
69 const auto& dld = device.GetDispatchLoader();
70 const auto physical_device = device.GetPhysical(); 65 const auto physical_device = device.GetPhysical();
71 const auto capabilities{physical_device.getSurfaceCapabilitiesKHR(surface, dld)}; 66 const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
72 if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) { 67 if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
73 return; 68 return;
74 } 69 }
75 70
76 device.GetLogical().waitIdle(dld); 71 device.GetLogical().WaitIdle();
77 Destroy(); 72 Destroy();
78 73
79 CreateSwapchain(capabilities, width, height, srgb); 74 CreateSwapchain(capabilities, width, height, srgb);
@@ -84,10 +79,8 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
84} 79}
85 80
86void VKSwapchain::AcquireNextImage() { 81void VKSwapchain::AcquireNextImage() {
87 const auto dev{device.GetLogical()}; 82 device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
88 const auto& dld{device.GetDispatchLoader()}; 83 *present_semaphores[frame_index], {}, &image_index);
89 dev.acquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
90 *present_semaphores[frame_index], {}, &image_index, dld);
91 84
92 if (auto& fence = fences[image_index]; fence) { 85 if (auto& fence = fences[image_index]; fence) {
93 fence->Wait(); 86 fence->Wait();
@@ -96,29 +89,37 @@ void VKSwapchain::AcquireNextImage() {
96 } 89 }
97} 90}
98 91
99bool VKSwapchain::Present(vk::Semaphore render_semaphore, VKFence& fence) { 92bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
100 const vk::Semaphore present_semaphore{*present_semaphores[frame_index]}; 93 const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
101 const std::array<vk::Semaphore, 2> semaphores{present_semaphore, render_semaphore}; 94 const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
102 const u32 wait_semaphore_count{render_semaphore ? 2U : 1U};
103 const auto& dld{device.GetDispatchLoader()};
104 const auto present_queue{device.GetPresentQueue()}; 95 const auto present_queue{device.GetPresentQueue()};
105 bool recreated = false; 96 bool recreated = false;
106 97
107 const vk::PresentInfoKHR present_info(wait_semaphore_count, semaphores.data(), 1, 98 VkPresentInfoKHR present_info;
108 &swapchain.get(), &image_index, {}); 99 present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
109 switch (const auto result = present_queue.presentKHR(&present_info, dld); result) { 100 present_info.pNext = nullptr;
110 case vk::Result::eSuccess: 101 present_info.waitSemaphoreCount = render_semaphore ? 2U : 1U;
102 present_info.pWaitSemaphores = semaphores.data();
103 present_info.swapchainCount = 1;
104 present_info.pSwapchains = swapchain.address();
105 present_info.pImageIndices = &image_index;
106 present_info.pResults = nullptr;
107
108 switch (const VkResult result = present_queue.Present(present_info)) {
109 case VK_SUCCESS:
110 break;
111 case VK_SUBOPTIMAL_KHR:
112 LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
111 break; 113 break;
112 case vk::Result::eErrorOutOfDateKHR: 114 case VK_ERROR_OUT_OF_DATE_KHR:
113 if (current_width > 0 && current_height > 0) { 115 if (current_width > 0 && current_height > 0) {
114 Create(current_width, current_height, current_srgb); 116 Create(current_width, current_height, current_srgb);
115 recreated = true; 117 recreated = true;
116 } 118 }
117 break; 119 break;
118 default: 120 default:
119 LOG_CRITICAL(Render_Vulkan, "Vulkan failed to present swapchain due to {}!", 121 LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
120 vk::to_string(result)); 122 break;
121 UNREACHABLE();
122 } 123 }
123 124
124 ASSERT(fences[image_index] == nullptr); 125 ASSERT(fences[image_index] == nullptr);
@@ -132,74 +133,92 @@ bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebu
132 return framebuffer.width != current_width || framebuffer.height != current_height; 133 return framebuffer.width != current_width || framebuffer.height != current_height;
133} 134}
134 135
135void VKSwapchain::CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, 136void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
136 u32 height, bool srgb) { 137 u32 height, bool srgb) {
137 const auto& dld{device.GetDispatchLoader()};
138 const auto physical_device{device.GetPhysical()}; 138 const auto physical_device{device.GetPhysical()};
139 const auto formats{physical_device.getSurfaceFormatsKHR(surface, dld)}; 139 const auto formats{physical_device.GetSurfaceFormatsKHR(surface)};
140 const auto present_modes{physical_device.getSurfacePresentModesKHR(surface, dld)}; 140 const auto present_modes{physical_device.GetSurfacePresentModesKHR(surface)};
141 141
142 const vk::SurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)}; 142 const VkSurfaceFormatKHR surface_format{ChooseSwapSurfaceFormat(formats, srgb)};
143 const vk::PresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)}; 143 const VkPresentModeKHR present_mode{ChooseSwapPresentMode(present_modes)};
144 144
145 u32 requested_image_count{capabilities.minImageCount + 1}; 145 u32 requested_image_count{capabilities.minImageCount + 1};
146 if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { 146 if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
147 requested_image_count = capabilities.maxImageCount; 147 requested_image_count = capabilities.maxImageCount;
148 } 148 }
149 149
150 vk::SwapchainCreateInfoKHR swapchain_ci( 150 VkSwapchainCreateInfoKHR swapchain_ci;
151 {}, surface, requested_image_count, surface_format.format, surface_format.colorSpace, {}, 1, 151 swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
152 vk::ImageUsageFlagBits::eColorAttachment, {}, {}, {}, capabilities.currentTransform, 152 swapchain_ci.pNext = nullptr;
153 vk::CompositeAlphaFlagBitsKHR::eOpaque, present_mode, false, {}); 153 swapchain_ci.flags = 0;
154 swapchain_ci.surface = surface;
155 swapchain_ci.minImageCount = requested_image_count;
156 swapchain_ci.imageFormat = surface_format.format;
157 swapchain_ci.imageColorSpace = surface_format.colorSpace;
158 swapchain_ci.imageArrayLayers = 1;
159 swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
160 swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
161 swapchain_ci.queueFamilyIndexCount = 0;
162 swapchain_ci.pQueueFamilyIndices = nullptr;
163 swapchain_ci.preTransform = capabilities.currentTransform;
164 swapchain_ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
165 swapchain_ci.presentMode = present_mode;
166 swapchain_ci.clipped = VK_FALSE;
167 swapchain_ci.oldSwapchain = nullptr;
154 168
155 const u32 graphics_family{device.GetGraphicsFamily()}; 169 const u32 graphics_family{device.GetGraphicsFamily()};
156 const u32 present_family{device.GetPresentFamily()}; 170 const u32 present_family{device.GetPresentFamily()};
157 const std::array<u32, 2> queue_indices{graphics_family, present_family}; 171 const std::array<u32, 2> queue_indices{graphics_family, present_family};
158 if (graphics_family != present_family) { 172 if (graphics_family != present_family) {
159 swapchain_ci.imageSharingMode = vk::SharingMode::eConcurrent; 173 swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
160 swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size()); 174 swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
161 swapchain_ci.pQueueFamilyIndices = queue_indices.data(); 175 swapchain_ci.pQueueFamilyIndices = queue_indices.data();
162 } else { 176 } else {
163 swapchain_ci.imageSharingMode = vk::SharingMode::eExclusive; 177 swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
164 } 178 }
165 179
166 // Request the size again to reduce the possibility of a TOCTOU race condition. 180 // Request the size again to reduce the possibility of a TOCTOU race condition.
167 const auto updated_capabilities = physical_device.getSurfaceCapabilitiesKHR(surface, dld); 181 const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
168 swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height); 182 swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
 169 // Don't add code between this query and the swapchain creation. 183 // Don't add code between this query and the swapchain creation.
170 const auto dev{device.GetLogical()}; 184 swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
171 swapchain = dev.createSwapchainKHRUnique(swapchain_ci, nullptr, dld);
172 185
173 extent = swapchain_ci.imageExtent; 186 extent = swapchain_ci.imageExtent;
174 current_width = extent.width; 187 current_width = extent.width;
175 current_height = extent.height; 188 current_height = extent.height;
176 current_srgb = srgb; 189 current_srgb = srgb;
177 190
178 images = dev.getSwapchainImagesKHR(*swapchain, dld); 191 images = swapchain.GetImages();
179 image_count = static_cast<u32>(images.size()); 192 image_count = static_cast<u32>(images.size());
180 image_format = surface_format.format; 193 image_format = surface_format.format;
181} 194}
182 195
183void VKSwapchain::CreateSemaphores() { 196void VKSwapchain::CreateSemaphores() {
184 const auto dev{device.GetLogical()};
185 const auto& dld{device.GetDispatchLoader()};
186
187 present_semaphores.resize(image_count); 197 present_semaphores.resize(image_count);
188 for (std::size_t i = 0; i < image_count; i++) { 198 std::generate(present_semaphores.begin(), present_semaphores.end(),
189 present_semaphores[i] = dev.createSemaphoreUnique({}, nullptr, dld); 199 [this] { return device.GetLogical().CreateSemaphore(); });
190 }
191} 200}
192 201
193void VKSwapchain::CreateImageViews() { 202void VKSwapchain::CreateImageViews() {
194 const auto dev{device.GetLogical()}; 203 VkImageViewCreateInfo ci;
195 const auto& dld{device.GetDispatchLoader()}; 204 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
205 ci.pNext = nullptr;
206 ci.flags = 0;
 207 // ci.image is set per swapchain image in the loop below
208 ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
209 ci.format = image_format;
210 ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
211 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
212 ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
213 ci.subresourceRange.baseMipLevel = 0;
214 ci.subresourceRange.levelCount = 1;
215 ci.subresourceRange.baseArrayLayer = 0;
216 ci.subresourceRange.layerCount = 1;
196 217
197 image_views.resize(image_count); 218 image_views.resize(image_count);
198 for (std::size_t i = 0; i < image_count; i++) { 219 for (std::size_t i = 0; i < image_count; i++) {
199 const vk::ImageViewCreateInfo image_view_ci({}, images[i], vk::ImageViewType::e2D, 220 ci.image = images[i];
200 image_format, {}, 221 image_views[i] = device.GetLogical().CreateImageView(ci);
201 {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1});
202 image_views[i] = dev.createImageViewUnique(image_view_ci, nullptr, dld);
203 } 222 }
204} 223}
205 224
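ChooseSwapExtent above reduces to a clamp; a standalone restatement with std::clamp, assuming only the C API:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    #include <vulkan/vulkan.h>

    // Honour a fixed surface extent when one is reported; otherwise clamp the
    // requested size into the range the surface supports. The all-ones width is
    // the sentinel the spec uses for "extent determined by the swapchain".
    VkExtent2D ClampSwapExtent(const VkSurfaceCapabilitiesKHR& caps, std::uint32_t width,
                               std::uint32_t height) {
        if (caps.currentExtent.width != std::numeric_limits<std::uint32_t>::max()) {
            return caps.currentExtent; // the surface dictates the size
        }
        return {std::clamp(width, caps.minImageExtent.width, caps.maxImageExtent.width),
                std::clamp(height, caps.minImageExtent.height, caps.maxImageExtent.height)};
    }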
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
index 2f3b2ccd5..a35d61345 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.h
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -7,7 +7,7 @@
7#include <vector> 7#include <vector>
8 8
9#include "common/common_types.h" 9#include "common/common_types.h"
10#include "video_core/renderer_vulkan/declarations.h" 10#include "video_core/renderer_vulkan/wrapper.h"
11 11
12namespace Layout { 12namespace Layout {
13struct FramebufferLayout; 13struct FramebufferLayout;
@@ -20,7 +20,7 @@ class VKFence;
20 20
21class VKSwapchain { 21class VKSwapchain {
22public: 22public:
23 explicit VKSwapchain(vk::SurfaceKHR surface, const VKDevice& device); 23 explicit VKSwapchain(VkSurfaceKHR surface, const VKDevice& device);
24 ~VKSwapchain(); 24 ~VKSwapchain();
25 25
26 /// Creates (or recreates) the swapchain with a given size. 26 /// Creates (or recreates) the swapchain with a given size.
@@ -31,12 +31,12 @@ public:
31 31
 32 /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be 32 /// Presents the rendered image to the swapchain. Returns true when the swapchain had to be
 33 /// recreated. Takes responsibility for the ownership of the fence. 33 /// recreated. Takes responsibility for the ownership of the fence.
34 bool Present(vk::Semaphore render_semaphore, VKFence& fence); 34 bool Present(VkSemaphore render_semaphore, VKFence& fence);
35 35
36 /// Returns true when the framebuffer layout has changed. 36 /// Returns true when the framebuffer layout has changed.
37 bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const; 37 bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
38 38
39 const vk::Extent2D& GetSize() const { 39 VkExtent2D GetSize() const {
40 return extent; 40 return extent;
41 } 41 }
42 42
@@ -48,15 +48,15 @@ public:
48 return image_index; 48 return image_index;
49 } 49 }
50 50
51 vk::Image GetImageIndex(std::size_t index) const { 51 VkImage GetImageIndex(std::size_t index) const {
52 return images[index]; 52 return images[index];
53 } 53 }
54 54
55 vk::ImageView GetImageViewIndex(std::size_t index) const { 55 VkImageView GetImageViewIndex(std::size_t index) const {
56 return *image_views[index]; 56 return *image_views[index];
57 } 57 }
58 58
59 vk::Format GetImageFormat() const { 59 VkFormat GetImageFormat() const {
60 return image_format; 60 return image_format;
61 } 61 }
62 62
@@ -65,30 +65,30 @@ public:
65 } 65 }
66 66
67private: 67private:
68 void CreateSwapchain(const vk::SurfaceCapabilitiesKHR& capabilities, u32 width, u32 height, 68 void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
69 bool srgb); 69 bool srgb);
70 void CreateSemaphores(); 70 void CreateSemaphores();
71 void CreateImageViews(); 71 void CreateImageViews();
72 72
73 void Destroy(); 73 void Destroy();
74 74
75 const vk::SurfaceKHR surface; 75 const VkSurfaceKHR surface;
76 const VKDevice& device; 76 const VKDevice& device;
77 77
78 UniqueSwapchainKHR swapchain; 78 vk::SwapchainKHR swapchain;
79 79
80 std::size_t image_count{}; 80 std::size_t image_count{};
81 std::vector<vk::Image> images; 81 std::vector<VkImage> images;
82 std::vector<UniqueImageView> image_views; 82 std::vector<vk::ImageView> image_views;
83 std::vector<UniqueFramebuffer> framebuffers; 83 std::vector<vk::Framebuffer> framebuffers;
84 std::vector<VKFence*> fences; 84 std::vector<VKFence*> fences;
85 std::vector<UniqueSemaphore> present_semaphores; 85 std::vector<vk::Semaphore> present_semaphores;
86 86
87 u32 image_index{}; 87 u32 image_index{};
88 u32 frame_index{}; 88 u32 frame_index{};
89 89
90 vk::Format image_format{}; 90 VkFormat image_format{};
91 vk::Extent2D extent{}; 91 VkExtent2D extent{};
92 92
93 u32 current_width{}; 93 u32 current_width{};
94 u32 current_height{}; 94 u32 current_height{};
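A hypothetical per-frame use of this interface; layout, srgb, render_semaphore and fence are assumptions supplied by the surrounding renderer:

    void PresentFrame(VKSwapchain& swapchain, const Layout::FramebufferLayout& layout, bool srgb,
                      VkSemaphore render_semaphore, VKFence& fence) {
        if (swapchain.HasFramebufferChanged(layout)) {
            swapchain.Create(layout.width, layout.height, srgb);
        }
        swapchain.AcquireNextImage();
        // ... render into swapchain.GetImageViewIndex(swapchain.GetImageIndex()) ...
        if (swapchain.Present(render_semaphore, fence)) {
            // The swapchain was out of date and has been recreated; the caller
            // must rebuild anything derived from the old images.
        }
    }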
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index 5b9b39670..de4c23120 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -17,7 +17,6 @@
17#include "core/memory.h" 17#include "core/memory.h"
18#include "video_core/engines/maxwell_3d.h" 18#include "video_core/engines/maxwell_3d.h"
19#include "video_core/morton.h" 19#include "video_core/morton.h"
20#include "video_core/renderer_vulkan/declarations.h"
21#include "video_core/renderer_vulkan/maxwell_to_vk.h" 20#include "video_core/renderer_vulkan/maxwell_to_vk.h"
22#include "video_core/renderer_vulkan/vk_device.h" 21#include "video_core/renderer_vulkan/vk_device.h"
23#include "video_core/renderer_vulkan/vk_memory_manager.h" 22#include "video_core/renderer_vulkan/vk_memory_manager.h"
@@ -25,6 +24,7 @@
25#include "video_core/renderer_vulkan/vk_scheduler.h" 24#include "video_core/renderer_vulkan/vk_scheduler.h"
26#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 25#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
27#include "video_core/renderer_vulkan/vk_texture_cache.h" 26#include "video_core/renderer_vulkan/vk_texture_cache.h"
27#include "video_core/renderer_vulkan/wrapper.h"
28#include "video_core/surface.h" 28#include "video_core/surface.h"
29#include "video_core/textures/convert.h" 29#include "video_core/textures/convert.h"
30 30
@@ -39,18 +39,18 @@ using VideoCore::Surface::SurfaceTarget;
39 39
40namespace { 40namespace {
41 41
42vk::ImageType SurfaceTargetToImage(SurfaceTarget target) { 42VkImageType SurfaceTargetToImage(SurfaceTarget target) {
43 switch (target) { 43 switch (target) {
44 case SurfaceTarget::Texture1D: 44 case SurfaceTarget::Texture1D:
45 case SurfaceTarget::Texture1DArray: 45 case SurfaceTarget::Texture1DArray:
46 return vk::ImageType::e1D; 46 return VK_IMAGE_TYPE_1D;
47 case SurfaceTarget::Texture2D: 47 case SurfaceTarget::Texture2D:
48 case SurfaceTarget::Texture2DArray: 48 case SurfaceTarget::Texture2DArray:
49 case SurfaceTarget::TextureCubemap: 49 case SurfaceTarget::TextureCubemap:
50 case SurfaceTarget::TextureCubeArray: 50 case SurfaceTarget::TextureCubeArray:
51 return vk::ImageType::e2D; 51 return VK_IMAGE_TYPE_2D;
52 case SurfaceTarget::Texture3D: 52 case SurfaceTarget::Texture3D:
53 return vk::ImageType::e3D; 53 return VK_IMAGE_TYPE_3D;
54 case SurfaceTarget::TextureBuffer: 54 case SurfaceTarget::TextureBuffer:
55 UNREACHABLE(); 55 UNREACHABLE();
56 return {}; 56 return {};
@@ -59,35 +59,35 @@ vk::ImageType SurfaceTargetToImage(SurfaceTarget target) {
59 return {}; 59 return {};
60} 60}
61 61
62vk::ImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) { 62VkImageAspectFlags PixelFormatToImageAspect(PixelFormat pixel_format) {
63 if (pixel_format < PixelFormat::MaxColorFormat) { 63 if (pixel_format < PixelFormat::MaxColorFormat) {
64 return vk::ImageAspectFlagBits::eColor; 64 return VK_IMAGE_ASPECT_COLOR_BIT;
65 } else if (pixel_format < PixelFormat::MaxDepthFormat) { 65 } else if (pixel_format < PixelFormat::MaxDepthFormat) {
66 return vk::ImageAspectFlagBits::eDepth; 66 return VK_IMAGE_ASPECT_DEPTH_BIT;
67 } else if (pixel_format < PixelFormat::MaxDepthStencilFormat) { 67 } else if (pixel_format < PixelFormat::MaxDepthStencilFormat) {
68 return vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil; 68 return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
69 } else { 69 } else {
70 UNREACHABLE_MSG("Invalid pixel format={}", static_cast<u32>(pixel_format)); 70 UNREACHABLE_MSG("Invalid pixel format={}", static_cast<int>(pixel_format));
71 return vk::ImageAspectFlagBits::eColor; 71 return VK_IMAGE_ASPECT_COLOR_BIT;
72 } 72 }
73} 73}
74 74
75vk::ImageViewType GetImageViewType(SurfaceTarget target) { 75VkImageViewType GetImageViewType(SurfaceTarget target) {
76 switch (target) { 76 switch (target) {
77 case SurfaceTarget::Texture1D: 77 case SurfaceTarget::Texture1D:
78 return vk::ImageViewType::e1D; 78 return VK_IMAGE_VIEW_TYPE_1D;
79 case SurfaceTarget::Texture2D: 79 case SurfaceTarget::Texture2D:
80 return vk::ImageViewType::e2D; 80 return VK_IMAGE_VIEW_TYPE_2D;
81 case SurfaceTarget::Texture3D: 81 case SurfaceTarget::Texture3D:
82 return vk::ImageViewType::e3D; 82 return VK_IMAGE_VIEW_TYPE_3D;
83 case SurfaceTarget::Texture1DArray: 83 case SurfaceTarget::Texture1DArray:
84 return vk::ImageViewType::e1DArray; 84 return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
85 case SurfaceTarget::Texture2DArray: 85 case SurfaceTarget::Texture2DArray:
86 return vk::ImageViewType::e2DArray; 86 return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
87 case SurfaceTarget::TextureCubemap: 87 case SurfaceTarget::TextureCubemap:
88 return vk::ImageViewType::eCube; 88 return VK_IMAGE_VIEW_TYPE_CUBE;
89 case SurfaceTarget::TextureCubeArray: 89 case SurfaceTarget::TextureCubeArray:
90 return vk::ImageViewType::eCubeArray; 90 return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
91 case SurfaceTarget::TextureBuffer: 91 case SurfaceTarget::TextureBuffer:
92 break; 92 break;
93 } 93 }
@@ -95,73 +95,88 @@ vk::ImageViewType GetImageViewType(SurfaceTarget target) {
95 return {}; 95 return {};
96} 96}
97 97
98UniqueBuffer CreateBuffer(const VKDevice& device, const SurfaceParams& params, 98vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
99 std::size_t host_memory_size) { 99 std::size_t host_memory_size) {
100 // TODO(Rodrigo): Move texture buffer creation to the buffer cache 100 // TODO(Rodrigo): Move texture buffer creation to the buffer cache
101 const vk::BufferCreateInfo buffer_ci({}, host_memory_size, 101 VkBufferCreateInfo ci;
102 vk::BufferUsageFlagBits::eUniformTexelBuffer | 102 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
103 vk::BufferUsageFlagBits::eTransferSrc | 103 ci.pNext = nullptr;
104 vk::BufferUsageFlagBits::eTransferDst, 104 ci.flags = 0;
105 vk::SharingMode::eExclusive, 0, nullptr); 105 ci.size = static_cast<VkDeviceSize>(host_memory_size);
106 const auto dev = device.GetLogical(); 106 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
107 const auto& dld = device.GetDispatchLoader(); 107 VK_BUFFER_USAGE_TRANSFER_DST_BIT;
108 return dev.createBufferUnique(buffer_ci, nullptr, dld); 108 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
109 ci.queueFamilyIndexCount = 0;
110 ci.pQueueFamilyIndices = nullptr;
111 return device.GetLogical().CreateBuffer(ci);
109} 112}
110 113
111vk::BufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device, 114VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
112 const SurfaceParams& params, 115 const SurfaceParams& params, VkBuffer buffer,
113 vk::Buffer buffer, 116 std::size_t host_memory_size) {
114 std::size_t host_memory_size) {
115 ASSERT(params.IsBuffer()); 117 ASSERT(params.IsBuffer());
116 118
117 const auto format = 119 VkBufferViewCreateInfo ci;
118 MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format; 120 ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
119 return vk::BufferViewCreateInfo({}, buffer, format, 0, host_memory_size); 121 ci.pNext = nullptr;
122 ci.flags = 0;
123 ci.buffer = buffer;
124 ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format;
125 ci.offset = 0;
126 ci.range = static_cast<VkDeviceSize>(host_memory_size);
127 return ci;
120} 128}
121 129
122vk::ImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) { 130VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
123 constexpr auto sample_count = vk::SampleCountFlagBits::e1;
124 constexpr auto tiling = vk::ImageTiling::eOptimal;
125
126 ASSERT(!params.IsBuffer()); 131 ASSERT(!params.IsBuffer());
127 132
128 const auto [format, attachable, storage] = 133 const auto [format, attachable, storage] =
129 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format); 134 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
130 135
131 auto image_usage = vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst | 136 VkImageCreateInfo ci;
132 vk::ImageUsageFlagBits::eTransferSrc; 137 ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
138 ci.pNext = nullptr;
139 ci.flags = 0;
140 ci.imageType = SurfaceTargetToImage(params.target);
141 ci.format = format;
142 ci.mipLevels = params.num_levels;
143 ci.arrayLayers = static_cast<u32>(params.GetNumLayers());
144 ci.samples = VK_SAMPLE_COUNT_1_BIT;
145 ci.tiling = VK_IMAGE_TILING_OPTIMAL;
146 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
147 ci.queueFamilyIndexCount = 0;
148 ci.pQueueFamilyIndices = nullptr;
149 ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
150
151 ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
152 VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
133 if (attachable) { 153 if (attachable) {
134 image_usage |= params.IsPixelFormatZeta() ? vk::ImageUsageFlagBits::eDepthStencilAttachment 154 ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
135 : vk::ImageUsageFlagBits::eColorAttachment; 155 : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
136 } 156 }
137 if (storage) { 157 if (storage) {
138 image_usage |= vk::ImageUsageFlagBits::eStorage; 158 ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
139 } 159 }
140 160
141 vk::ImageCreateFlags flags;
142 vk::Extent3D extent;
143 switch (params.target) { 161 switch (params.target) {
144 case SurfaceTarget::TextureCubemap: 162 case SurfaceTarget::TextureCubemap:
145 case SurfaceTarget::TextureCubeArray: 163 case SurfaceTarget::TextureCubeArray:
146 flags |= vk::ImageCreateFlagBits::eCubeCompatible; 164 ci.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
147 [[fallthrough]]; 165 [[fallthrough]];
148 case SurfaceTarget::Texture1D: 166 case SurfaceTarget::Texture1D:
149 case SurfaceTarget::Texture1DArray: 167 case SurfaceTarget::Texture1DArray:
150 case SurfaceTarget::Texture2D: 168 case SurfaceTarget::Texture2D:
151 case SurfaceTarget::Texture2DArray: 169 case SurfaceTarget::Texture2DArray:
152 extent = vk::Extent3D(params.width, params.height, 1); 170 ci.extent = {params.width, params.height, 1};
153 break; 171 break;
154 case SurfaceTarget::Texture3D: 172 case SurfaceTarget::Texture3D:
155 extent = vk::Extent3D(params.width, params.height, params.depth); 173 ci.extent = {params.width, params.height, params.depth};
156 break; 174 break;
157 case SurfaceTarget::TextureBuffer: 175 case SurfaceTarget::TextureBuffer:
158 UNREACHABLE(); 176 UNREACHABLE();
159 } 177 }
160 178
161 return vk::ImageCreateInfo(flags, SurfaceTargetToImage(params.target), format, extent, 179 return ci;
162 params.num_levels, static_cast<u32>(params.GetNumLayers()),
163 sample_count, tiling, image_usage, vk::SharingMode::eExclusive, 0,
164 nullptr, vk::ImageLayout::eUndefined);
165} 180}
166 181
167} // Anonymous namespace 182} // Anonymous namespace
@@ -175,15 +190,13 @@ CachedSurface::CachedSurface(Core::System& system, const VKDevice& device,
175 memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} { 190 memory_manager{memory_manager}, scheduler{scheduler}, staging_pool{staging_pool} {
176 if (params.IsBuffer()) { 191 if (params.IsBuffer()) {
177 buffer = CreateBuffer(device, params, host_memory_size); 192 buffer = CreateBuffer(device, params, host_memory_size);
178 commit = memory_manager.Commit(*buffer, false); 193 commit = memory_manager.Commit(buffer, false);
179 194
180 const auto buffer_view_ci = 195 const auto buffer_view_ci =
181 GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size); 196 GenerateBufferViewCreateInfo(device, params, *buffer, host_memory_size);
182 format = buffer_view_ci.format; 197 format = buffer_view_ci.format;
183 198
184 const auto dev = device.GetLogical(); 199 buffer_view = device.GetLogical().CreateBufferView(buffer_view_ci);
185 const auto& dld = device.GetDispatchLoader();
186 buffer_view = dev.createBufferViewUnique(buffer_view_ci, nullptr, dld);
187 } else { 200 } else {
188 const auto image_ci = GenerateImageCreateInfo(device, params); 201 const auto image_ci = GenerateImageCreateInfo(device, params);
189 format = image_ci.format; 202 format = image_ci.format;
@@ -221,16 +234,15 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
221 // We can't copy images to buffers inside a renderpass 234 // We can't copy images to buffers inside a renderpass
222 scheduler.RequestOutsideRenderPassOperationContext(); 235 scheduler.RequestOutsideRenderPassOperationContext();
223 236
224 FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead, 237 FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
225 vk::ImageLayout::eTransferSrcOptimal); 238 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
226 239
227 const auto& buffer = staging_pool.GetUnusedBuffer(host_memory_size, true); 240 const auto& buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
228 // TODO(Rodrigo): Do this in a single copy 241 // TODO(Rodrigo): Do this in a single copy
229 for (u32 level = 0; level < params.num_levels; ++level) { 242 for (u32 level = 0; level < params.num_levels; ++level) {
230 scheduler.Record([image = image->GetHandle(), buffer = *buffer.handle, 243 scheduler.Record([image = *image->GetHandle(), buffer = *buffer.handle,
231 copy = GetBufferImageCopy(level)](auto cmdbuf, auto& dld) { 244 copy = GetBufferImageCopy(level)](vk::CommandBuffer cmdbuf) {
232 cmdbuf.copyImageToBuffer(image, vk::ImageLayout::eTransferSrcOptimal, buffer, {copy}, 245 cmdbuf.CopyImageToBuffer(image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, copy);
233 dld);
234 }); 246 });
235 } 247 }
236 scheduler.Finish(); 248 scheduler.Finish();
@@ -257,15 +269,27 @@ void CachedSurface::UploadBuffer(const std::vector<u8>& staging_buffer) {
257 std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size); 269 std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
258 270
259 scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer, 271 scheduler.Record([src_buffer = *src_buffer.handle, dst_buffer = *buffer,
260 size = host_memory_size](auto cmdbuf, auto& dld) { 272 size = host_memory_size](vk::CommandBuffer cmdbuf) {
261 const vk::BufferCopy copy(0, 0, size); 273 VkBufferCopy copy;
262 cmdbuf.copyBuffer(src_buffer, dst_buffer, {copy}, dld); 274 copy.srcOffset = 0;
263 275 copy.dstOffset = 0;
264 cmdbuf.pipelineBarrier( 276 copy.size = size;
265 vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eVertexShader, {}, {}, 277 cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
266 {vk::BufferMemoryBarrier(vk::AccessFlagBits::eTransferWrite, 278
267 vk::AccessFlagBits::eShaderRead, 0, 0, dst_buffer, 0, size)}, 279 VkBufferMemoryBarrier barrier;
268 {}, dld); 280 barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
281 barrier.pNext = nullptr;
 282 barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
 283 barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
 284 // No queue family ownership transfer is intended; this barrier only orders
 285 // the transfer write against subsequent vertex shader reads.
 286 barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
 287 barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
288 barrier.buffer = dst_buffer;
289 barrier.offset = 0;
290 barrier.size = size;
291 cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
292 0, {}, barrier, {});
269 }); 293 });
270} 294}
271 295
@@ -273,43 +297,49 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
273 const auto& src_buffer = staging_pool.GetUnusedBuffer(host_memory_size, true); 297 const auto& src_buffer = staging_pool.GetUnusedBuffer(host_memory_size, true);
274 std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size); 298 std::memcpy(src_buffer.commit->Map(host_memory_size), staging_buffer.data(), host_memory_size);
275 299
276 FullTransition(vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferWrite, 300 FullTransition(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
277 vk::ImageLayout::eTransferDstOptimal); 301 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
278 302
279 for (u32 level = 0; level < params.num_levels; ++level) { 303 for (u32 level = 0; level < params.num_levels; ++level) {
280 vk::BufferImageCopy copy = GetBufferImageCopy(level); 304 const VkBufferImageCopy copy = GetBufferImageCopy(level);
281 if (image->GetAspectMask() == 305 if (image->GetAspectMask() == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
282 (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) { 306 scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
283 vk::BufferImageCopy depth = copy; 307 copy](vk::CommandBuffer cmdbuf) {
284 vk::BufferImageCopy stencil = copy; 308 std::array<VkBufferImageCopy, 2> copies = {copy, copy};
285 depth.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eDepth; 309 copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
286 stencil.imageSubresource.aspectMask = vk::ImageAspectFlagBits::eStencil; 310 copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
287 scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(), depth, 311 cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
288 stencil](auto cmdbuf, auto& dld) { 312 copies);
289 cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal,
290 {depth, stencil}, dld);
291 }); 313 });
292 } else { 314 } else {
293 scheduler.Record([buffer = *src_buffer.handle, image = image->GetHandle(), 315 scheduler.Record([buffer = *src_buffer.handle, image = *image->GetHandle(),
294 copy](auto cmdbuf, auto& dld) { 316 copy](vk::CommandBuffer cmdbuf) {
295 cmdbuf.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal, 317 cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
296 {copy}, dld);
297 }); 318 });
298 } 319 }
299 } 320 }
300} 321}
301 322
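
The depth-stencil branch above exists because a buffer-image copy region may name only a single aspect bit, so combined depth-stencil surfaces need one region per aspect. The same split as a standalone helper (a sketch, not project code):

    #include <array>
    #include <vulkan/vulkan.h>

    // Duplicate a region and retarget each copy at one aspect of the image.
    std::array<VkBufferImageCopy, 2> SplitDepthStencil(const VkBufferImageCopy& copy) {
        std::array<VkBufferImageCopy, 2> copies{copy, copy};
        copies[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
        copies[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
        return copies;
    }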
302vk::BufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const { 323VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
303 const u32 vk_depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1; 324 VkBufferImageCopy copy;
304 const std::size_t mip_offset = params.GetHostMipmapLevelOffset(level, is_converted); 325 copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted);
305 326 copy.bufferRowLength = 0;
306 return vk::BufferImageCopy( 327 copy.bufferImageHeight = 0;
307 mip_offset, 0, 0, 328 copy.imageSubresource.aspectMask = image->GetAspectMask();
308 {image->GetAspectMask(), level, 0, static_cast<u32>(params.GetNumLayers())}, {0, 0, 0}, 329 copy.imageSubresource.mipLevel = level;
309 {params.GetMipWidth(level), params.GetMipHeight(level), vk_depth}); 330 copy.imageSubresource.baseArrayLayer = 0;
331 copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers());
332 copy.imageOffset.x = 0;
333 copy.imageOffset.y = 0;
334 copy.imageOffset.z = 0;
335 copy.imageExtent.width = params.GetMipWidth(level);
336 copy.imageExtent.height = params.GetMipHeight(level);
337 copy.imageExtent.depth =
338 params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1;
339 return copy;
310} 340}
311 341
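
Note that bufferRowLength and bufferImageHeight stay at 0, which tells Vulkan the buffer data is tightly packed to the image extent. The same region written as one aggregate, with dummy color-surface values for illustration:

    #include <vulkan/vulkan.h>

    // Equivalent aggregate form; 0 row length / image height = tightly packed.
    constexpr VkBufferImageCopy example_copy{
        /*bufferOffset=*/0,
        /*bufferRowLength=*/0,
        /*bufferImageHeight=*/0,
        /*imageSubresource=*/{VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1},
        /*imageOffset=*/{0, 0, 0},
        /*imageExtent=*/{64, 64, 1},
    };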
312vk::ImageSubresourceRange CachedSurface::GetImageSubresourceRange() const { 342VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
313 return {image->GetAspectMask(), 0, params.num_levels, 0, 343 return {image->GetAspectMask(), 0, params.num_levels, 0,
314 static_cast<u32>(params.GetNumLayers())}; 344 static_cast<u32>(params.GetNumLayers())};
315} 345}
@@ -321,12 +351,12 @@ CachedSurfaceView::CachedSurfaceView(const VKDevice& device, CachedSurface& surf
321 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface}, 351 aspect_mask{surface.GetAspectMask()}, device{device}, surface{surface},
322 base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level}, 352 base_layer{params.base_layer}, num_layers{params.num_layers}, base_level{params.base_level},
323 num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target) 353 num_levels{params.num_levels}, image_view_type{image ? GetImageViewType(params.target)
324 : vk::ImageViewType{}} {} 354 : VK_IMAGE_VIEW_TYPE_1D} {}
325 355
326CachedSurfaceView::~CachedSurfaceView() = default; 356CachedSurfaceView::~CachedSurfaceView() = default;
327 357
328vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source, 358VkImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource y_source,
329 SwizzleSource z_source, SwizzleSource w_source) { 359 SwizzleSource z_source, SwizzleSource w_source) {
330 const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source); 360 const u32 swizzle = EncodeSwizzle(x_source, y_source, z_source, w_source);
331 if (last_image_view && last_swizzle == swizzle) { 361 if (last_image_view && last_swizzle == swizzle) {
332 return last_image_view; 362 return last_image_view;
@@ -351,37 +381,45 @@ vk::ImageView CachedSurfaceView::GetHandle(SwizzleSource x_source, SwizzleSource
351 381
352 // Games can sample depth or stencil values on textures. This is decided by the swizzle value on 382 // Games can sample depth or stencil values on textures. This is decided by the swizzle value on
353 // hardware. To emulate this on Vulkan we specify it in the aspect. 383 // hardware. To emulate this on Vulkan we specify it in the aspect.
354 vk::ImageAspectFlags aspect = aspect_mask; 384 VkImageAspectFlags aspect = aspect_mask;
355 if (aspect == (vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil)) { 385 if (aspect == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
356 UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G); 386 UNIMPLEMENTED_IF(x_source != SwizzleSource::R && x_source != SwizzleSource::G);
357 const bool is_first = x_source == SwizzleSource::R; 387 const bool is_first = x_source == SwizzleSource::R;
358 switch (params.pixel_format) { 388 switch (params.pixel_format) {
359 case VideoCore::Surface::PixelFormat::Z24S8: 389 case VideoCore::Surface::PixelFormat::Z24S8:
360 case VideoCore::Surface::PixelFormat::Z32FS8: 390 case VideoCore::Surface::PixelFormat::Z32FS8:
361 aspect = is_first ? vk::ImageAspectFlagBits::eDepth : vk::ImageAspectFlagBits::eStencil; 391 aspect = is_first ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_STENCIL_BIT;
362 break; 392 break;
363 case VideoCore::Surface::PixelFormat::S8Z24: 393 case VideoCore::Surface::PixelFormat::S8Z24:
364 aspect = is_first ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits::eDepth; 394 aspect = is_first ? VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_DEPTH_BIT;
365 break; 395 break;
366 default: 396 default:
367 aspect = vk::ImageAspectFlagBits::eDepth; 397 aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
368 UNIMPLEMENTED(); 398 UNIMPLEMENTED();
369 } 399 }
370 400
371 // Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity 401 // Vulkan doesn't seem to understand swizzling of a depth stencil image, use identity
372 swizzle_x = vk::ComponentSwizzle::eR; 402 swizzle_x = VK_COMPONENT_SWIZZLE_R;
373 swizzle_y = vk::ComponentSwizzle::eG; 403 swizzle_y = VK_COMPONENT_SWIZZLE_G;
374 swizzle_z = vk::ComponentSwizzle::eB; 404 swizzle_z = VK_COMPONENT_SWIZZLE_B;
375 swizzle_w = vk::ComponentSwizzle::eA; 405 swizzle_w = VK_COMPONENT_SWIZZLE_A;
376 } 406 }
377 407
378 const vk::ImageViewCreateInfo image_view_ci( 408 VkImageViewCreateInfo ci;
379 {}, surface.GetImageHandle(), image_view_type, surface.GetImage().GetFormat(), 409 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
380 {swizzle_x, swizzle_y, swizzle_z, swizzle_w}, 410 ci.pNext = nullptr;
381 {aspect, base_level, num_levels, base_layer, num_layers}); 411 ci.flags = 0;
412 ci.image = surface.GetImageHandle();
413 ci.viewType = image_view_type;
414 ci.format = surface.GetImage().GetFormat();
415 ci.components = {swizzle_x, swizzle_y, swizzle_z, swizzle_w};
416 ci.subresourceRange.aspectMask = aspect;
417 ci.subresourceRange.baseMipLevel = base_level;
418 ci.subresourceRange.levelCount = num_levels;
419 ci.subresourceRange.baseArrayLayer = base_layer;
420 ci.subresourceRange.layerCount = num_layers;
421 image_view = device.GetLogical().CreateImageView(ci);
382 422
383 const auto dev = device.GetLogical();
384 image_view = dev.createImageViewUnique(image_view_ci, nullptr, device.GetDispatchLoader());
385 return last_image_view = *image_view; 423 return last_image_view = *image_view;
386} 424}
387 425
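
GetHandle creates at most one image view per swizzle combination by packing the four sources into a single u32 cache key. A sketch of such an encoding (the enum values and shift widths here are illustrative, not necessarily the project's):

    #include <cstdint>

    enum class SwizzleSource : std::uint32_t { Zero, One, R, G, B, A };

    // One byte per component is more than enough for six possible sources.
    constexpr std::uint32_t EncodeSwizzle(SwizzleSource x, SwizzleSource y, SwizzleSource z,
                                          SwizzleSource w) {
        return (static_cast<std::uint32_t>(x) << 24) | (static_cast<std::uint32_t>(y) << 16) |
               (static_cast<std::uint32_t>(z) << 8) | static_cast<std::uint32_t>(w);
    }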
@@ -418,25 +456,36 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
418 scheduler.RequestOutsideRenderPassOperationContext(); 456 scheduler.RequestOutsideRenderPassOperationContext();
419 457
420 src_surface->Transition(copy_params.source_z, copy_params.depth, copy_params.source_level, 1, 458 src_surface->Transition(copy_params.source_z, copy_params.depth, copy_params.source_level, 1,
421 vk::PipelineStageFlagBits::eTransfer, vk::AccessFlagBits::eTransferRead, 459 VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_READ_BIT,
422 vk::ImageLayout::eTransferSrcOptimal); 460 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
423 dst_surface->Transition( 461 dst_surface->Transition(dst_base_layer, num_layers, copy_params.dest_level, 1,
424 dst_base_layer, num_layers, copy_params.dest_level, 1, vk::PipelineStageFlagBits::eTransfer, 462 VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
425 vk::AccessFlagBits::eTransferWrite, vk::ImageLayout::eTransferDstOptimal); 463 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
426 464
427 const vk::ImageSubresourceLayers src_subresource( 465 VkImageCopy copy;
428 src_surface->GetAspectMask(), copy_params.source_level, copy_params.source_z, num_layers); 466 copy.srcSubresource.aspectMask = src_surface->GetAspectMask();
429 const vk::ImageSubresourceLayers dst_subresource( 467 copy.srcSubresource.mipLevel = copy_params.source_level;
430 dst_surface->GetAspectMask(), copy_params.dest_level, dst_base_layer, num_layers); 468 copy.srcSubresource.baseArrayLayer = copy_params.source_z;
431 const vk::Offset3D src_offset(copy_params.source_x, copy_params.source_y, 0); 469 copy.srcSubresource.layerCount = num_layers;
432 const vk::Offset3D dst_offset(copy_params.dest_x, copy_params.dest_y, dst_offset_z); 470 copy.srcOffset.x = copy_params.source_x;
433 const vk::Extent3D extent(copy_params.width, copy_params.height, extent_z); 471 copy.srcOffset.y = copy_params.source_y;
434 const vk::ImageCopy copy(src_subresource, src_offset, dst_subresource, dst_offset, extent); 472 copy.srcOffset.z = 0;
435 const vk::Image src_image = src_surface->GetImageHandle(); 473 copy.dstSubresource.aspectMask = dst_surface->GetAspectMask();
436 const vk::Image dst_image = dst_surface->GetImageHandle(); 474 copy.dstSubresource.mipLevel = copy_params.dest_level;
437 scheduler.Record([src_image, dst_image, copy](auto cmdbuf, auto& dld) { 475 copy.dstSubresource.baseArrayLayer = dst_base_layer;
438 cmdbuf.copyImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image, 476 copy.dstSubresource.layerCount = num_layers;
439 vk::ImageLayout::eTransferDstOptimal, {copy}, dld); 477 copy.dstOffset.x = copy_params.dest_x;
478 copy.dstOffset.y = copy_params.dest_y;
479 copy.dstOffset.z = dst_offset_z;
480 copy.extent.width = copy_params.width;
481 copy.extent.height = copy_params.height;
482 copy.extent.depth = extent_z;
483
484 const VkImage src_image = src_surface->GetImageHandle();
485 const VkImage dst_image = dst_surface->GetImageHandle();
486 scheduler.Record([src_image, dst_image, copy](vk::CommandBuffer cmdbuf) {
487 cmdbuf.CopyImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
488 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, copy);
440 }); 489 });
441} 490}
442 491
@@ -445,25 +494,34 @@ void VKTextureCache::ImageBlit(View& src_view, View& dst_view,
445 // We can't blit inside a renderpass 494 // We can't blit inside a renderpass
446 scheduler.RequestOutsideRenderPassOperationContext(); 495 scheduler.RequestOutsideRenderPassOperationContext();
447 496
448 src_view->Transition(vk::ImageLayout::eTransferSrcOptimal, vk::PipelineStageFlagBits::eTransfer, 497 src_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
449 vk::AccessFlagBits::eTransferRead); 498 VK_ACCESS_TRANSFER_READ_BIT);
450 dst_view->Transition(vk::ImageLayout::eTransferDstOptimal, vk::PipelineStageFlagBits::eTransfer, 499 dst_view->Transition(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_PIPELINE_STAGE_TRANSFER_BIT,
451 vk::AccessFlagBits::eTransferWrite); 500 VK_ACCESS_TRANSFER_WRITE_BIT);
452 501
453 const auto& cfg = copy_config; 502 VkImageBlit blit;
454 const auto src_top_left = vk::Offset3D(cfg.src_rect.left, cfg.src_rect.top, 0); 503 blit.srcSubresource = src_view->GetImageSubresourceLayers();
455 const auto src_bot_right = vk::Offset3D(cfg.src_rect.right, cfg.src_rect.bottom, 1); 504 blit.srcOffsets[0].x = copy_config.src_rect.left;
456 const auto dst_top_left = vk::Offset3D(cfg.dst_rect.left, cfg.dst_rect.top, 0); 505 blit.srcOffsets[0].y = copy_config.src_rect.top;
457 const auto dst_bot_right = vk::Offset3D(cfg.dst_rect.right, cfg.dst_rect.bottom, 1); 506 blit.srcOffsets[0].z = 0;
458 const vk::ImageBlit blit(src_view->GetImageSubresourceLayers(), {src_top_left, src_bot_right}, 507 blit.srcOffsets[1].x = copy_config.src_rect.right;
459 dst_view->GetImageSubresourceLayers(), {dst_top_left, dst_bot_right}); 508 blit.srcOffsets[1].y = copy_config.src_rect.bottom;
509 blit.srcOffsets[1].z = 1;
510 blit.dstSubresource = dst_view->GetImageSubresourceLayers();
511 blit.dstOffsets[0].x = copy_config.dst_rect.left;
512 blit.dstOffsets[0].y = copy_config.dst_rect.top;
513 blit.dstOffsets[0].z = 0;
514 blit.dstOffsets[1].x = copy_config.dst_rect.right;
515 blit.dstOffsets[1].y = copy_config.dst_rect.bottom;
516 blit.dstOffsets[1].z = 1;
517
460 const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear; 518 const bool is_linear = copy_config.filter == Tegra::Engines::Fermi2D::Filter::Linear;
461 519
462 scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit, 520 scheduler.Record([src_image = src_view->GetImage(), dst_image = dst_view->GetImage(), blit,
463 is_linear](auto cmdbuf, auto& dld) { 521 is_linear](vk::CommandBuffer cmdbuf) {
464 cmdbuf.blitImage(src_image, vk::ImageLayout::eTransferSrcOptimal, dst_image, 522 cmdbuf.BlitImage(src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image,
465 vk::ImageLayout::eTransferDstOptimal, {blit}, 523 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, blit,
466 is_linear ? vk::Filter::eLinear : vk::Filter::eNearest, dld); 524 is_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
467 }); 525 });
468} 526}
469 527
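
The wrapper's BlitImage maps directly onto vkCmdBlitImage; only the filter is derived from the Fermi2D configuration. The equivalent raw call (handles and region assumed valid):

    #include <vulkan/vulkan.h>

    void RecordBlit(VkCommandBuffer cmdbuf, VkImage src, VkImage dst, const VkImageBlit& blit,
                    bool is_linear) {
        vkCmdBlitImage(cmdbuf, src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst,
                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
                       is_linear ? VK_FILTER_LINEAR : VK_FILTER_NEAREST);
    }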
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 22e3d34de..115595f28 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -13,10 +13,10 @@
13#include "common/math_util.h" 13#include "common/math_util.h"
14#include "video_core/gpu.h" 14#include "video_core/gpu.h"
15#include "video_core/rasterizer_cache.h" 15#include "video_core/rasterizer_cache.h"
16#include "video_core/renderer_vulkan/declarations.h"
17#include "video_core/renderer_vulkan/vk_image.h" 16#include "video_core/renderer_vulkan/vk_image.h"
18#include "video_core/renderer_vulkan/vk_memory_manager.h" 17#include "video_core/renderer_vulkan/vk_memory_manager.h"
19#include "video_core/renderer_vulkan/vk_scheduler.h" 18#include "video_core/renderer_vulkan/vk_scheduler.h"
19#include "video_core/renderer_vulkan/wrapper.h"
20#include "video_core/texture_cache/surface_base.h" 20#include "video_core/texture_cache/surface_base.h"
21#include "video_core/texture_cache/texture_cache.h" 21#include "video_core/texture_cache/texture_cache.h"
22#include "video_core/textures/decoders.h" 22#include "video_core/textures/decoders.h"
@@ -60,15 +60,15 @@ public:
60 void UploadTexture(const std::vector<u8>& staging_buffer) override; 60 void UploadTexture(const std::vector<u8>& staging_buffer) override;
61 void DownloadTexture(std::vector<u8>& staging_buffer) override; 61 void DownloadTexture(std::vector<u8>& staging_buffer) override;
62 62
63 void FullTransition(vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, 63 void FullTransition(VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
64 vk::ImageLayout new_layout) { 64 VkImageLayout new_layout) {
65 image->Transition(0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels, 65 image->Transition(0, static_cast<u32>(params.GetNumLayers()), 0, params.num_levels,
66 new_stage_mask, new_access, new_layout); 66 new_stage_mask, new_access, new_layout);
67 } 67 }
68 68
69 void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels, 69 void Transition(u32 base_layer, u32 num_layers, u32 base_level, u32 num_levels,
70 vk::PipelineStageFlags new_stage_mask, vk::AccessFlags new_access, 70 VkPipelineStageFlags new_stage_mask, VkAccessFlags new_access,
71 vk::ImageLayout new_layout) { 71 VkImageLayout new_layout) {
72 image->Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask, 72 image->Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
73 new_access, new_layout); 73 new_access, new_layout);
74 } 74 }
@@ -81,15 +81,15 @@ public:
81 return *image; 81 return *image;
82 } 82 }
83 83
84 vk::Image GetImageHandle() const { 84 VkImage GetImageHandle() const {
85 return image->GetHandle(); 85 return *image->GetHandle();
86 } 86 }
87 87
88 vk::ImageAspectFlags GetAspectMask() const { 88 VkImageAspectFlags GetAspectMask() const {
89 return image->GetAspectMask(); 89 return image->GetAspectMask();
90 } 90 }
91 91
92 vk::BufferView GetBufferViewHandle() const { 92 VkBufferView GetBufferViewHandle() const {
93 return *buffer_view; 93 return *buffer_view;
94 } 94 }
95 95
@@ -104,9 +104,9 @@ private:
104 104
105 void UploadImage(const std::vector<u8>& staging_buffer); 105 void UploadImage(const std::vector<u8>& staging_buffer);
106 106
107 vk::BufferImageCopy GetBufferImageCopy(u32 level) const; 107 VkBufferImageCopy GetBufferImageCopy(u32 level) const;
108 108
109 vk::ImageSubresourceRange GetImageSubresourceRange() const; 109 VkImageSubresourceRange GetImageSubresourceRange() const;
110 110
111 Core::System& system; 111 Core::System& system;
112 const VKDevice& device; 112 const VKDevice& device;
@@ -116,11 +116,11 @@ private:
116 VKStagingBufferPool& staging_pool; 116 VKStagingBufferPool& staging_pool;
117 117
118 std::optional<VKImage> image; 118 std::optional<VKImage> image;
119 UniqueBuffer buffer; 119 vk::Buffer buffer;
120 UniqueBufferView buffer_view; 120 vk::BufferView buffer_view;
121 VKMemoryCommit commit; 121 VKMemoryCommit commit;
122 122
123 vk::Format format; 123 VkFormat format = VK_FORMAT_UNDEFINED;
124}; 124};
125 125
126class CachedSurfaceView final : public VideoCommon::ViewBase { 126class CachedSurfaceView final : public VideoCommon::ViewBase {
@@ -129,16 +129,16 @@ public:
129 const ViewParams& params, bool is_proxy); 129 const ViewParams& params, bool is_proxy);
130 ~CachedSurfaceView(); 130 ~CachedSurfaceView();
131 131
132 vk::ImageView GetHandle(Tegra::Texture::SwizzleSource x_source, 132 VkImageView GetHandle(Tegra::Texture::SwizzleSource x_source,
133 Tegra::Texture::SwizzleSource y_source, 133 Tegra::Texture::SwizzleSource y_source,
134 Tegra::Texture::SwizzleSource z_source, 134 Tegra::Texture::SwizzleSource z_source,
135 Tegra::Texture::SwizzleSource w_source); 135 Tegra::Texture::SwizzleSource w_source);
136 136
137 bool IsSameSurface(const CachedSurfaceView& rhs) const { 137 bool IsSameSurface(const CachedSurfaceView& rhs) const {
138 return &surface == &rhs.surface; 138 return &surface == &rhs.surface;
139 } 139 }
140 140
141 vk::ImageView GetHandle() { 141 VkImageView GetHandle() {
142 return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G, 142 return GetHandle(Tegra::Texture::SwizzleSource::R, Tegra::Texture::SwizzleSource::G,
143 Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A); 143 Tegra::Texture::SwizzleSource::B, Tegra::Texture::SwizzleSource::A);
144 } 144 }
@@ -159,24 +159,24 @@ public:
159 return buffer_view; 159 return buffer_view;
160 } 160 }
161 161
162 vk::Image GetImage() const { 162 VkImage GetImage() const {
163 return image; 163 return image;
164 } 164 }
165 165
166 vk::BufferView GetBufferView() const { 166 VkBufferView GetBufferView() const {
167 return buffer_view; 167 return buffer_view;
168 } 168 }
169 169
170 vk::ImageSubresourceRange GetImageSubresourceRange() const { 170 VkImageSubresourceRange GetImageSubresourceRange() const {
171 return {aspect_mask, base_level, num_levels, base_layer, num_layers}; 171 return {aspect_mask, base_level, num_levels, base_layer, num_layers};
172 } 172 }
173 173
174 vk::ImageSubresourceLayers GetImageSubresourceLayers() const { 174 VkImageSubresourceLayers GetImageSubresourceLayers() const {
175 return {surface.GetAspectMask(), base_level, base_layer, num_layers}; 175 return {surface.GetAspectMask(), base_level, base_layer, num_layers};
176 } 176 }
177 177
178 void Transition(vk::ImageLayout new_layout, vk::PipelineStageFlags new_stage_mask, 178 void Transition(VkImageLayout new_layout, VkPipelineStageFlags new_stage_mask,
179 vk::AccessFlags new_access) const { 179 VkAccessFlags new_access) const {
180 surface.Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask, 180 surface.Transition(base_layer, num_layers, base_level, num_levels, new_stage_mask,
181 new_access, new_layout); 181 new_access, new_layout);
182 } 182 }
@@ -196,9 +196,9 @@ private:
196 196
197 // Store a copy of these values to avoid double dereference when reading them 197 // Store a copy of these values to avoid double dereference when reading them
198 const SurfaceParams params; 198 const SurfaceParams params;
199 const vk::Image image; 199 const VkImage image;
200 const vk::BufferView buffer_view; 200 const VkBufferView buffer_view;
201 const vk::ImageAspectFlags aspect_mask; 201 const VkImageAspectFlags aspect_mask;
202 202
203 const VKDevice& device; 203 const VKDevice& device;
204 CachedSurface& surface; 204 CachedSurface& surface;
@@ -206,12 +206,12 @@ private:
206 const u32 num_layers; 206 const u32 num_layers;
207 const u32 base_level; 207 const u32 base_level;
208 const u32 num_levels; 208 const u32 num_levels;
209 const vk::ImageViewType image_view_type; 209 const VkImageViewType image_view_type;
210 210
211 vk::ImageView last_image_view; 211 VkImageView last_image_view = nullptr;
212 u32 last_swizzle{}; 212 u32 last_swizzle = 0;
213 213
214 std::unordered_map<u32, UniqueImageView> view_cache; 214 std::unordered_map<u32, vk::ImageView> view_cache;
215}; 215};
216 216
217class VKTextureCache final : public TextureCacheBase { 217class VKTextureCache final : public TextureCacheBase {
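
The header now stores the wrapper's own RAII handles (vk::Buffer, vk::BufferView, vk::ImageView) in place of Vulkan-Hpp's UniqueBuffer and friends. A compressed sketch of the idea behind such a handle type, with owning destruction and the '*handle' dereference spelling seen throughout this diff (simplified; the real types in wrapper.h differ):

    #include <utility>
    #include <vulkan/vulkan.h>

    class OwnedBuffer {
    public:
        OwnedBuffer() = default;
        OwnedBuffer(VkDevice device_, VkBuffer buffer_) : device{device_}, buffer{buffer_} {}
        OwnedBuffer(const OwnedBuffer&) = delete;
        OwnedBuffer(OwnedBuffer&& rhs) noexcept
            : device{rhs.device}, buffer{std::exchange(rhs.buffer, VkBuffer{})} {}
        ~OwnedBuffer() {
            if (buffer != VkBuffer{}) {
                vkDestroyBuffer(device, buffer, nullptr);
            }
        }
        VkBuffer operator*() const { // mirrors the '*buffer' usage above
            return buffer;
        }

    private:
        VkDevice device{};
        VkBuffer buffer{};
    };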
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
index 0e577b9ff..4bfec0077 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.cpp
@@ -7,10 +7,10 @@
7 7
8#include "common/assert.h" 8#include "common/assert.h"
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "video_core/renderer_vulkan/declarations.h"
11#include "video_core/renderer_vulkan/vk_device.h" 10#include "video_core/renderer_vulkan/vk_device.h"
12#include "video_core/renderer_vulkan/vk_scheduler.h" 11#include "video_core/renderer_vulkan/vk_scheduler.h"
13#include "video_core/renderer_vulkan/vk_update_descriptor.h" 12#include "video_core/renderer_vulkan/vk_update_descriptor.h"
13#include "video_core/renderer_vulkan/wrapper.h"
14 14
15namespace Vulkan { 15namespace Vulkan {
16 16
@@ -27,8 +27,8 @@ void VKUpdateDescriptorQueue::Acquire() {
27 entries.clear(); 27 entries.clear();
28} 28}
29 29
30void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template, 30void VKUpdateDescriptorQueue::Send(VkDescriptorUpdateTemplateKHR update_template,
31 vk::DescriptorSet set) { 31 VkDescriptorSet set) {
32 if (payload.size() + entries.size() >= payload.max_size()) { 32 if (payload.size() + entries.size() >= payload.max_size()) {
33 LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread"); 33 LOG_WARNING(Render_Vulkan, "Payload overflow, waiting for worker thread");
34 scheduler.WaitWorker(); 34 scheduler.WaitWorker();
@@ -37,21 +37,21 @@ void VKUpdateDescriptorQueue::Send(vk::DescriptorUpdateTemplate update_template,
37 37
38 const auto payload_start = payload.data() + payload.size(); 38 const auto payload_start = payload.data() + payload.size();
39 for (const auto& entry : entries) { 39 for (const auto& entry : entries) {
40 if (const auto image = std::get_if<vk::DescriptorImageInfo>(&entry)) { 40 if (const auto image = std::get_if<VkDescriptorImageInfo>(&entry)) {
41 payload.push_back(*image); 41 payload.push_back(*image);
42 } else if (const auto buffer = std::get_if<Buffer>(&entry)) { 42 } else if (const auto buffer = std::get_if<Buffer>(&entry)) {
43 payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size); 43 payload.emplace_back(*buffer->buffer, buffer->offset, buffer->size);
44 } else if (const auto texel = std::get_if<vk::BufferView>(&entry)) { 44 } else if (const auto texel = std::get_if<VkBufferView>(&entry)) {
45 payload.push_back(*texel); 45 payload.push_back(*texel);
46 } else { 46 } else {
47 UNREACHABLE(); 47 UNREACHABLE();
48 } 48 }
49 } 49 }
50 50
51 scheduler.Record([dev = device.GetLogical(), payload_start, set, 51 scheduler.Record(
52 update_template]([[maybe_unused]] auto cmdbuf, auto& dld) { 52 [payload_start, set, update_template, logical = &device.GetLogical()](vk::CommandBuffer) {
53 dev.updateDescriptorSetWithTemplate(set, update_template, payload_start, dld); 53 logical->UpdateDescriptorSet(set, update_template, payload_start);
54 }); 54 });
55} 55}
56 56
57} // namespace Vulkan 57} // namespace Vulkan
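
Note that Send no longer touches the command buffer at all: the recorded lambda ignores its vk::CommandBuffer argument and performs the update on the worker thread through the logical device, presumably ending in the core call below (a sketch under that assumption):

    #include <vulkan/vulkan.h>

    // The template interprets 'payload' as raw memory laid out to match its entries,
    // which is why image infos, buffer infos and texel views can share one flat array.
    void UpdateWithTemplate(VkDevice device, VkDescriptorSet set,
                            VkDescriptorUpdateTemplate update_template, const void* payload) {
        vkUpdateDescriptorSetWithTemplate(device, set, update_template, payload);
    }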
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 8c825aa29..a9e3d5dba 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -9,7 +9,7 @@
9#include <boost/container/static_vector.hpp> 9#include <boost/container/static_vector.hpp>
10 10
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "video_core/renderer_vulkan/declarations.h" 12#include "video_core/renderer_vulkan/wrapper.h"
13 13
14namespace Vulkan { 14namespace Vulkan {
15 15
@@ -20,18 +20,18 @@ class DescriptorUpdateEntry {
20public: 20public:
21 explicit DescriptorUpdateEntry() : image{} {} 21 explicit DescriptorUpdateEntry() : image{} {}
22 22
23 DescriptorUpdateEntry(vk::DescriptorImageInfo image) : image{image} {} 23 DescriptorUpdateEntry(VkDescriptorImageInfo image) : image{image} {}
24 24
25 DescriptorUpdateEntry(vk::Buffer buffer, vk::DeviceSize offset, vk::DeviceSize size) 25 DescriptorUpdateEntry(VkBuffer buffer, VkDeviceSize offset, VkDeviceSize size)
26 : buffer{buffer, offset, size} {} 26 : buffer{buffer, offset, size} {}
27 27
28 DescriptorUpdateEntry(vk::BufferView texel_buffer) : texel_buffer{texel_buffer} {} 28 DescriptorUpdateEntry(VkBufferView texel_buffer) : texel_buffer{texel_buffer} {}
29 29
30private: 30private:
31 union { 31 union {
32 vk::DescriptorImageInfo image; 32 VkDescriptorImageInfo image;
33 vk::DescriptorBufferInfo buffer; 33 VkDescriptorBufferInfo buffer;
34 vk::BufferView texel_buffer; 34 VkBufferView texel_buffer;
35 }; 35 };
36}; 36};
37 37
@@ -44,37 +44,35 @@ public:
44 44
45 void Acquire(); 45 void Acquire();
46 46
47 void Send(vk::DescriptorUpdateTemplate update_template, vk::DescriptorSet set); 47 void Send(VkDescriptorUpdateTemplateKHR update_template, VkDescriptorSet set);
48 48
49 void AddSampledImage(vk::Sampler sampler, vk::ImageView image_view) { 49 void AddSampledImage(VkSampler sampler, VkImageView image_view) {
50 entries.emplace_back(vk::DescriptorImageInfo{sampler, image_view, {}}); 50 entries.emplace_back(VkDescriptorImageInfo{sampler, image_view, {}});
51 } 51 }
52 52
53 void AddImage(vk::ImageView image_view) { 53 void AddImage(VkImageView image_view) {
54 entries.emplace_back(vk::DescriptorImageInfo{{}, image_view, {}}); 54 entries.emplace_back(VkDescriptorImageInfo{{}, image_view, {}});
55 } 55 }
56 56
57 void AddBuffer(const vk::Buffer* buffer, u64 offset, std::size_t size) { 57 void AddBuffer(const VkBuffer* buffer, u64 offset, std::size_t size) {
58 entries.push_back(Buffer{buffer, offset, size}); 58 entries.push_back(Buffer{buffer, offset, size});
59 } 59 }
60 60
61 void AddTexelBuffer(vk::BufferView texel_buffer) { 61 void AddTexelBuffer(VkBufferView texel_buffer) {
62 entries.emplace_back(texel_buffer); 62 entries.emplace_back(texel_buffer);
63 } 63 }
64 64
65 vk::ImageLayout* GetLastImageLayout() { 65 VkImageLayout* GetLastImageLayout() {
66 return &std::get<vk::DescriptorImageInfo>(entries.back()).imageLayout; 66 return &std::get<VkDescriptorImageInfo>(entries.back()).imageLayout;
67 } 67 }
68 68
69private: 69private:
70 struct Buffer { 70 struct Buffer {
71 const vk::Buffer* buffer{}; 71 const VkBuffer* buffer = nullptr;
72 u64 offset{}; 72 u64 offset = 0;
73 std::size_t size{}; 73 std::size_t size = 0;
74 }; 74 };
75 using Variant = std::variant<vk::DescriptorImageInfo, Buffer, vk::BufferView>; 75 using Variant = std::variant<VkDescriptorImageInfo, Buffer, VkBufferView>;
76 // Old gcc versions don't consider this trivially copyable.
77 // static_assert(std::is_trivially_copyable_v<Variant>);
78 76
79 const VKDevice& device; 77 const VKDevice& device;
80 VKScheduler& scheduler; 78 VKScheduler& scheduler;