Diffstat (limited to 'src')
-rw-r--r--  src/core/file_sys/fsmitm_romfsbuild.cpp                      4
-rw-r--r--  src/core/file_sys/fsmitm_romfsbuild.h                        2
-rw-r--r--  src/core/file_sys/vfs_concat.cpp                             8
-rw-r--r--  src/core/file_sys/vfs_concat.h                               6
-rw-r--r--  src/core/hle/service/mii/manager.cpp                       113
-rw-r--r--  src/core/memory/dmnt_cheat_vm.cpp                           29
-rw-r--r--  src/core/memory/dmnt_cheat_vm.h                             14
-rw-r--r--  src/video_core/macro/macro_hle.cpp                          17
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp          63
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp         194
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp     131
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp       33
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp                 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_image.cpp                 38
-rw-r--r--  src/video_core/renderer_vulkan/vk_memory_manager.cpp        13
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp        66
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp           16
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp            88
-rw-r--r--  src/video_core/renderer_vulkan/vk_renderpass_cache.cpp     129
-rw-r--r--  src/video_core/renderer_vulkan/vk_resource_manager.cpp      29
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp         51
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp             57
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.cpp           14
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp   25
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp          1
-rw-r--r--  src/video_core/renderer_vulkan/vk_stream_buffer.cpp         35
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp             94
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp        231
-rw-r--r--  src/video_core/renderer_vulkan/wrapper.cpp                  120
-rw-r--r--  src/video_core/shader_cache.h                                4
30 files changed, 885 insertions, 746 deletions
diff --git a/src/core/file_sys/fsmitm_romfsbuild.cpp b/src/core/file_sys/fsmitm_romfsbuild.cpp
index d126ae8dd..2aff2708a 100644
--- a/src/core/file_sys/fsmitm_romfsbuild.cpp
+++ b/src/core/file_sys/fsmitm_romfsbuild.cpp
@@ -240,7 +240,7 @@ RomFSBuildContext::RomFSBuildContext(VirtualDir base_, VirtualDir ext_)
 
 RomFSBuildContext::~RomFSBuildContext() = default;
 
-std::map<u64, VirtualFile> RomFSBuildContext::Build() {
+std::multimap<u64, VirtualFile> RomFSBuildContext::Build() {
     const u64 dir_hash_table_entry_count = romfs_get_hash_table_count(num_dirs);
     const u64 file_hash_table_entry_count = romfs_get_hash_table_count(num_files);
     dir_hash_table_size = 4 * dir_hash_table_entry_count;
@@ -294,7 +294,7 @@ std::map<u64, VirtualFile> RomFSBuildContext::Build() {
         cur_dir->parent->child = cur_dir;
     }
 
-    std::map<u64, VirtualFile> out;
+    std::multimap<u64, VirtualFile> out;
 
     // Populate file tables.
     for (const auto& it : files) {
diff --git a/src/core/file_sys/fsmitm_romfsbuild.h b/src/core/file_sys/fsmitm_romfsbuild.h
index a62502193..049de180b 100644
--- a/src/core/file_sys/fsmitm_romfsbuild.h
+++ b/src/core/file_sys/fsmitm_romfsbuild.h
@@ -43,7 +43,7 @@ public:
     ~RomFSBuildContext();
 
     // This finalizes the context.
-    std::map<u64, VirtualFile> Build();
+    std::multimap<u64, VirtualFile> Build();
 
 private:
     VirtualDir base;
diff --git a/src/core/file_sys/vfs_concat.cpp b/src/core/file_sys/vfs_concat.cpp
index 16d801c0c..e0ff70174 100644
--- a/src/core/file_sys/vfs_concat.cpp
+++ b/src/core/file_sys/vfs_concat.cpp
@@ -11,7 +11,7 @@
 
 namespace FileSys {
 
-static bool VerifyConcatenationMapContinuity(const std::map<u64, VirtualFile>& map) {
+static bool VerifyConcatenationMapContinuity(const std::multimap<u64, VirtualFile>& map) {
     const auto last_valid = --map.end();
     for (auto iter = map.begin(); iter != last_valid;) {
         const auto old = iter++;
@@ -27,12 +27,12 @@ ConcatenatedVfsFile::ConcatenatedVfsFile(std::vector<VirtualFile> files_, std::s
     : name(std::move(name)) {
     std::size_t next_offset = 0;
     for (const auto& file : files_) {
-        files[next_offset] = file;
+        files.emplace(next_offset, file);
         next_offset += file->GetSize();
     }
 }
 
-ConcatenatedVfsFile::ConcatenatedVfsFile(std::map<u64, VirtualFile> files_, std::string name)
+ConcatenatedVfsFile::ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files_, std::string name)
     : files(std::move(files_)), name(std::move(name)) {
     ASSERT(VerifyConcatenationMapContinuity(files));
 }
@@ -50,7 +50,7 @@ VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(std::vector<VirtualFile> f
 }
 
 VirtualFile ConcatenatedVfsFile::MakeConcatenatedFile(u8 filler_byte,
-                                                      std::map<u64, VirtualFile> files,
+                                                      std::multimap<u64, VirtualFile> files,
                                                       std::string name) {
     if (files.empty())
         return nullptr;
diff --git a/src/core/file_sys/vfs_concat.h b/src/core/file_sys/vfs_concat.h
index c90f9d5d1..7a26343c0 100644
--- a/src/core/file_sys/vfs_concat.h
+++ b/src/core/file_sys/vfs_concat.h
@@ -15,7 +15,7 @@ namespace FileSys {
 // read-only.
 class ConcatenatedVfsFile : public VfsFile {
     ConcatenatedVfsFile(std::vector<VirtualFile> files, std::string name);
-    ConcatenatedVfsFile(std::map<u64, VirtualFile> files, std::string name);
+    ConcatenatedVfsFile(std::multimap<u64, VirtualFile> files, std::string name);
 
 public:
     ~ConcatenatedVfsFile() override;
@@ -25,7 +25,7 @@ public:
 
     /// Convenience function that turns a map of offsets to files into a concatenated file, filling
     /// gaps with a given filler byte.
-    static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::map<u64, VirtualFile> files,
+    static VirtualFile MakeConcatenatedFile(u8 filler_byte, std::multimap<u64, VirtualFile> files,
                                             std::string name);
 
     std::string GetName() const override;
@@ -40,7 +40,7 @@ public:
 
 private:
     // Maps starting offset to file -- more efficient.
-    std::map<u64, VirtualFile> files;
+    std::multimap<u64, VirtualFile> files;
     std::string name;
 };
 
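Why std::multimap fits here: ConcatenatedVfsFile keys each backing file by its starting offset, and the constructor advances the key by file->GetSize(). A zero-byte file therefore shares its offset with the next file, and std::map's operator[] would silently overwrite one of them. A minimal standalone sketch (not part of the commit) of the difference:

#include <cstdint>
#include <map>
#include <string>

int main() {
    std::multimap<std::uint64_t, std::string> files;
    files.emplace(0, "empty.bin"); // hypothetical zero-byte file at offset 0
    files.emplace(0, "data.bin");  // next file starts at the same offset
    // Both entries survive; a std::map would have kept only one of them.
    return files.count(0) == 2 ? 0 : 1;
}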
diff --git a/src/core/hle/service/mii/manager.cpp b/src/core/hle/service/mii/manager.cpp
index 4a1d1182e..4730070cb 100644
--- a/src/core/hle/service/mii/manager.cpp
+++ b/src/core/hle/service/mii/manager.cpp
@@ -47,66 +47,67 @@ std::array<T, DestArraySize> ResizeArray(const std::array<T, SourceArraySize>& i
 MiiInfo ConvertStoreDataToInfo(const MiiStoreData& data) {
     MiiStoreBitFields bf;
     std::memcpy(&bf, data.data.data.data(), sizeof(MiiStoreBitFields));
-    MiiInfo info{};
-    info.name = ResizeArray<char16_t, 10, 11>(data.data.name);
-    info.uuid = data.data.uuid;
-    info.font_region = static_cast<u8>(bf.font_region.Value());
-    info.favorite_color = static_cast<u8>(bf.favorite_color.Value());
-    info.gender = static_cast<u8>(bf.gender.Value());
-    info.height = static_cast<u8>(bf.height.Value());
-    info.build = static_cast<u8>(bf.build.Value());
-    info.type = static_cast<u8>(bf.type.Value());
-    info.region_move = static_cast<u8>(bf.region_move.Value());
-    info.faceline_type = static_cast<u8>(bf.faceline_type.Value());
-    info.faceline_color = static_cast<u8>(bf.faceline_color.Value());
-    info.faceline_wrinkle = static_cast<u8>(bf.faceline_wrinkle.Value());
-    info.faceline_make = static_cast<u8>(bf.faceline_makeup.Value());
-    info.hair_type = static_cast<u8>(bf.hair_type.Value());
-    info.hair_color = static_cast<u8>(bf.hair_color.Value());
-    info.hair_flip = static_cast<u8>(bf.hair_flip.Value());
-    info.eye_type = static_cast<u8>(bf.eye_type.Value());
-    info.eye_color = static_cast<u8>(bf.eye_color.Value());
-    info.eye_scale = static_cast<u8>(bf.eye_scale.Value());
-    info.eye_aspect = static_cast<u8>(bf.eye_aspect.Value());
-    info.eye_rotate = static_cast<u8>(bf.eye_rotate.Value());
-    info.eye_x = static_cast<u8>(bf.eye_x.Value());
-    info.eye_y = static_cast<u8>(bf.eye_y.Value());
-    info.eyebrow_type = static_cast<u8>(bf.eyebrow_type.Value());
-    info.eyebrow_color = static_cast<u8>(bf.eyebrow_color.Value());
-    info.eyebrow_scale = static_cast<u8>(bf.eyebrow_scale.Value());
-    info.eyebrow_aspect = static_cast<u8>(bf.eyebrow_aspect.Value());
-    info.eyebrow_rotate = static_cast<u8>(bf.eyebrow_rotate.Value());
-    info.eyebrow_x = static_cast<u8>(bf.eyebrow_x.Value());
-    info.eyebrow_y = static_cast<u8>(bf.eyebrow_y.Value() + 3);
-    info.nose_type = static_cast<u8>(bf.nose_type.Value());
-    info.nose_scale = static_cast<u8>(bf.nose_scale.Value());
-    info.nose_y = static_cast<u8>(bf.nose_y.Value());
-    info.mouth_type = static_cast<u8>(bf.mouth_type.Value());
-    info.mouth_color = static_cast<u8>(bf.mouth_color.Value());
-    info.mouth_scale = static_cast<u8>(bf.mouth_scale.Value());
-    info.mouth_aspect = static_cast<u8>(bf.mouth_aspect.Value());
-    info.mouth_y = static_cast<u8>(bf.mouth_y.Value());
-    info.beard_color = static_cast<u8>(bf.beard_color.Value());
-    info.beard_type = static_cast<u8>(bf.beard_type.Value());
-    info.mustache_type = static_cast<u8>(bf.mustache_type.Value());
-    info.mustache_scale = static_cast<u8>(bf.mustache_scale.Value());
-    info.mustache_y = static_cast<u8>(bf.mustache_y.Value());
-    info.glasses_type = static_cast<u8>(bf.glasses_type.Value());
-    info.glasses_color = static_cast<u8>(bf.glasses_color.Value());
-    info.glasses_scale = static_cast<u8>(bf.glasses_scale.Value());
-    info.glasses_y = static_cast<u8>(bf.glasses_y.Value());
-    info.mole_type = static_cast<u8>(bf.mole_type.Value());
-    info.mole_scale = static_cast<u8>(bf.mole_scale.Value());
-    info.mole_x = static_cast<u8>(bf.mole_x.Value());
-    info.mole_y = static_cast<u8>(bf.mole_y.Value());
-    return info;
+
+    return {
+        .uuid = data.data.uuid,
+        .name = ResizeArray<char16_t, 10, 11>(data.data.name),
+        .font_region = static_cast<u8>(bf.font_region.Value()),
+        .favorite_color = static_cast<u8>(bf.favorite_color.Value()),
+        .gender = static_cast<u8>(bf.gender.Value()),
+        .height = static_cast<u8>(bf.height.Value()),
+        .build = static_cast<u8>(bf.build.Value()),
+        .type = static_cast<u8>(bf.type.Value()),
+        .region_move = static_cast<u8>(bf.region_move.Value()),
+        .faceline_type = static_cast<u8>(bf.faceline_type.Value()),
+        .faceline_color = static_cast<u8>(bf.faceline_color.Value()),
+        .faceline_wrinkle = static_cast<u8>(bf.faceline_wrinkle.Value()),
+        .faceline_make = static_cast<u8>(bf.faceline_makeup.Value()),
+        .hair_type = static_cast<u8>(bf.hair_type.Value()),
+        .hair_color = static_cast<u8>(bf.hair_color.Value()),
+        .hair_flip = static_cast<u8>(bf.hair_flip.Value()),
+        .eye_type = static_cast<u8>(bf.eye_type.Value()),
+        .eye_color = static_cast<u8>(bf.eye_color.Value()),
+        .eye_scale = static_cast<u8>(bf.eye_scale.Value()),
+        .eye_aspect = static_cast<u8>(bf.eye_aspect.Value()),
+        .eye_rotate = static_cast<u8>(bf.eye_rotate.Value()),
+        .eye_x = static_cast<u8>(bf.eye_x.Value()),
+        .eye_y = static_cast<u8>(bf.eye_y.Value()),
+        .eyebrow_type = static_cast<u8>(bf.eyebrow_type.Value()),
+        .eyebrow_color = static_cast<u8>(bf.eyebrow_color.Value()),
+        .eyebrow_scale = static_cast<u8>(bf.eyebrow_scale.Value()),
+        .eyebrow_aspect = static_cast<u8>(bf.eyebrow_aspect.Value()),
+        .eyebrow_rotate = static_cast<u8>(bf.eyebrow_rotate.Value()),
+        .eyebrow_x = static_cast<u8>(bf.eyebrow_x.Value()),
+        .eyebrow_y = static_cast<u8>(bf.eyebrow_y.Value() + 3),
+        .nose_type = static_cast<u8>(bf.nose_type.Value()),
+        .nose_scale = static_cast<u8>(bf.nose_scale.Value()),
+        .nose_y = static_cast<u8>(bf.nose_y.Value()),
+        .mouth_type = static_cast<u8>(bf.mouth_type.Value()),
+        .mouth_color = static_cast<u8>(bf.mouth_color.Value()),
+        .mouth_scale = static_cast<u8>(bf.mouth_scale.Value()),
+        .mouth_aspect = static_cast<u8>(bf.mouth_aspect.Value()),
+        .mouth_y = static_cast<u8>(bf.mouth_y.Value()),
+        .beard_color = static_cast<u8>(bf.beard_color.Value()),
+        .beard_type = static_cast<u8>(bf.beard_type.Value()),
+        .mustache_type = static_cast<u8>(bf.mustache_type.Value()),
+        .mustache_scale = static_cast<u8>(bf.mustache_scale.Value()),
+        .mustache_y = static_cast<u8>(bf.mustache_y.Value()),
+        .glasses_type = static_cast<u8>(bf.glasses_type.Value()),
+        .glasses_color = static_cast<u8>(bf.glasses_color.Value()),
+        .glasses_scale = static_cast<u8>(bf.glasses_scale.Value()),
+        .glasses_y = static_cast<u8>(bf.glasses_y.Value()),
+        .mole_type = static_cast<u8>(bf.mole_type.Value()),
+        .mole_scale = static_cast<u8>(bf.mole_scale.Value()),
+        .mole_x = static_cast<u8>(bf.mole_x.Value()),
+        .mole_y = static_cast<u8>(bf.mole_y.Value()),
+    };
 }
 
 u16 GenerateCrc16(const void* data, std::size_t size) {
     s32 crc{};
-    for (int i = 0; i < size; i++) {
-        crc ^= reinterpret_cast<const u8*>(data)[i] << 8;
-        for (int j = 0; j < 8; j++) {
+    for (std::size_t i = 0; i < size; i++) {
+        crc ^= static_cast<const u8*>(data)[i] << 8;
+        for (std::size_t j = 0; j < 8; j++) {
             crc <<= 1;
             if ((crc & 0x10000) != 0) {
                 crc = (crc ^ 0x1021) & 0xFFFF;
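The GenerateCrc16 hunk swaps the signed int loop counters for std::size_t (so a size above INT_MAX cannot overflow the counter) and replaces reinterpret_cast with static_cast, which is valid when casting from const void*. A standalone sketch of the same routine as shown in the diff, which is the bitwise form of CRC-16/XMODEM (polynomial 0x1021, zero initial value); the function name is ours, not the project's:

#include <cstddef>
#include <cstdint>

std::uint16_t Crc16Xmodem(const void* data, std::size_t size) {
    std::uint32_t crc = 0; // zero initial value, as in the diff
    for (std::size_t i = 0; i < size; i++) {
        crc ^= static_cast<const std::uint8_t*>(data)[i] << 8;
        for (std::size_t j = 0; j < 8; j++) {
            crc <<= 1;
            if ((crc & 0x10000) != 0) {
                crc = (crc ^ 0x1021) & 0xFFFF; // reduce by the CCITT polynomial
            }
        }
    }
    return static_cast<std::uint16_t>(crc);
}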
diff --git a/src/core/memory/dmnt_cheat_vm.cpp b/src/core/memory/dmnt_cheat_vm.cpp
index fb9f36bfd..2e7da23fe 100644
--- a/src/core/memory/dmnt_cheat_vm.cpp
+++ b/src/core/memory/dmnt_cheat_vm.cpp
@@ -190,6 +190,15 @@ void DmntCheatVm::LogOpcode(const CheatVmOpcode& opcode) {
             callbacks->CommandLog(
                 fmt::format("Act[{:02X}]: {:d}", i, save_restore_regmask->should_operate[i]));
         }
+    } else if (auto rw_static_reg = std::get_if<ReadWriteStaticRegisterOpcode>(&opcode.opcode)) {
+        callbacks->CommandLog("Opcode: Read/Write Static Register");
+        if (rw_static_reg->static_idx < NumReadableStaticRegisters) {
+            callbacks->CommandLog("Op Type: ReadStaticRegister");
+        } else {
+            callbacks->CommandLog("Op Type: WriteStaticRegister");
+        }
+        callbacks->CommandLog(fmt::format("Reg Idx {:X}", rw_static_reg->idx));
+        callbacks->CommandLog(fmt::format("Stc Idx {:X}", rw_static_reg->static_idx));
     } else if (auto debug_log = std::get_if<DebugLogOpcode>(&opcode.opcode)) {
         callbacks->CommandLog("Opcode: Debug Log");
         callbacks->CommandLog(fmt::format("Bit Width: {:X}", debug_log->bit_width));
@@ -544,6 +553,16 @@ bool DmntCheatVm::DecodeNextOpcode(CheatVmOpcode& out) {
         }
         opcode.opcode = save_restore_regmask;
     } break;
+    case CheatVmOpcodeType::ReadWriteStaticRegister: {
+        ReadWriteStaticRegisterOpcode rw_static_reg{};
+        // C3000XXx
+        // C3 = opcode 0xC3.
+        // XX = static register index.
+        // x = register index.
+        rw_static_reg.static_idx = ((first_dword >> 4) & 0xFF);
+        rw_static_reg.idx = (first_dword & 0xF);
+        opcode.opcode = rw_static_reg;
+    } break;
     case CheatVmOpcodeType::DebugLog: {
         DebugLogOpcode debug_log{};
         // FFFTIX##
@@ -667,6 +686,7 @@ void DmntCheatVm::ResetState() {
     registers.fill(0);
     saved_values.fill(0);
     loop_tops.fill(0);
+    static_registers.fill(0);
     instruction_ptr = 0;
     condition_depth = 0;
     decode_success = true;
@@ -1153,6 +1173,15 @@ void DmntCheatVm::Execute(const CheatProcessMetadata& metadata) {
                     }
                 }
             }
+        } else if (auto rw_static_reg =
+                       std::get_if<ReadWriteStaticRegisterOpcode>(&cur_opcode.opcode)) {
+            if (rw_static_reg->static_idx < NumReadableStaticRegisters) {
+                // Load a register with a static register.
+                registers[rw_static_reg->idx] = static_registers[rw_static_reg->static_idx];
+            } else {
+                // Store a register to a static register.
+                static_registers[rw_static_reg->static_idx] = registers[rw_static_reg->idx];
+            }
         } else if (auto debug_log = std::get_if<DebugLogOpcode>(&cur_opcode.opcode)) {
             // Read value from memory.
             u64 log_value = 0;
diff --git a/src/core/memory/dmnt_cheat_vm.h b/src/core/memory/dmnt_cheat_vm.h
index 8351fd798..21b86b72c 100644
--- a/src/core/memory/dmnt_cheat_vm.h
+++ b/src/core/memory/dmnt_cheat_vm.h
@@ -56,6 +56,7 @@ enum class CheatVmOpcodeType : u32 {
     BeginRegisterConditionalBlock = 0xC0,
     SaveRestoreRegister = 0xC1,
     SaveRestoreRegisterMask = 0xC2,
+    ReadWriteStaticRegister = 0xC3,
 
     // This is a meta entry, and not a real opcode.
     // This is to facilitate multi-nybble instruction decoding.
@@ -237,6 +238,11 @@ struct SaveRestoreRegisterMaskOpcode {
     std::array<bool, 0x10> should_operate{};
 };
 
+struct ReadWriteStaticRegisterOpcode {
+    u32 static_idx{};
+    u32 idx{};
+};
+
 struct DebugLogOpcode {
     u32 bit_width{};
     u32 log_id{};
@@ -259,7 +265,8 @@ struct CheatVmOpcode {
                  PerformArithmeticStaticOpcode, BeginKeypressConditionalOpcode,
                  PerformArithmeticRegisterOpcode, StoreRegisterToAddressOpcode,
                  BeginRegisterConditionalOpcode, SaveRestoreRegisterOpcode,
-                 SaveRestoreRegisterMaskOpcode, DebugLogOpcode, UnrecognizedInstruction>
+                 SaveRestoreRegisterMaskOpcode, ReadWriteStaticRegisterOpcode, DebugLogOpcode,
+                 UnrecognizedInstruction>
         opcode{};
 };
 
@@ -281,6 +288,10 @@ public:
 
     static constexpr std::size_t MaximumProgramOpcodeCount = 0x400;
     static constexpr std::size_t NumRegisters = 0x10;
+    static constexpr std::size_t NumReadableStaticRegisters = 0x80;
+    static constexpr std::size_t NumWritableStaticRegisters = 0x80;
+    static constexpr std::size_t NumStaticRegisters =
+        NumReadableStaticRegisters + NumWritableStaticRegisters;
 
     explicit DmntCheatVm(std::unique_ptr<Callbacks> callbacks);
     ~DmntCheatVm();
@@ -302,6 +313,7 @@ private:
     std::array<u32, MaximumProgramOpcodeCount> program{};
     std::array<u64, NumRegisters> registers{};
     std::array<u64, NumRegisters> saved_values{};
+    std::array<u64, NumStaticRegisters> static_registers{};
     std::array<std::size_t, NumRegisters> loop_tops{};
 
     bool DecodeNextOpcode(CheatVmOpcode& out);
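A worked decode of the new 0xC3 opcode, following the masks added to DecodeNextOpcode (layout C3000XXx). The instruction word below is made up for illustration only:

#include <cassert>
#include <cstdint>

int main() {
    const std::uint32_t first_dword = 0xC3000821; // hypothetical C3000XXx word
    const std::uint32_t static_idx = (first_dword >> 4) & 0xFF; // XX field
    const std::uint32_t idx = first_dword & 0xF;                // x field
    assert(static_idx == 0x82); // >= NumReadableStaticRegisters (0x80): a write
    assert(idx == 0x1);         // VM register 1 is stored to static register 0x82
    return 0;
}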
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp
index 37f784a35..0c9ff59a4 100644
--- a/src/video_core/macro/macro_hle.cpp
+++ b/src/video_core/macro/macro_hle.cpp
@@ -12,8 +12,7 @@ namespace Tegra {
 
 namespace {
 // HLE'd functions
-static void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d,
-                                 const std::vector<u32>& parameters) {
+void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
     const u32 instance_count = parameters[2] & maxwell3d.GetRegisterValue(0xD1B);
 
     maxwell3d.regs.draw.topology.Assign(
@@ -32,8 +31,7 @@ static void HLE_771BB18C62444DA0(Engines::Maxwell3D& maxwell3d,
     maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
 }
 
-static void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d,
-                                 const std::vector<u32>& parameters) {
+void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
     const u32 count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
 
     maxwell3d.regs.vertex_buffer.first = parameters[3];
@@ -51,8 +49,7 @@ static void HLE_0D61FC9FAAC9FCAD(Engines::Maxwell3D& maxwell3d,
     maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
 }
 
-static void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d,
-                                 const std::vector<u32>& parameters) {
+void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
     const u32 instance_count = (maxwell3d.GetRegisterValue(0xD1B) & parameters[2]);
     const u32 element_base = parameters[4];
     const u32 base_instance = parameters[5];
@@ -80,12 +77,12 @@ static void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d,
     maxwell3d.CallMethodFromMME(0x8e5, 0x0);
     maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
 }
-} // namespace
+} // Anonymous namespace
 
 constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{
-    std::make_pair<u64, HLEFunction>(0x771BB18C62444DA0, &HLE_771BB18C62444DA0),
-    std::make_pair<u64, HLEFunction>(0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD),
-    std::make_pair<u64, HLEFunction>(0x0217920100488FF7, &HLE_0217920100488FF7),
+    {0x771BB18C62444DA0, &HLE_771BB18C62444DA0},
+    {0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD},
+    {0x0217920100488FF7, &HLE_0217920100488FF7},
 }};
 
 HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d) : maxwell3d(maxwell3d) {}
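With std::make_pair replaced by brace initialization, hle_funcs remains a constexpr array of (macro hash, function pointer) pairs. A sketch of the kind of linear lookup such a table supports; the stand-in types and Lookup helper below are ours, not the real HLEFunction/Maxwell3D signatures:

#include <algorithm>
#include <array>
#include <cstdint>
#include <utility>

using HLEFunc = void (*)(int&); // stand-in signature, not the real HLEFunction
void Func(int& x) { x = 1; }

constexpr std::array<std::pair<std::uint64_t, HLEFunc>, 1> funcs{{
    {0x771BB18C62444DA0ULL, &Func},
}};

HLEFunc Lookup(std::uint64_t hash) {
    const auto it = std::find_if(funcs.begin(), funcs.end(),
                                 [hash](const auto& p) { return p.first == hash; });
    return it != funcs.end() ? it->second : nullptr;
}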
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 2be38d419..1d2f8b557 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -39,16 +39,17 @@ std::unique_ptr<VKStreamBuffer> CreateStreamBuffer(const VKDevice& device, VKSch
 
 Buffer::Buffer(const VKDevice& device, VKMemoryManager& memory_manager, VKScheduler& scheduler_,
                VKStagingBufferPool& staging_pool_, VAddr cpu_addr, std::size_t size)
-    : VideoCommon::BufferBlock{cpu_addr, size}, scheduler{scheduler_}, staging_pool{staging_pool_} {
-    VkBufferCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.size = static_cast<VkDeviceSize>(size);
-    ci.usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-    ci.queueFamilyIndexCount = 0;
-    ci.pQueueFamilyIndices = nullptr;
+    : BufferBlock{cpu_addr, size}, scheduler{scheduler_}, staging_pool{staging_pool_} {
+    const VkBufferCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .size = static_cast<VkDeviceSize>(size),
+        .usage = BUFFER_USAGE | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+        .queueFamilyIndexCount = 0,
+        .pQueueFamilyIndices = nullptr,
+    };
 
     buffer.handle = device.GetLogical().CreateBuffer(ci);
     buffer.commit = memory_manager.Commit(buffer.handle, false);
@@ -66,16 +67,17 @@ void Buffer::Upload(std::size_t offset, std::size_t size, const u8* data) {
     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) {
         cmdbuf.CopyBuffer(staging, handle, VkBufferCopy{0, offset, size});
 
-        VkBufferMemoryBarrier barrier;
-        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
-        barrier.pNext = nullptr;
-        barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-        barrier.dstAccessMask = UPLOAD_ACCESS_BARRIERS;
-        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.buffer = handle;
-        barrier.offset = offset;
-        barrier.size = size;
+        const VkBufferMemoryBarrier barrier{
+            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = UPLOAD_ACCESS_BARRIERS,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .buffer = handle,
+            .offset = offset,
+            .size = size,
+        };
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, UPLOAD_PIPELINE_STAGE, 0, {},
                                barrier, {});
     });
@@ -87,16 +89,17 @@ void Buffer::Download(std::size_t offset, std::size_t size, u8* data) {
 
     const VkBuffer handle = Handle();
     scheduler.Record([staging = *staging.handle, handle, offset, size](vk::CommandBuffer cmdbuf) {
-        VkBufferMemoryBarrier barrier;
-        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
-        barrier.pNext = nullptr;
-        barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
-        barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
-        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
-        barrier.buffer = handle;
-        barrier.offset = offset;
-        barrier.size = size;
+        const VkBufferMemoryBarrier barrier{
+            .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+            .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+            .buffer = handle,
+            .offset = offset,
+            .size = size,
+        };
 
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
                                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
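The recurring pattern in this and the following Vulkan files: C++20 designated initializers replace field-by-field assignment into an uninitialized struct. Two properties make this safer, sketched below with a stand-in struct (not a Vulkan type): designators must appear in member declaration order, and any member not named is value-initialized rather than left indeterminate, as a bare "VkBufferCreateInfo ci;" would leave it.

#include <cassert>

struct CreateInfo { // stand-in, not a real Vulkan structure
    int sType;
    const void* pNext;
    unsigned long long size;
    unsigned flags; // deliberately omitted from the initializer below
};

int main() {
    const CreateInfo ci{
        .sType = 12, // stands in for a VK_STRUCTURE_TYPE_* constant
        .pNext = nullptr,
        .size = 1024,
    };
    assert(ci.flags == 0); // omitted members are zeroed, not garbage
    return 0;
}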
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index da71e710c..182461ed9 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -115,32 +115,32 @@ constexpr u8 quad_array[] = {
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
 
 VkDescriptorSetLayoutBinding BuildQuadArrayPassDescriptorSetLayoutBinding() {
-    VkDescriptorSetLayoutBinding binding;
-    binding.binding = 0;
-    binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    binding.descriptorCount = 1;
-    binding.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    binding.pImmutableSamplers = nullptr;
-    return binding;
+    return {
+        .binding = 0,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .descriptorCount = 1,
+        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+        .pImmutableSamplers = nullptr,
+    };
 }
 
 VkDescriptorUpdateTemplateEntryKHR BuildQuadArrayPassDescriptorUpdateTemplateEntry() {
-    VkDescriptorUpdateTemplateEntryKHR entry;
-    entry.dstBinding = 0;
-    entry.dstArrayElement = 0;
-    entry.descriptorCount = 1;
-    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    entry.offset = 0;
-    entry.stride = sizeof(DescriptorUpdateEntry);
-    return entry;
+    return {
+        .dstBinding = 0,
+        .dstArrayElement = 0,
+        .descriptorCount = 1,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .offset = 0,
+        .stride = sizeof(DescriptorUpdateEntry),
+    };
 }
 
 VkPushConstantRange BuildComputePushConstantRange(std::size_t size) {
-    VkPushConstantRange range;
-    range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    range.offset = 0;
-    range.size = static_cast<u32>(size);
-    return range;
+    return {
+        .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+        .offset = 0,
+        .size = static_cast<u32>(size),
+    };
 }
 
 // Uint8 SPIR-V module. Generated from the "shaders/" directory.
@@ -344,29 +344,33 @@ constexpr u8 QUAD_INDEXED_SPV[] = {
     0xfd, 0x00, 0x01, 0x00, 0x38, 0x00, 0x01, 0x00};
 
 std::array<VkDescriptorSetLayoutBinding, 2> BuildInputOutputDescriptorSetBindings() {
-    std::array<VkDescriptorSetLayoutBinding, 2> bindings;
-    bindings[0].binding = 0;
-    bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    bindings[0].descriptorCount = 1;
-    bindings[0].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    bindings[0].pImmutableSamplers = nullptr;
-    bindings[1].binding = 1;
-    bindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    bindings[1].descriptorCount = 1;
-    bindings[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-    bindings[1].pImmutableSamplers = nullptr;
-    return bindings;
+    return {{
+        {
+            .binding = 0,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+        {
+            .binding = 1,
+            .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .descriptorCount = 1,
+            .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+            .pImmutableSamplers = nullptr,
+        },
+    }};
 }
 
 VkDescriptorUpdateTemplateEntryKHR BuildInputOutputDescriptorUpdateTemplate() {
-    VkDescriptorUpdateTemplateEntryKHR entry;
-    entry.dstBinding = 0;
-    entry.dstArrayElement = 0;
-    entry.descriptorCount = 2;
-    entry.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
-    entry.offset = 0;
-    entry.stride = sizeof(DescriptorUpdateEntry);
-    return entry;
+    return {
+        .dstBinding = 0,
+        .dstArrayElement = 0,
+        .descriptorCount = 2,
+        .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+        .offset = 0,
+        .stride = sizeof(DescriptorUpdateEntry),
+    };
 }
 
 } // Anonymous namespace
@@ -376,37 +380,37 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
                              vk::Span<VkDescriptorUpdateTemplateEntryKHR> templates,
                              vk::Span<VkPushConstantRange> push_constants, std::size_t code_size,
                              const u8* code) {
-    VkDescriptorSetLayoutCreateInfo descriptor_layout_ci;
-    descriptor_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    descriptor_layout_ci.pNext = nullptr;
-    descriptor_layout_ci.flags = 0;
-    descriptor_layout_ci.bindingCount = bindings.size();
-    descriptor_layout_ci.pBindings = bindings.data();
-    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout(descriptor_layout_ci);
+    descriptor_set_layout = device.GetLogical().CreateDescriptorSetLayout({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .bindingCount = bindings.size(),
+        .pBindings = bindings.data(),
+    });
 
-    VkPipelineLayoutCreateInfo pipeline_layout_ci;
-    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-    pipeline_layout_ci.pNext = nullptr;
-    pipeline_layout_ci.flags = 0;
-    pipeline_layout_ci.setLayoutCount = 1;
-    pipeline_layout_ci.pSetLayouts = descriptor_set_layout.address();
-    pipeline_layout_ci.pushConstantRangeCount = push_constants.size();
-    pipeline_layout_ci.pPushConstantRanges = push_constants.data();
-    layout = device.GetLogical().CreatePipelineLayout(pipeline_layout_ci);
+    layout = device.GetLogical().CreatePipelineLayout({
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .setLayoutCount = 1,
+        .pSetLayouts = descriptor_set_layout.address(),
+        .pushConstantRangeCount = push_constants.size(),
+        .pPushConstantRanges = push_constants.data(),
+    });
 
     if (!templates.empty()) {
-        VkDescriptorUpdateTemplateCreateInfoKHR template_ci;
-        template_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
-        template_ci.pNext = nullptr;
-        template_ci.flags = 0;
-        template_ci.descriptorUpdateEntryCount = templates.size();
-        template_ci.pDescriptorUpdateEntries = templates.data();
-        template_ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
-        template_ci.descriptorSetLayout = *descriptor_set_layout;
-        template_ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-        template_ci.pipelineLayout = *layout;
-        template_ci.set = 0;
-        descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR(template_ci);
+        descriptor_template = device.GetLogical().CreateDescriptorUpdateTemplateKHR({
+            .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+            .pNext = nullptr,
+            .flags = 0,
+            .descriptorUpdateEntryCount = templates.size(),
+            .pDescriptorUpdateEntries = templates.data(),
+            .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+            .descriptorSetLayout = *descriptor_set_layout,
+            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+            .pipelineLayout = *layout,
+            .set = 0,
+        });
 
         descriptor_allocator.emplace(descriptor_pool, *descriptor_set_layout);
     }
@@ -414,32 +418,32 @@ VKComputePass::VKComputePass(const VKDevice& device, VKDescriptorPool& descripto
     auto code_copy = std::make_unique<u32[]>(code_size / sizeof(u32) + 1);
     std::memcpy(code_copy.get(), code, code_size);
 
-    VkShaderModuleCreateInfo module_ci;
-    module_ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-    module_ci.pNext = nullptr;
-    module_ci.flags = 0;
-    module_ci.codeSize = code_size;
-    module_ci.pCode = code_copy.get();
-    module = device.GetLogical().CreateShaderModule(module_ci);
+    module = device.GetLogical().CreateShaderModule({
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .codeSize = code_size,
+        .pCode = code_copy.get(),
+    });
 
-    VkComputePipelineCreateInfo pipeline_ci;
-    pipeline_ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-    pipeline_ci.pNext = nullptr;
-    pipeline_ci.flags = 0;
-    pipeline_ci.layout = *layout;
-    pipeline_ci.basePipelineHandle = nullptr;
-    pipeline_ci.basePipelineIndex = 0;
-
-    VkPipelineShaderStageCreateInfo& stage_ci = pipeline_ci.stage;
-    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-    stage_ci.pNext = nullptr;
-    stage_ci.flags = 0;
-    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-    stage_ci.module = *module;
-    stage_ci.pName = "main";
-    stage_ci.pSpecializationInfo = nullptr;
-
-    pipeline = device.GetLogical().CreateComputePipeline(pipeline_ci);
+    pipeline = device.GetLogical().CreateComputePipeline({
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage =
+            {
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+                .module = *module,
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            },
+        .layout = *layout,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    });
 }
 
 VKComputePass::~VKComputePass() = default;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 281bf9ac3..ed9d2991c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -43,12 +43,13 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     const auto add_bindings = [&](VkDescriptorType descriptor_type, std::size_t num_entries) {
         // TODO(Rodrigo): Maybe make individual bindings here?
         for (u32 bindpoint = 0; bindpoint < static_cast<u32>(num_entries); ++bindpoint) {
-            VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
-            entry.binding = binding++;
-            entry.descriptorType = descriptor_type;
-            entry.descriptorCount = 1;
-            entry.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
-            entry.pImmutableSamplers = nullptr;
+            bindings.push_back({
+                .binding = binding++,
+                .descriptorType = descriptor_type,
+                .descriptorCount = 1,
+                .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
+                .pImmutableSamplers = nullptr,
+            });
         }
     };
     add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
@@ -58,25 +59,25 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());
 
-    VkDescriptorSetLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.bindingCount = static_cast<u32>(bindings.size());
-    ci.pBindings = bindings.data();
-    return device.GetLogical().CreateDescriptorSetLayout(ci);
+    return device.GetLogical().CreateDescriptorSetLayout({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .bindingCount = static_cast<u32>(bindings.size()),
+        .pBindings = bindings.data(),
+    });
 }
 
 vk::PipelineLayout VKComputePipeline::CreatePipelineLayout() const {
-    VkPipelineLayoutCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.setLayoutCount = 1;
-    ci.pSetLayouts = descriptor_set_layout.address();
-    ci.pushConstantRangeCount = 0;
-    ci.pPushConstantRanges = nullptr;
-    return device.GetLogical().CreatePipelineLayout(ci);
+    return device.GetLogical().CreatePipelineLayout({
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .setLayoutCount = 1,
+        .pSetLayouts = descriptor_set_layout.address(),
+        .pushConstantRangeCount = 0,
+        .pPushConstantRanges = nullptr,
+    });
 }
 
 vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplate() const {
@@ -89,59 +90,63 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
         return {};
     }
 
-    VkDescriptorUpdateTemplateCreateInfoKHR ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.descriptorUpdateEntryCount = static_cast<u32>(template_entries.size());
-    ci.pDescriptorUpdateEntries = template_entries.data();
-    ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR;
-    ci.descriptorSetLayout = *descriptor_set_layout;
-    ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
-    ci.pipelineLayout = *layout;
-    ci.set = DESCRIPTOR_SET;
-    return device.GetLogical().CreateDescriptorUpdateTemplateKHR(ci);
+    return device.GetLogical().CreateDescriptorUpdateTemplateKHR({
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
+        .pNext = nullptr,
+        .flags = 0,
+        .descriptorUpdateEntryCount = static_cast<u32>(template_entries.size()),
+        .pDescriptorUpdateEntries = template_entries.data(),
+        .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
+        .descriptorSetLayout = *descriptor_set_layout,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .pipelineLayout = *layout,
+        .set = DESCRIPTOR_SET,
+    });
 }
 
 vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
     device.SaveShader(code);
 
-    VkShaderModuleCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.codeSize = code.size() * sizeof(u32);
-    ci.pCode = code.data();
-    return device.GetLogical().CreateShaderModule(ci);
+    return device.GetLogical().CreateShaderModule({
+        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .codeSize = code.size() * sizeof(u32),
+        .pCode = code.data(),
+    });
 }
 
 vk::Pipeline VKComputePipeline::CreatePipeline() const {
-    VkComputePipelineCreateInfo ci;
-    VkPipelineShaderStageCreateInfo& stage_ci = ci.stage;
-    stage_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-    stage_ci.pNext = nullptr;
-    stage_ci.flags = 0;
-    stage_ci.stage = VK_SHADER_STAGE_COMPUTE_BIT;
-    stage_ci.module = *shader_module;
-    stage_ci.pName = "main";
-    stage_ci.pSpecializationInfo = nullptr;
-
-    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci;
-    subgroup_size_ci.sType =
-        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT;
-    subgroup_size_ci.pNext = nullptr;
-    subgroup_size_ci.requiredSubgroupSize = GuestWarpSize;
+
+    VkComputePipelineCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .stage =
+            {
+                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+                .pNext = nullptr,
+                .flags = 0,
+                .stage = VK_SHADER_STAGE_COMPUTE_BIT,
+                .module = *shader_module,
+                .pName = "main",
+                .pSpecializationInfo = nullptr,
+            },
+        .layout = *layout,
+        .basePipelineHandle = nullptr,
+        .basePipelineIndex = 0,
+    };
+
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .requiredSubgroupSize = GuestWarpSize,
+    };
 
     if (entries.uses_warps && device.IsGuestWarpSizeSupported(VK_SHADER_STAGE_COMPUTE_BIT)) {
-        stage_ci.pNext = &subgroup_size_ci;
+        ci.stage.pNext = &subgroup_size_ci;
     }
 
-    ci.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.layout = *layout;
-    ci.basePipelineHandle = nullptr;
-    ci.basePipelineIndex = 0;
     return device.GetLogical().CreateComputePipeline(ci);
 }
 
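A note on the pNext-chaining idiom visible in CreatePipeline: the extension struct must stay alive until the create call, so it is declared alongside the create info and linked in only when the device supports the feature. A reduced sketch with stand-in types (not the real Vulkan definitions):

struct SubgroupSizeCI { // stand-in for the EXT create-info struct
    const void* pNext = nullptr;
    unsigned requiredSubgroupSize = 0;
};

struct StageCI { // stand-in for VkPipelineShaderStageCreateInfo
    const void* pNext = nullptr;
};

// Splice the extension struct into the chain only when supported; the
// driver walks pNext at creation time, so ext must outlive the call.
void ChainIfSupported(StageCI& stage, const SubgroupSizeCI& ext, bool supported) {
    if (supported) {
        stage.pNext = &ext;
    }
}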
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index 9259b618d..ac4a0884e 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -43,27 +43,30 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
         {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
-        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};
+        {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
+    };
 
-    VkDescriptorPoolCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
-    ci.maxSets = num_sets;
-    ci.poolSizeCount = static_cast<u32>(std::size(pool_sizes));
-    ci.pPoolSizes = std::data(pool_sizes);
+    const VkDescriptorPoolCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+        .maxSets = num_sets,
+        .poolSizeCount = static_cast<u32>(std::size(pool_sizes)),
+        .pPoolSizes = std::data(pool_sizes),
+    };
     return &pools.emplace_back(device.GetLogical().CreateDescriptorPool(ci));
 }
 
 vk::DescriptorSets VKDescriptorPool::AllocateDescriptors(VkDescriptorSetLayout layout,
                                                          std::size_t count) {
     const std::vector layout_copies(count, layout);
-    VkDescriptorSetAllocateInfo ai;
-    ai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-    ai.pNext = nullptr;
-    ai.descriptorPool = **active_pool;
-    ai.descriptorSetCount = static_cast<u32>(count);
-    ai.pSetLayouts = layout_copies.data();
+    VkDescriptorSetAllocateInfo ai{
+        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+        .pNext = nullptr,
+        .descriptorPool = **active_pool,
+        .descriptorSetCount = static_cast<u32>(count),
+        .pSetLayouts = layout_copies.data(),
+    };
 
     vk::DescriptorSets sets = active_pool->Allocate(ai);
     if (!sets.IsOutOfPoolMemory()) {
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 9226e591c..26379ee01 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -757,14 +757,14 @@ std::vector<VkDeviceQueueCreateInfo> VKDevice::GetDeviceQueueCreateInfos() const
     queue_cis.reserve(unique_queue_families.size());
 
     for (const u32 queue_family : unique_queue_families) {
-        queue_cis.push_back({
+        auto& ci = queue_cis.emplace_back(VkDeviceQueueCreateInfo{
             .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
             .pNext = nullptr,
             .flags = 0,
             .queueFamilyIndex = queue_family,
-            .queueCount = 1,
-            .pQueuePriorities = &QUEUE_PRIORITY,
         });
+        ci.queueCount = 1;
+        ci.pQueuePriorities = &QUEUE_PRIORITY;
     }
 
     return queue_cis;
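The vk_device.cpp hunk relies on C++17 std::vector::emplace_back returning a reference to the newly inserted element, letting the last two members be filled in after insertion. Minimal illustration with a stand-in struct (not VkDeviceQueueCreateInfo):

#include <vector>

struct QueueCI { // stand-in type for illustration only
    unsigned queueFamilyIndex = 0;
    unsigned queueCount = 0;
};

int main() {
    std::vector<QueueCI> queue_cis;
    auto& ci = queue_cis.emplace_back(QueueCI{.queueFamilyIndex = 3});
    ci.queueCount = 1; // finish initializing in place via the returned reference
    return queue_cis[0].queueCount == 1 ? 0 : 1;
}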
diff --git a/src/video_core/renderer_vulkan/vk_image.cpp b/src/video_core/renderer_vulkan/vk_image.cpp
index 9bceb3861..1c418ea17 100644
--- a/src/video_core/renderer_vulkan/vk_image.cpp
+++ b/src/video_core/renderer_vulkan/vk_image.cpp
@@ -102,21 +102,29 @@ bool VKImage::HasChanged(u32 base_layer, u32 num_layers, u32 base_level, u32 num
 
 void VKImage::CreatePresentView() {
     // Image type has to be 2D to be presented.
-    VkImageViewCreateInfo image_view_ci;
-    image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
-    image_view_ci.pNext = nullptr;
-    image_view_ci.flags = 0;
-    image_view_ci.image = *image;
-    image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
-    image_view_ci.format = format;
-    image_view_ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
-                                VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
-    image_view_ci.subresourceRange.aspectMask = aspect_mask;
-    image_view_ci.subresourceRange.baseMipLevel = 0;
-    image_view_ci.subresourceRange.levelCount = 1;
-    image_view_ci.subresourceRange.baseArrayLayer = 0;
-    image_view_ci.subresourceRange.layerCount = 1;
-    present_view = device.GetLogical().CreateImageView(image_view_ci);
+    present_view = device.GetLogical().CreateImageView({
+        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+        .image = *image,
+        .viewType = VK_IMAGE_VIEW_TYPE_2D,
+        .format = format,
+        .components =
+            {
+                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
+                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
+            },
+        .subresourceRange =
+            {
+                .aspectMask = aspect_mask,
+                .baseMipLevel = 0,
+                .levelCount = 1,
+                .baseArrayLayer = 0,
+                .layerCount = 1,
+            },
+    });
 }
 
 VKImage::SubrangeState& VKImage::GetSubrangeState(u32 layer, u32 level) noexcept {
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index b4c650a63..24c8960ac 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -178,13 +178,12 @@ bool VKMemoryManager::AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 t
     }();
 
     // Try to allocate found type.
-    VkMemoryAllocateInfo memory_ai;
-    memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
-    memory_ai.pNext = nullptr;
-    memory_ai.allocationSize = size;
-    memory_ai.memoryTypeIndex = type;
-
-    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory(memory_ai);
+    vk::DeviceMemory memory = device.GetLogical().TryAllocateMemory({
+        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+        .pNext = nullptr,
+        .allocationSize = size,
+        .memoryTypeIndex = type,
+    });
     if (!memory) {
         LOG_CRITICAL(Render_Vulkan, "Device allocation failed!");
         return false;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 3da835324..42b3a744c 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -88,12 +88,13 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
             // Combined image samplers can be arrayed.
             count = container[i].size;
         }
-        VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
-        entry.binding = binding++;
-        entry.descriptorType = descriptor_type;
-        entry.descriptorCount = count;
-        entry.stageFlags = stage_flags;
-        entry.pImmutableSamplers = nullptr;
+        bindings.push_back({
+            .binding = binding++,
+            .descriptorType = descriptor_type,
+            .descriptorCount = count,
+            .stageFlags = stage_flags,
+            .pImmutableSamplers = nullptr,
+        });
     }
 }
 
@@ -259,10 +260,10 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
         }
     }
 
-    Specialization specialization;
-    specialization.workgroup_size = key.workgroup_size;
-    specialization.shared_memory_size = key.shared_memory_size;
-
+    const Specialization specialization{
+        .workgroup_size = key.workgroup_size,
+        .shared_memory_size = key.shared_memory_size,
+    };
     const SPIRVShader spirv_shader{Decompile(device, shader->GetIR(), ShaderType::Compute,
                                              shader->GetRegistry(), specialization),
                                    shader->GetEntries()};
@@ -370,13 +371,14 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
     if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
         for (u32 i = 0; i < count; ++i) {
             const u32 num_samplers = container[i].size;
-            VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
-            entry.dstBinding = binding;
-            entry.dstArrayElement = 0;
-            entry.descriptorCount = num_samplers;
-            entry.descriptorType = descriptor_type;
-            entry.offset = offset;
-            entry.stride = entry_size;
+            template_entries.push_back({
+                .dstBinding = binding,
+                .dstArrayElement = 0,
+                .descriptorCount = num_samplers,
+                .descriptorType = descriptor_type,
+                .offset = offset,
+                .stride = entry_size,
+            });
 
             ++binding;
             offset += num_samplers * entry_size;
@@ -389,22 +391,24 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
389 // Nvidia has a bug where updating multiple texels at once causes the driver to crash. 391 // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
390 // Note: Fixed in driver Windows 443.24, Linux 440.66.15 392 // Note: Fixed in driver Windows 443.24, Linux 440.66.15
391 for (u32 i = 0; i < count; ++i) { 393 for (u32 i = 0; i < count; ++i) {
392 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 394 template_entries.push_back({
393 entry.dstBinding = binding + i; 395 .dstBinding = binding + i,
394 entry.dstArrayElement = 0; 396 .dstArrayElement = 0,
395 entry.descriptorCount = 1; 397 .descriptorCount = 1,
396 entry.descriptorType = descriptor_type; 398 .descriptorType = descriptor_type,
397 entry.offset = static_cast<std::size_t>(offset + i * entry_size); 399 .offset = static_cast<std::size_t>(offset + i * entry_size),
398 entry.stride = entry_size; 400 .stride = entry_size,
401 });
399 } 402 }
400 } else if (count > 0) { 403 } else if (count > 0) {
401 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 404 template_entries.push_back({
402 entry.dstBinding = binding; 405 .dstBinding = binding,
403 entry.dstArrayElement = 0; 406 .dstArrayElement = 0,
404 entry.descriptorCount = count; 407 .descriptorCount = count,
405 entry.descriptorType = descriptor_type; 408 .descriptorType = descriptor_type,
406 entry.offset = offset; 409 .offset = offset,
407 entry.stride = entry_size; 410 .stride = entry_size,
411 });
408 } 412 }
409 offset += count * entry_size; 413 offset += count * entry_size;
410 binding += count; 414 binding += count;
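
The pipeline cache hunks swap the emplace_back-then-assign pattern for push_back of a braced temporary. A sketch of the idiom, assuming only the Vulkan headers; push_back is used because aggregates have no constructor for emplace_back to forward to:

    #include <vector>
    #include <vulkan/vulkan.h>

    // Appends one layout binding as a braced temporary. Before C++20's
    // parenthesized aggregate initialization, emplace_back cannot construct
    // an aggregate from individual arguments, so push_back of a temporary
    // is the idiomatic replacement.
    void AddBinding(std::vector<VkDescriptorSetLayoutBinding>& bindings, uint32_t& binding,
                    VkDescriptorType type, uint32_t count, VkShaderStageFlags stages) {
        bindings.push_back({
            .binding = binding++,
            .descriptorType = type,
            .descriptorCount = count,
            .stageFlags = stages,
            .pImmutableSamplers = nullptr,
        });
    }
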
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index bc91c48cc..6cd63d090 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -47,14 +47,14 @@ std::pair<VkQueryPool, u32> QueryPool::Commit(VKFence& fence) {
47void QueryPool::Allocate(std::size_t begin, std::size_t end) { 47void QueryPool::Allocate(std::size_t begin, std::size_t end) {
48 usage.resize(end); 48 usage.resize(end);
49 49
50 VkQueryPoolCreateInfo query_pool_ci; 50 pools.push_back(device->GetLogical().CreateQueryPool({
51 query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; 51 .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
52 query_pool_ci.pNext = nullptr; 52 .pNext = nullptr,
53 query_pool_ci.flags = 0; 53 .flags = 0,
54 query_pool_ci.queryType = GetTarget(type); 54 .queryType = GetTarget(type),
55 query_pool_ci.queryCount = static_cast<u32>(end - begin); 55 .queryCount = static_cast<u32>(end - begin),
56 query_pool_ci.pipelineStatistics = 0; 56 .pipelineStatistics = 0,
57 pools.push_back(device->GetLogical().CreateQueryPool(query_pool_ci)); 57 }));
58} 58}
59 59
60void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) { 60void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
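
With designated initializers the create info no longer needs a named local: the braced temporary is passed straight into the create call. A sketch under the assumption of a wrapper taking the create info by const reference, as this codebase's vk::Device does (error handling omitted):

    #include <vulkan/vulkan.h>

    // Hypothetical wrapper, mirroring the codebase's style.
    VkQueryPool CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo& ci) {
        VkQueryPool pool = VK_NULL_HANDLE;
        vkCreateQueryPool(device, &ci, nullptr, &pool);
        return pool;
    }

    VkQueryPool MakeOcclusionPool(VkDevice device, uint32_t count) {
        // The temporary lives until the end of the full expression, which is
        // long enough for the driver call to copy what it needs.
        return CreateQueryPool(device, {
            .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .queryType = VK_QUERY_TYPE_OCCLUSION,
            .queryCount = count,
            .pipelineStatistics = 0,
        });
    }
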
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 7625871c2..31e44aa2b 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -64,20 +64,22 @@ VkViewport GetViewportState(const VKDevice& device, const Maxwell& regs, std::si
64 const auto& src = regs.viewport_transform[index]; 64 const auto& src = regs.viewport_transform[index];
65 const float width = src.scale_x * 2.0f; 65 const float width = src.scale_x * 2.0f;
66 const float height = src.scale_y * 2.0f; 66 const float height = src.scale_y * 2.0f;
67 const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1.0f : 0.0f;
67 68
68 VkViewport viewport; 69 VkViewport viewport{
69 viewport.x = src.translate_x - src.scale_x; 70 .x = src.translate_x - src.scale_x,
70 viewport.y = src.translate_y - src.scale_y; 71 .y = src.translate_y - src.scale_y,
71 viewport.width = width != 0.0f ? width : 1.0f; 72 .width = width != 0.0f ? width : 1.0f,
72 viewport.height = height != 0.0f ? height : 1.0f; 73 .height = height != 0.0f ? height : 1.0f,
74 .minDepth = src.translate_z - src.scale_z * reduce_z,
75 .maxDepth = src.translate_z + src.scale_z,
76 };
73 77
74 const float reduce_z = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1.0f : 0.0f;
75 viewport.minDepth = src.translate_z - src.scale_z * reduce_z;
76 viewport.maxDepth = src.translate_z + src.scale_z;
77 if (!device.IsExtDepthRangeUnrestrictedSupported()) { 78 if (!device.IsExtDepthRangeUnrestrictedSupported()) {
78 viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f); 79 viewport.minDepth = std::clamp(viewport.minDepth, 0.0f, 1.0f);
79 viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f); 80 viewport.maxDepth = std::clamp(viewport.maxDepth, 0.0f, 1.0f);
80 } 81 }
82
81 return viewport; 83 return viewport;
82} 84}
83 85
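
The viewport hunk hoists the reduce_z computation above the initializer so minDepth and maxDepth can be set in one expression. A sketch of the depth mapping, assuming translate_z and scale_z follow the Maxwell viewport transform as in the surrounding code:

    #include <algorithm>

    struct DepthRange {
        float min_depth;
        float max_depth;
    };

    // Maxwell's [-1, 1] depth mode maps onto Vulkan's [0, 1] by pulling the
    // near plane back by one scale unit; the [0, 1] mode passes through
    // (reduce_z == 0).
    DepthRange ComputeDepthRange(float translate_z, float scale_z, bool minus_one_to_one,
                                 bool unrestricted_depth_range) {
        const float reduce_z = minus_one_to_one ? 1.0f : 0.0f;
        DepthRange range{
            .min_depth = translate_z - scale_z * reduce_z,
            .max_depth = translate_z + scale_z,
        };
        if (!unrestricted_depth_range) {
            // Without VK_EXT_depth_range_unrestricted, Vulkan requires [0, 1].
            range.min_depth = std::clamp(range.min_depth, 0.0f, 1.0f);
            range.max_depth = std::clamp(range.max_depth, 0.0f, 1.0f);
        }
        return range;
    }
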
@@ -508,10 +510,11 @@ void RasterizerVulkan::Clear() {
508 510
509 const u32 color_attachment = regs.clear_buffers.RT; 511 const u32 color_attachment = regs.clear_buffers.RT;
510 scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) { 512 scheduler.Record([color_attachment, clear_value, clear_rect](vk::CommandBuffer cmdbuf) {
511 VkClearAttachment attachment; 513 const VkClearAttachment attachment{
512 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 514 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
513 attachment.colorAttachment = color_attachment; 515 .colorAttachment = color_attachment,
514 attachment.clearValue = clear_value; 516 .clearValue = clear_value,
517 };
515 cmdbuf.ClearAttachments(attachment, clear_rect); 518 cmdbuf.ClearAttachments(attachment, clear_rect);
516 }); 519 });
517 } 520 }
@@ -551,13 +554,16 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
551 query_cache.UpdateCounters(); 554 query_cache.UpdateCounters();
552 555
553 const auto& launch_desc = system.GPU().KeplerCompute().launch_description; 556 const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
554 ComputePipelineCacheKey key; 557 auto& pipeline = pipeline_cache.GetComputePipeline({
555 key.shader = code_addr; 558 .shader = code_addr,
556 key.shared_memory_size = launch_desc.shared_alloc; 559 .shared_memory_size = launch_desc.shared_alloc,
557 key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y, 560 .workgroup_size =
558 launch_desc.block_dim_z}; 561 {
559 562 launch_desc.block_dim_x,
560 auto& pipeline = pipeline_cache.GetComputePipeline(key); 563 launch_desc.block_dim_y,
564 launch_desc.block_dim_z,
565 },
566 });
561 567
562 // Compute dispatches can't be executed inside a renderpass 568 // Compute dispatches can't be executed inside a renderpass
563 scheduler.RequestOutsideRenderPassOperationContext(); 569 scheduler.RequestOutsideRenderPassOperationContext();
@@ -841,17 +847,17 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
841 const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key); 847 const auto [fbentry, is_cache_miss] = framebuffer_cache.try_emplace(key);
842 auto& framebuffer = fbentry->second; 848 auto& framebuffer = fbentry->second;
843 if (is_cache_miss) { 849 if (is_cache_miss) {
844 VkFramebufferCreateInfo framebuffer_ci; 850 framebuffer = device.GetLogical().CreateFramebuffer({
845 framebuffer_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; 851 .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
846 framebuffer_ci.pNext = nullptr; 852 .pNext = nullptr,
847 framebuffer_ci.flags = 0; 853 .flags = 0,
848 framebuffer_ci.renderPass = key.renderpass; 854 .renderPass = key.renderpass,
849 framebuffer_ci.attachmentCount = static_cast<u32>(key.views.size()); 855 .attachmentCount = static_cast<u32>(key.views.size()),
850 framebuffer_ci.pAttachments = key.views.data(); 856 .pAttachments = key.views.data(),
851 framebuffer_ci.width = key.width; 857 .width = key.width,
852 framebuffer_ci.height = key.height; 858 .height = key.height,
853 framebuffer_ci.layers = key.layers; 859 .layers = key.layers,
854 framebuffer = device.GetLogical().CreateFramebuffer(framebuffer_ci); 860 });
855 } 861 }
856 862
857 return {*framebuffer, VkExtent2D{key.width, key.height}}; 863 return {*framebuffer, VkExtent2D{key.width, key.height}};
@@ -1553,17 +1559,17 @@ VkBuffer RasterizerVulkan::DefaultBuffer() {
1553 return *default_buffer; 1559 return *default_buffer;
1554 } 1560 }
1555 1561
1556 VkBufferCreateInfo ci; 1562 default_buffer = device.GetLogical().CreateBuffer({
1557 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 1563 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
1558 ci.pNext = nullptr; 1564 .pNext = nullptr,
1559 ci.flags = 0; 1565 .flags = 0,
1560 ci.size = DEFAULT_BUFFER_SIZE; 1566 .size = DEFAULT_BUFFER_SIZE,
1561 ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | 1567 .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
1562 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; 1568 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
1563 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 1569 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
1564 ci.queueFamilyIndexCount = 0; 1570 .queueFamilyIndexCount = 0,
1565 ci.pQueueFamilyIndices = nullptr; 1571 .pQueueFamilyIndices = nullptr,
1566 default_buffer = device.GetLogical().CreateBuffer(ci); 1572 });
1567 default_buffer_commit = memory_manager.Commit(default_buffer, false); 1573 default_buffer_commit = memory_manager.Commit(default_buffer, false);
1568 1574
1569 scheduler.RequestOutsideRenderPassOperationContext(); 1575 scheduler.RequestOutsideRenderPassOperationContext();
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 3f71d005e..80284cf92 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -39,10 +39,14 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
39 39
40vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const { 40vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
41 using namespace VideoCore::Surface; 41 using namespace VideoCore::Surface;
42 const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
43
42 std::vector<VkAttachmentDescription> descriptors; 44 std::vector<VkAttachmentDescription> descriptors;
45 descriptors.reserve(num_attachments);
46
43 std::vector<VkAttachmentReference> color_references; 47 std::vector<VkAttachmentReference> color_references;
48 color_references.reserve(num_attachments);
44 49
45 const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
46 for (std::size_t rt = 0; rt < num_attachments; ++rt) { 50 for (std::size_t rt = 0; rt < num_attachments; ++rt) {
47 const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]); 51 const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
48 const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format); 52 const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
@@ -54,20 +58,22 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
54 const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0 58 const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
55 ? VK_IMAGE_LAYOUT_GENERAL 59 ? VK_IMAGE_LAYOUT_GENERAL
56 : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; 60 : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
57 VkAttachmentDescription& descriptor = descriptors.emplace_back(); 61 descriptors.push_back({
58 descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; 62 .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
59 descriptor.format = format.format; 63 .format = format.format,
60 descriptor.samples = VK_SAMPLE_COUNT_1_BIT; 64 .samples = VK_SAMPLE_COUNT_1_BIT,
61 descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; 65 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
62 descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE; 66 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
63 descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; 67 .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
64 descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; 68 .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
65 descriptor.initialLayout = color_layout; 69 .initialLayout = color_layout,
66 descriptor.finalLayout = color_layout; 70 .finalLayout = color_layout,
67 71 });
68 VkAttachmentReference& reference = color_references.emplace_back(); 72
69 reference.attachment = static_cast<u32>(rt); 73 color_references.push_back({
70 reference.layout = color_layout; 74 .attachment = static_cast<u32>(rt),
75 .layout = color_layout,
76 });
71 } 77 }
72 78
73 VkAttachmentReference zeta_attachment_ref; 79 VkAttachmentReference zeta_attachment_ref;
@@ -82,32 +88,36 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
82 const VkImageLayout zeta_layout = params.zeta_texception != 0 88 const VkImageLayout zeta_layout = params.zeta_texception != 0
83 ? VK_IMAGE_LAYOUT_GENERAL 89 ? VK_IMAGE_LAYOUT_GENERAL
84 : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; 90 : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
85 VkAttachmentDescription& descriptor = descriptors.emplace_back(); 91 descriptors.push_back({
86 descriptor.flags = 0; 92 .flags = 0,
87 descriptor.format = format.format; 93 .format = format.format,
88 descriptor.samples = VK_SAMPLE_COUNT_1_BIT; 94 .samples = VK_SAMPLE_COUNT_1_BIT,
89 descriptor.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; 95 .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
90 descriptor.storeOp = VK_ATTACHMENT_STORE_OP_STORE; 96 .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
91 descriptor.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD; 97 .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
92 descriptor.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; 98 .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
93 descriptor.initialLayout = zeta_layout; 99 .initialLayout = zeta_layout,
94 descriptor.finalLayout = zeta_layout; 100 .finalLayout = zeta_layout,
95 101 });
96 zeta_attachment_ref.attachment = static_cast<u32>(num_attachments); 102
97 zeta_attachment_ref.layout = zeta_layout; 103 zeta_attachment_ref = {
104 .attachment = static_cast<u32>(num_attachments),
105 .layout = zeta_layout,
106 };
98 } 107 }
99 108
100 VkSubpassDescription subpass_description; 109 const VkSubpassDescription subpass_description{
101 subpass_description.flags = 0; 110 .flags = 0,
102 subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; 111 .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
103 subpass_description.inputAttachmentCount = 0; 112 .inputAttachmentCount = 0,
104 subpass_description.pInputAttachments = nullptr; 113 .pInputAttachments = nullptr,
105 subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size()); 114 .colorAttachmentCount = static_cast<u32>(color_references.size()),
106 subpass_description.pColorAttachments = color_references.data(); 115 .pColorAttachments = color_references.data(),
107 subpass_description.pResolveAttachments = nullptr; 116 .pResolveAttachments = nullptr,
108 subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr; 117 .pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr,
109 subpass_description.preserveAttachmentCount = 0; 118 .preserveAttachmentCount = 0,
110 subpass_description.pPreserveAttachments = nullptr; 119 .pPreserveAttachments = nullptr,
120 };
111 121
112 VkAccessFlags access = 0; 122 VkAccessFlags access = 0;
113 VkPipelineStageFlags stage = 0; 123 VkPipelineStageFlags stage = 0;
@@ -122,26 +132,27 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
122 stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; 132 stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
123 } 133 }
124 134
125 VkSubpassDependency subpass_dependency; 135 const VkSubpassDependency subpass_dependency{
126 subpass_dependency.srcSubpass = VK_SUBPASS_EXTERNAL; 136 .srcSubpass = VK_SUBPASS_EXTERNAL,
127 subpass_dependency.dstSubpass = 0; 137 .dstSubpass = 0,
128 subpass_dependency.srcStageMask = stage; 138 .srcStageMask = stage,
129 subpass_dependency.dstStageMask = stage; 139 .dstStageMask = stage,
130 subpass_dependency.srcAccessMask = 0; 140 .srcAccessMask = 0,
131 subpass_dependency.dstAccessMask = access; 141 .dstAccessMask = access,
132 subpass_dependency.dependencyFlags = 0; 142 .dependencyFlags = 0,
133 143 };
134 VkRenderPassCreateInfo ci; 144
135 ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; 145 return device.GetLogical().CreateRenderPass({
136 ci.pNext = nullptr; 146 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
137 ci.flags = 0; 147 .pNext = nullptr,
138 ci.attachmentCount = static_cast<u32>(descriptors.size()); 148 .flags = 0,
139 ci.pAttachments = descriptors.data(); 149 .attachmentCount = static_cast<u32>(descriptors.size()),
140 ci.subpassCount = 1; 150 .pAttachments = descriptors.data(),
141 ci.pSubpasses = &subpass_description; 151 .subpassCount = 1,
142 ci.dependencyCount = 1; 152 .pSubpasses = &subpass_description,
143 ci.pDependencies = &subpass_dependency; 153 .dependencyCount = 1,
144 return device.GetLogical().CreateRenderPass(ci); 154 .pDependencies = &subpass_dependency,
155 });
145} 156}
146 157
147} // namespace Vulkan 158} // namespace Vulkan
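
The renderpass hunks also hoist num_attachments above both vectors so they can reserve capacity up front. A sketch of the pattern; since data() is only taken after the loop, the reserve is an optimization rather than a correctness fix:

    #include <cstddef>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Reserving before the loop avoids repeated reallocation while the
    // attachment references are appended one by one.
    std::vector<VkAttachmentReference> MakeColorReferences(std::size_t num_attachments,
                                                           VkImageLayout layout) {
        std::vector<VkAttachmentReference> references;
        references.reserve(num_attachments);
        for (std::size_t rt = 0; rt < num_attachments; ++rt) {
            references.push_back({
                .attachment = static_cast<uint32_t>(rt),
                .layout = layout,
            });
        }
        return references;
    }
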
diff --git a/src/video_core/renderer_vulkan/vk_resource_manager.cpp b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
index dc06f545a..f19330a36 100644
--- a/src/video_core/renderer_vulkan/vk_resource_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_resource_manager.cpp
@@ -18,33 +18,32 @@ namespace {
18constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000; 18constexpr std::size_t COMMAND_BUFFER_POOL_SIZE = 0x1000;
19constexpr std::size_t FENCES_GROW_STEP = 0x40; 19constexpr std::size_t FENCES_GROW_STEP = 0x40;
20 20
21VkFenceCreateInfo BuildFenceCreateInfo() { 21constexpr VkFenceCreateInfo BuildFenceCreateInfo() {
22 VkFenceCreateInfo fence_ci; 22 return {
23 fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; 23 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
24 fence_ci.pNext = nullptr; 24 .pNext = nullptr,
25 fence_ci.flags = 0; 25 .flags = 0,
26 return fence_ci; 26 };
27} 27}
28 28
29} // Anonymous namespace 29} // Anonymous namespace
30 30
31class CommandBufferPool final : public VKFencedPool { 31class CommandBufferPool final : public VKFencedPool {
32public: 32public:
33 CommandBufferPool(const VKDevice& device) 33 explicit CommandBufferPool(const VKDevice& device)
34 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {} 34 : VKFencedPool(COMMAND_BUFFER_POOL_SIZE), device{device} {}
35 35
36 void Allocate(std::size_t begin, std::size_t end) override { 36 void Allocate(std::size_t begin, std::size_t end) override {
37 // Command buffers are going to be committed, recorded, and executed every single usage cycle. 37
38 // They are also going to be reset when committed. 38
39 VkCommandPoolCreateInfo command_pool_ci;
40 command_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
41 command_pool_ci.pNext = nullptr;
42 command_pool_ci.flags =
43 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
44 command_pool_ci.queueFamilyIndex = device.GetGraphicsFamily();
45
46 Pool& pool = pools.emplace_back(); 39 Pool& pool = pools.emplace_back();
47 pool.handle = device.GetLogical().CreateCommandPool(command_pool_ci); 40 pool.handle = device.GetLogical().CreateCommandPool({
41 .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
42 .pNext = nullptr,
43 .flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
44 VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
45 .queueFamilyIndex = device.GetGraphicsFamily(),
46 });
48 pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE); 47 pool.cmdbufs = pool.handle.Allocate(COMMAND_BUFFER_POOL_SIZE);
49 } 48 }
50 49
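
Because a designated initializer of compile-time constants is itself a constant expression, the fence factory above can now be constexpr. A minimal sketch:

    #include <vulkan/vulkan.h>

    // With no runtime inputs, the whole aggregate is a compile-time constant,
    // so the factory can be constexpr and costs nothing at the call site.
    constexpr VkFenceCreateInfo BuildFenceCreateInfo() {
        return {
            .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
        };
    }

    static_assert(BuildFenceCreateInfo().flags == 0);
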
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 616eacc36..2d5460776 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -44,32 +44,35 @@ vk::Sampler VKSamplerCache::CreateSampler(const Tegra::Texture::TSCEntry& tsc) c
44 const bool arbitrary_borders = device.IsExtCustomBorderColorSupported(); 44 const bool arbitrary_borders = device.IsExtCustomBorderColorSupported();
45 const std::array color = tsc.GetBorderColor(); 45 const std::array color = tsc.GetBorderColor();
46 46
47 VkSamplerCustomBorderColorCreateInfoEXT border; 47 VkSamplerCustomBorderColorCreateInfoEXT border{
48 border.sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT; 48 .sType = VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT,
49 border.pNext = nullptr; 49 .pNext = nullptr,
50 border.format = VK_FORMAT_UNDEFINED; 50 .format = VK_FORMAT_UNDEFINED,
51 };
51 std::memcpy(&border.customBorderColor, color.data(), sizeof(color)); 52 std::memcpy(&border.customBorderColor, color.data(), sizeof(color));
52 53
53 VkSamplerCreateInfo ci; 54 return device.GetLogical().CreateSampler({
54 ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; 55 .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
55 ci.pNext = arbitrary_borders ? &border : nullptr; 56 .pNext = arbitrary_borders ? &border : nullptr,
56 ci.flags = 0; 57 .flags = 0,
57 ci.magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter); 58 .magFilter = MaxwellToVK::Sampler::Filter(tsc.mag_filter),
58 ci.minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter); 59 .minFilter = MaxwellToVK::Sampler::Filter(tsc.min_filter),
59 ci.mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter); 60 .mipmapMode = MaxwellToVK::Sampler::MipmapMode(tsc.mipmap_filter),
60 ci.addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter); 61 .addressModeU = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_u, tsc.mag_filter),
61 ci.addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter); 62 .addressModeV = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_v, tsc.mag_filter),
62 ci.addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter); 63 .addressModeW = MaxwellToVK::Sampler::WrapMode(device, tsc.wrap_p, tsc.mag_filter),
63 ci.mipLodBias = tsc.GetLodBias(); 64 .mipLodBias = tsc.GetLodBias(),
64 ci.anisotropyEnable = tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE; 65 .anisotropyEnable =
65 ci.maxAnisotropy = tsc.GetMaxAnisotropy(); 66 static_cast<VkBool32>(tsc.GetMaxAnisotropy() > 1.0f ? VK_TRUE : VK_FALSE),
66 ci.compareEnable = tsc.depth_compare_enabled; 67 .maxAnisotropy = tsc.GetMaxAnisotropy(),
67 ci.compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func); 68 .compareEnable = tsc.depth_compare_enabled,
68 ci.minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.GetMinLod(); 69 .compareOp = MaxwellToVK::Sampler::DepthCompareFunction(tsc.depth_compare_func),
69 ci.maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.GetMaxLod(); 70 .minLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.0f : tsc.GetMinLod(),
70 ci.borderColor = arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color); 71 .maxLod = tsc.mipmap_filter == TextureMipmapFilter::None ? 0.25f : tsc.GetMaxLod(),
71 ci.unnormalizedCoordinates = VK_FALSE; 72 .borderColor =
72 return device.GetLogical().CreateSampler(ci); 73 arbitrary_borders ? VK_BORDER_COLOR_INT_CUSTOM_EXT : ConvertBorderColor(color),
74 .unnormalizedCoordinates = VK_FALSE,
75 });
73} 76}
74 77
75VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const { 78VkSampler VKSamplerCache::ToSamplerType(const vk::Sampler& sampler) const {
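
Braced initialization rejects implicit narrowing conversions, which is why the sampler hunk casts the anisotropy test to VkBool32 explicitly. A minimal sketch, assuming VK_TRUE/VK_FALSE expand to plain int literals on the toolchain in question (as they did in headers of this era):

    #include <vulkan/vulkan.h>

    struct SamplerConfig {
        VkBool32 anisotropy_enable; // VkBool32 is uint32_t
    };

    SamplerConfig MakeConfig(float max_anisotropy) {
        return {
            // The ternary yields int when VK_TRUE/VK_FALSE are plain integer
            // literals; int -> uint32_t is narrowing, which braced
            // initialization rejects for non-constant values. The explicit
            // cast keeps the initializer well-formed.
            .anisotropy_enable =
                static_cast<VkBool32>(max_anisotropy > 1.0f ? VK_TRUE : VK_FALSE),
        };
    }
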
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 56524e6f3..dbbd0961a 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -100,16 +100,19 @@ void VKScheduler::RequestRenderpass(VkRenderPass renderpass, VkFramebuffer frame
100 state.framebuffer = framebuffer; 100 state.framebuffer = framebuffer;
101 state.render_area = render_area; 101 state.render_area = render_area;
102 102
103 VkRenderPassBeginInfo renderpass_bi; 103 const VkRenderPassBeginInfo renderpass_bi{
104 renderpass_bi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; 104 .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
105 renderpass_bi.pNext = nullptr; 105 .pNext = nullptr,
106 renderpass_bi.renderPass = renderpass; 106 .renderPass = renderpass,
107 renderpass_bi.framebuffer = framebuffer; 107 .framebuffer = framebuffer,
108 renderpass_bi.renderArea.offset.x = 0; 108 .renderArea =
109 renderpass_bi.renderArea.offset.y = 0; 109 {
110 renderpass_bi.renderArea.extent = render_area; 110 .offset = {.x = 0, .y = 0},
111 renderpass_bi.clearValueCount = 0; 111 .extent = render_area,
112 renderpass_bi.pClearValues = nullptr; 112 },
113 .clearValueCount = 0,
114 .pClearValues = nullptr,
115 };
113 116
114 Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) { 117 Record([renderpass_bi, end_renderpass](vk::CommandBuffer cmdbuf) {
115 if (end_renderpass) { 118 if (end_renderpass) {
@@ -157,16 +160,17 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
157 160
158 current_cmdbuf.End(); 161 current_cmdbuf.End();
159 162
160 VkSubmitInfo submit_info; 163 const VkSubmitInfo submit_info{
161 submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; 164 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
162 submit_info.pNext = nullptr; 165 .pNext = nullptr,
163 submit_info.waitSemaphoreCount = 0; 166 .waitSemaphoreCount = 0,
164 submit_info.pWaitSemaphores = nullptr; 167 .pWaitSemaphores = nullptr,
165 submit_info.pWaitDstStageMask = nullptr; 168 .pWaitDstStageMask = nullptr,
166 submit_info.commandBufferCount = 1; 169 .commandBufferCount = 1,
167 submit_info.pCommandBuffers = current_cmdbuf.address(); 170 .pCommandBuffers = current_cmdbuf.address(),
168 submit_info.signalSemaphoreCount = semaphore ? 1 : 0; 171 .signalSemaphoreCount = semaphore ? 1U : 0U,
169 submit_info.pSignalSemaphores = &semaphore; 172 .pSignalSemaphores = &semaphore,
173 };
170 switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) { 174 switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
171 case VK_SUCCESS: 175 case VK_SUCCESS:
172 break; 176 break;
@@ -181,19 +185,18 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
181void VKScheduler::AllocateNewContext() { 185void VKScheduler::AllocateNewContext() {
182 ++ticks; 186 ++ticks;
183 187
184 VkCommandBufferBeginInfo cmdbuf_bi;
185 cmdbuf_bi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
186 cmdbuf_bi.pNext = nullptr;
187 cmdbuf_bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
188 cmdbuf_bi.pInheritanceInfo = nullptr;
189
190 std::unique_lock lock{mutex}; 188 std::unique_lock lock{mutex};
191 current_fence = next_fence; 189 current_fence = next_fence;
192 next_fence = &resource_manager.CommitFence(); 190 next_fence = &resource_manager.CommitFence();
193 191
194 current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence), 192 current_cmdbuf = vk::CommandBuffer(resource_manager.CommitCommandBuffer(*current_fence),
195 device.GetDispatchLoader()); 193 device.GetDispatchLoader());
196 current_cmdbuf.Begin(cmdbuf_bi); 194 current_cmdbuf.Begin({
195 .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
196 .pNext = nullptr,
197 .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
198 .pInheritanceInfo = nullptr,
199 });
197 200
198 // Enable counters once again. These are disabled when a command buffer is finished. 201 // Enable counters once again. These are disabled when a command buffer is finished.
199 if (query_cache) { 202 if (query_cache) {
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index 112df9c71..c1a218d76 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -19,13 +19,13 @@ vk::ShaderModule BuildShader(const VKDevice& device, std::size_t code_size, cons
19 const auto data = std::make_unique<u32[]>(code_size / sizeof(u32)); 19 const auto data = std::make_unique<u32[]>(code_size / sizeof(u32));
20 std::memcpy(data.get(), code_data, code_size); 20 std::memcpy(data.get(), code_data, code_size);
21 21
22 VkShaderModuleCreateInfo ci; 22 return device.GetLogical().CreateShaderModule({
23 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; 23 .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
24 ci.pNext = nullptr; 24 .pNext = nullptr,
25 ci.flags = 0; 25 .flags = 0,
26 ci.codeSize = code_size; 26 .codeSize = code_size,
27 ci.pCode = data.get(); 27 .pCode = data.get(),
28 return device.GetLogical().CreateShaderModule(ci); 28 });
29} 29}
30 30
31} // namespace Vulkan 31} // namespace Vulkan
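
The context lines above copy the shader bytes into a freshly allocated u32 array before building the module, because pCode must point at 4-byte-aligned storage while the caller's byte span may not be aligned. A sketch of that step against the raw API (error handling omitted):

    #include <cstring>
    #include <memory>
    #include <vulkan/vulkan.h>

    // SPIR-V words are 32-bit; copying into a uint32_t array guarantees the
    // alignment vkCreateShaderModule requires, regardless of how the caller
    // stored the bytes.
    VkShaderModule BuildShaderModule(VkDevice device, std::size_t code_size,
                                     const uint8_t* code_data) {
        const auto data = std::make_unique<uint32_t[]>(code_size / sizeof(uint32_t));
        std::memcpy(data.get(), code_data, code_size);

        const VkShaderModuleCreateInfo ci{
            .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .codeSize = code_size,
            .pCode = data.get(),
        };
        VkShaderModule module = VK_NULL_HANDLE;
        vkCreateShaderModule(device, &ci, nullptr, &module); // error handling omitted
        return module;
    }
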
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 45c180221..5eca0ab91 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -71,20 +71,19 @@ VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_
71VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) { 71VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_visible) {
72 const u32 log2 = Common::Log2Ceil64(size); 72 const u32 log2 = Common::Log2Ceil64(size);
73 73
74 VkBufferCreateInfo ci;
75 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
76 ci.pNext = nullptr;
77 ci.flags = 0;
78 ci.size = 1ULL << log2;
79 ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
80 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
81 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
82 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
83 ci.queueFamilyIndexCount = 0;
84 ci.pQueueFamilyIndices = nullptr;
85
86 auto buffer = std::make_unique<VKBuffer>(); 74 auto buffer = std::make_unique<VKBuffer>();
87 buffer->handle = device.GetLogical().CreateBuffer(ci); 75 buffer->handle = device.GetLogical().CreateBuffer({
76 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
77 .pNext = nullptr,
78 .flags = 0,
79 .size = 1ULL << log2,
80 .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
81 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
82 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
83 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
84 .queueFamilyIndexCount = 0,
85 .pQueueFamilyIndices = nullptr,
86 });
88 buffer->commit = memory_manager.Commit(buffer->handle, host_visible); 87 buffer->commit = memory_manager.Commit(buffer->handle, host_visible);
89 88
90 auto& entries = GetCache(host_visible)[log2].entries; 89 auto& entries = GetCache(host_visible)[log2].entries;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index e5a583dd5..9151d9fb1 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -158,6 +158,7 @@ void StateTracker::Initialize() {
158 SetupDirtyFrontFace(tables); 158 SetupDirtyFrontFace(tables);
159 SetupDirtyPrimitiveTopology(tables); 159 SetupDirtyPrimitiveTopology(tables);
160 SetupDirtyStencilOp(tables); 160 SetupDirtyStencilOp(tables);
161 SetupDirtyStencilTestEnable(tables);
161} 162}
162 163
163void StateTracker::InvalidateCommandBufferState() { 164void StateTracker::InvalidateCommandBufferState() {
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index 2d28a6c47..a5526a3f5 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -122,30 +122,27 @@ void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
122 // Subtract some bytes from the preferred heap size to avoid running out of memory. 122
123 const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size; 123 const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
124 const VkDeviceSize allocable_size = heap_size - 9 * 1024 * 1024; 124 const VkDeviceSize allocable_size = heap_size - 9 * 1024 * 1024;
125 125 buffer = device.GetLogical().CreateBuffer({
126 VkBufferCreateInfo buffer_ci; 126 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
127 buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 127 .pNext = nullptr,
128 buffer_ci.pNext = nullptr; 128 .flags = 0,
129 buffer_ci.flags = 0; 129 .size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size),
130 buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size); 130 .usage = usage,
131 buffer_ci.usage = usage; 131 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
132 buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 132 .queueFamilyIndexCount = 0,
133 buffer_ci.queueFamilyIndexCount = 0; 133 .pQueueFamilyIndices = nullptr,
134 buffer_ci.pQueueFamilyIndices = nullptr; 134 });
135
136 buffer = device.GetLogical().CreateBuffer(buffer_ci);
137 135
138 const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer); 136 const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
139 const u32 required_flags = requirements.memoryTypeBits; 137 const u32 required_flags = requirements.memoryTypeBits;
140 stream_buffer_size = static_cast<u64>(requirements.size); 138 stream_buffer_size = static_cast<u64>(requirements.size);
141 139
142 VkMemoryAllocateInfo memory_ai; 140 memory = device.GetLogical().AllocateMemory({
143 memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; 141 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
144 memory_ai.pNext = nullptr; 142 .pNext = nullptr,
145 memory_ai.allocationSize = requirements.size; 143 .allocationSize = requirements.size,
146 memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags); 144 .memoryTypeIndex = GetMemoryType(memory_properties, required_flags),
147 145 });
148 memory = device.GetLogical().AllocateMemory(memory_ai);
149 buffer.BindMemory(*memory, 0); 146 buffer.BindMemory(*memory, 0);
150} 147}
151 148
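
The stream buffer clamps its size to the preferred heap minus a fixed 9 MiB margin. A sketch of that arithmetic; the preferred-size constant below is an assumption, as the real value lives elsewhere in the codebase:

    #include <algorithm>
    #include <cstdint>

    // Assumed value for illustration only.
    constexpr uint64_t PREFERRED_STREAM_BUFFER_SIZE = 128 * 1024 * 1024;

    // Leave a small margin on the heap so the allocation itself cannot push
    // the device out of memory, then take whichever size is smaller.
    constexpr uint64_t StreamBufferSize(uint64_t heap_size) {
        const uint64_t allocable = heap_size - 9 * 1024 * 1024;
        return std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable);
    }
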
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index bffd8f32a..c25e312b6 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -95,15 +95,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore, VKFence& fence) {
95 const auto present_queue{device.GetPresentQueue()}; 95 const auto present_queue{device.GetPresentQueue()};
96 bool recreated = false; 96 bool recreated = false;
97 97
98 VkPresentInfoKHR present_info; 98 const VkPresentInfoKHR present_info{
99 present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; 99 .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
100 present_info.pNext = nullptr; 100 .pNext = nullptr,
101 present_info.waitSemaphoreCount = render_semaphore ? 2U : 1U; 101 .waitSemaphoreCount = render_semaphore ? 2U : 1U,
102 present_info.pWaitSemaphores = semaphores.data(); 102 .pWaitSemaphores = semaphores.data(),
103 present_info.swapchainCount = 1; 103 .swapchainCount = 1,
104 present_info.pSwapchains = swapchain.address(); 104 .pSwapchains = swapchain.address(),
105 present_info.pImageIndices = &image_index; 105 .pImageIndices = &image_index,
106 present_info.pResults = nullptr; 106 .pResults = nullptr,
107 };
107 108
108 switch (const VkResult result = present_queue.Present(present_info)) { 109 switch (const VkResult result = present_queue.Present(present_info)) {
109 case VK_SUCCESS: 110 case VK_SUCCESS:
@@ -147,24 +148,25 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
147 requested_image_count = capabilities.maxImageCount; 148 requested_image_count = capabilities.maxImageCount;
148 } 149 }
149 150
150 VkSwapchainCreateInfoKHR swapchain_ci; 151 VkSwapchainCreateInfoKHR swapchain_ci{
151 swapchain_ci.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; 152 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
152 swapchain_ci.pNext = nullptr; 153 .pNext = nullptr,
153 swapchain_ci.flags = 0; 154 .flags = 0,
154 swapchain_ci.surface = surface; 155 .surface = surface,
155 swapchain_ci.minImageCount = requested_image_count; 156 .minImageCount = requested_image_count,
156 swapchain_ci.imageFormat = surface_format.format; 157 .imageFormat = surface_format.format,
157 swapchain_ci.imageColorSpace = surface_format.colorSpace; 158 .imageColorSpace = surface_format.colorSpace,
158 swapchain_ci.imageArrayLayers = 1; 159 .imageArrayLayers = 1,
159 swapchain_ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; 160 .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
160 swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE; 161 .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
161 swapchain_ci.queueFamilyIndexCount = 0; 162 .queueFamilyIndexCount = 0,
162 swapchain_ci.pQueueFamilyIndices = nullptr; 163 .pQueueFamilyIndices = nullptr,
163 swapchain_ci.preTransform = capabilities.currentTransform; 164 .preTransform = capabilities.currentTransform,
164 swapchain_ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR; 165 .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
165 swapchain_ci.presentMode = present_mode; 166 .presentMode = present_mode,
166 swapchain_ci.clipped = VK_FALSE; 167 .clipped = VK_FALSE,
167 swapchain_ci.oldSwapchain = nullptr; 168 .oldSwapchain = nullptr,
169 };
168 170
169 const u32 graphics_family{device.GetGraphicsFamily()}; 171 const u32 graphics_family{device.GetGraphicsFamily()};
170 const u32 present_family{device.GetPresentFamily()}; 172 const u32 present_family{device.GetPresentFamily()};
@@ -173,8 +175,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
173 swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT; 175 swapchain_ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
174 swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size()); 176 swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
175 swapchain_ci.pQueueFamilyIndices = queue_indices.data(); 177 swapchain_ci.pQueueFamilyIndices = queue_indices.data();
176 } else {
177 swapchain_ci.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
178 } 178 }
179 179
180 // Request the size again to reduce the possibility of a TOCTOU race condition. 180 // Request the size again to reduce the possibility of a TOCTOU race condition.
@@ -200,20 +200,28 @@ void VKSwapchain::CreateSemaphores() {
200} 200}
201 201
202void VKSwapchain::CreateImageViews() { 202void VKSwapchain::CreateImageViews() {
203 VkImageViewCreateInfo ci; 203 VkImageViewCreateInfo ci{
204 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; 204 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
205 ci.pNext = nullptr; 205 .pNext = nullptr,
206 ci.flags = 0; 206 .flags = 0,
207 // ci.image 207 .viewType = VK_IMAGE_VIEW_TYPE_2D,
208 ci.viewType = VK_IMAGE_VIEW_TYPE_2D; 208 .format = image_format,
209 ci.format = image_format; 209 .components =
210 ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, 210 {
211 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}; 211 .r = VK_COMPONENT_SWIZZLE_IDENTITY,
212 ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 212 .g = VK_COMPONENT_SWIZZLE_IDENTITY,
213 ci.subresourceRange.baseMipLevel = 0; 213 .b = VK_COMPONENT_SWIZZLE_IDENTITY,
214 ci.subresourceRange.levelCount = 1; 214 .a = VK_COMPONENT_SWIZZLE_IDENTITY,
215 ci.subresourceRange.baseArrayLayer = 0; 215 },
216 ci.subresourceRange.layerCount = 1; 216 .subresourceRange =
217 {
218 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
219 .baseMipLevel = 0,
220 .levelCount = 1,
221 .baseArrayLayer = 0,
222 .layerCount = 1,
223 },
224 };
217 225
218 image_views.resize(image_count); 226 image_views.resize(image_count);
219 for (std::size_t i = 0; i < image_count; i++) { 227 for (std::size_t i = 0; i < image_count; i++) {
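
Note that swapchain_ci is deliberately not const: the initializer picks exclusive sharing, and only the concurrent case mutates the struct afterwards, which is why the removed else branch was redundant. A sketch of that flow:

    #include <array>
    #include <cstdint>
    #include <vulkan/vulkan.h>

    void ConfigureSharingMode(VkSwapchainCreateInfoKHR& ci, uint32_t graphics_family,
                              uint32_t present_family,
                              const std::array<uint32_t, 2>& queue_indices) {
        // The designated initializer already chose VK_SHARING_MODE_EXCLUSIVE,
        // so only the concurrent case needs to touch the struct afterwards.
        if (graphics_family != present_family) {
            ci.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
            ci.queueFamilyIndexCount = static_cast<uint32_t>(queue_indices.size());
            ci.pQueueFamilyIndices = queue_indices.data();
        }
    }
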
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index bd93dcf20..9bc18c21a 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -95,17 +95,18 @@ VkImageViewType GetImageViewType(SurfaceTarget target) {
95vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params, 95vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
96 std::size_t host_memory_size) { 96 std::size_t host_memory_size) {
97 // TODO(Rodrigo): Move texture buffer creation to the buffer cache 97 // TODO(Rodrigo): Move texture buffer creation to the buffer cache
98 VkBufferCreateInfo ci; 98 return device.GetLogical().CreateBuffer({
99 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 99 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
100 ci.pNext = nullptr; 100 .pNext = nullptr,
101 ci.flags = 0; 101 .flags = 0,
102 ci.size = static_cast<VkDeviceSize>(host_memory_size); 102 .size = static_cast<VkDeviceSize>(host_memory_size),
103 ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | 103 .usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
104 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; 104 VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
105 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 105 VK_BUFFER_USAGE_TRANSFER_DST_BIT,
106 ci.queueFamilyIndexCount = 0; 106 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
107 ci.pQueueFamilyIndices = nullptr; 107 .queueFamilyIndexCount = 0,
108 return device.GetLogical().CreateBuffer(ci); 108 .pQueueFamilyIndices = nullptr,
109 });
109} 110}
110 111
111VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device, 112VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
@@ -113,15 +114,16 @@ VkBufferViewCreateInfo GenerateBufferViewCreateInfo(const VKDevice& device,
113 std::size_t host_memory_size) { 114 std::size_t host_memory_size) {
114 ASSERT(params.IsBuffer()); 115 ASSERT(params.IsBuffer());
115 116
116 VkBufferViewCreateInfo ci; 117 return {
117 ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; 118 .sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
118 ci.pNext = nullptr; 119 .pNext = nullptr,
119 ci.flags = 0; 120 .flags = 0,
120 ci.buffer = buffer; 121 .buffer = buffer,
121 ci.format = MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format; 122 .format =
122 ci.offset = 0; 123 MaxwellToVK::SurfaceFormat(device, FormatType::Buffer, params.pixel_format).format,
123 ci.range = static_cast<VkDeviceSize>(host_memory_size); 124 .offset = 0,
124 return ci; 125 .range = static_cast<VkDeviceSize>(host_memory_size),
126 };
125} 127}
126 128
127VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) { 129VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceParams& params) {
@@ -130,23 +132,23 @@ VkImageCreateInfo GenerateImageCreateInfo(const VKDevice& device, const SurfaceP
130 const auto [format, attachable, storage] = 132 const auto [format, attachable, storage] =
131 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format); 133 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.pixel_format);
132 134
133 VkImageCreateInfo ci; 135 VkImageCreateInfo ci{
134 ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; 136 .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
135 ci.pNext = nullptr; 137 .pNext = nullptr,
136 ci.flags = 0; 138 .flags = 0,
137 ci.imageType = SurfaceTargetToImage(params.target); 139 .imageType = SurfaceTargetToImage(params.target),
138 ci.format = format; 140 .format = format,
139 ci.mipLevels = params.num_levels; 141 .mipLevels = params.num_levels,
140 ci.arrayLayers = static_cast<u32>(params.GetNumLayers()); 142 .arrayLayers = static_cast<u32>(params.GetNumLayers()),
141 ci.samples = VK_SAMPLE_COUNT_1_BIT; 143 .samples = VK_SAMPLE_COUNT_1_BIT,
142 ci.tiling = VK_IMAGE_TILING_OPTIMAL; 144 .tiling = VK_IMAGE_TILING_OPTIMAL,
143 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 145 .usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
144 ci.queueFamilyIndexCount = 0; 146 VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
145 ci.pQueueFamilyIndices = nullptr; 147 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
146 ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; 148 .queueFamilyIndexCount = 0,
147 149 .pQueueFamilyIndices = nullptr,
148 ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | 150 .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
149 VK_IMAGE_USAGE_TRANSFER_SRC_BIT; 151 };
150 if (attachable) { 152 if (attachable) {
151 ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT 153 ci.usage |= params.IsPixelFormatZeta() ? VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
152 : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; 154 : VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
@@ -321,22 +323,25 @@ void CachedSurface::UploadImage(const std::vector<u8>& staging_buffer) {
321} 323}
322 324
323VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const { 325VkBufferImageCopy CachedSurface::GetBufferImageCopy(u32 level) const {
324 VkBufferImageCopy copy; 326 return {
325 copy.bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted); 327 .bufferOffset = params.GetHostMipmapLevelOffset(level, is_converted),
326 copy.bufferRowLength = 0; 328 .bufferRowLength = 0,
327 copy.bufferImageHeight = 0; 329 .bufferImageHeight = 0,
328 copy.imageSubresource.aspectMask = image->GetAspectMask(); 330 .imageSubresource =
329 copy.imageSubresource.mipLevel = level; 331 {
330 copy.imageSubresource.baseArrayLayer = 0; 332 .aspectMask = image->GetAspectMask(),
331 copy.imageSubresource.layerCount = static_cast<u32>(params.GetNumLayers()); 333 .mipLevel = level,
332 copy.imageOffset.x = 0; 334 .baseArrayLayer = 0,
333 copy.imageOffset.y = 0; 335 .layerCount = static_cast<u32>(params.GetNumLayers()),
334 copy.imageOffset.z = 0; 336 },
335 copy.imageExtent.width = params.GetMipWidth(level); 337 .imageOffset = {.x = 0, .y = 0, .z = 0},
336 copy.imageExtent.height = params.GetMipHeight(level); 338 .imageExtent =
337 copy.imageExtent.depth = 339 {
338 params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1; 340 .width = params.GetMipWidth(level),
339 return copy; 341 .height = params.GetMipHeight(level),
342 .depth = params.target == SurfaceTarget::Texture3D ? params.GetMipDepth(level) : 1U,
343 },
344 };
340} 345}
341 346
342VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const { 347VkImageSubresourceRange CachedSurface::GetImageSubresourceRange() const {
@@ -416,20 +421,29 @@ VkImageView CachedSurfaceView::GetImageView(SwizzleSource x_source, SwizzleSourc
416 ASSERT(num_slices == params.depth); 421 ASSERT(num_slices == params.depth);
417 } 422 }
418 423
419 VkImageViewCreateInfo ci; 424 image_view = device.GetLogical().CreateImageView({
420 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; 425 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
421 ci.pNext = nullptr; 426 .pNext = nullptr,
422 ci.flags = 0; 427 .flags = 0,
423 ci.image = surface.GetImageHandle(); 428 .image = surface.GetImageHandle(),
424 ci.viewType = image_view_type; 429 .viewType = image_view_type,
425 ci.format = surface.GetImage().GetFormat(); 430 .format = surface.GetImage().GetFormat(),
426 ci.components = {swizzle[0], swizzle[1], swizzle[2], swizzle[3]}; 431 .components =
427 ci.subresourceRange.aspectMask = aspect; 432 {
428 ci.subresourceRange.baseMipLevel = base_level; 433 .r = swizzle[0],
429 ci.subresourceRange.levelCount = num_levels; 434 .g = swizzle[1],
430 ci.subresourceRange.baseArrayLayer = base_layer; 435 .b = swizzle[2],
431 ci.subresourceRange.layerCount = num_layers; 436 .a = swizzle[3],
432 image_view = device.GetLogical().CreateImageView(ci); 437 },
438 .subresourceRange =
439 {
440 .aspectMask = aspect,
441 .baseMipLevel = base_level,
442 .levelCount = num_levels,
443 .baseArrayLayer = base_layer,
444 .layerCount = num_layers,
445 },
446 });
433 447
434 return last_image_view = *image_view; 448 return last_image_view = *image_view;
435} 449}
@@ -439,17 +453,26 @@ VkImageView CachedSurfaceView::GetAttachment() {
439 return *render_target; 453 return *render_target;
440 } 454 }
441 455
442 VkImageViewCreateInfo ci; 456 VkImageViewCreateInfo ci{
443 ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; 457 .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
444 ci.pNext = nullptr; 458 .pNext = nullptr,
445 ci.flags = 0; 459 .flags = 0,
446 ci.image = surface.GetImageHandle(); 460 .image = surface.GetImageHandle(),
447 ci.format = surface.GetImage().GetFormat(); 461 .format = surface.GetImage().GetFormat(),
448 ci.components = {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, 462 .components =
449 VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}; 463 {
450 ci.subresourceRange.aspectMask = aspect_mask; 464 .r = VK_COMPONENT_SWIZZLE_IDENTITY,
451 ci.subresourceRange.baseMipLevel = base_level; 465 .g = VK_COMPONENT_SWIZZLE_IDENTITY,
452 ci.subresourceRange.levelCount = num_levels; 466 .b = VK_COMPONENT_SWIZZLE_IDENTITY,
467 .a = VK_COMPONENT_SWIZZLE_IDENTITY,
468 },
469 .subresourceRange =
470 {
471 .aspectMask = aspect_mask,
472 .baseMipLevel = base_level,
473 .levelCount = num_levels,
474 },
475 };
453 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) { 476 if (image_view_type == VK_IMAGE_VIEW_TYPE_3D) {
454 ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D; 477 ci.viewType = num_slices > 1 ? VK_IMAGE_VIEW_TYPE_2D_ARRAY : VK_IMAGE_VIEW_TYPE_2D;
455 ci.subresourceRange.baseArrayLayer = base_slice; 478 ci.subresourceRange.baseArrayLayer = base_slice;
@@ -502,24 +525,40 @@ void VKTextureCache::ImageCopy(Surface& src_surface, Surface& dst_surface,
502 VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 525 VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
503 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); 526 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
504 527
505 VkImageCopy copy; 528 const VkImageCopy copy{
506 copy.srcSubresource.aspectMask = src_surface->GetAspectMask(); 529 .srcSubresource =
507 copy.srcSubresource.mipLevel = copy_params.source_level; 530 {
508 copy.srcSubresource.baseArrayLayer = copy_params.source_z; 531 .aspectMask = src_surface->GetAspectMask(),
509 copy.srcSubresource.layerCount = num_layers; 532 .mipLevel = copy_params.source_level,
510 copy.srcOffset.x = copy_params.source_x; 533 .baseArrayLayer = copy_params.source_z,
511 copy.srcOffset.y = copy_params.source_y; 534 .layerCount = num_layers,
512 copy.srcOffset.z = 0; 535 },
513 copy.dstSubresource.aspectMask = dst_surface->GetAspectMask(); 536 .srcOffset =
514 copy.dstSubresource.mipLevel = copy_params.dest_level; 537 {
515 copy.dstSubresource.baseArrayLayer = dst_base_layer; 538 .x = static_cast<s32>(copy_params.source_x),
516 copy.dstSubresource.layerCount = num_layers; 539 .y = static_cast<s32>(copy_params.source_y),
517 copy.dstOffset.x = copy_params.dest_x; 540 .z = 0,
518 copy.dstOffset.y = copy_params.dest_y; 541 },
519 copy.dstOffset.z = dst_offset_z; 542 .dstSubresource =
520 copy.extent.width = copy_params.width; 543 {
521 copy.extent.height = copy_params.height; 544 .aspectMask = dst_surface->GetAspectMask(),
522 copy.extent.depth = extent_z; 545 .mipLevel = copy_params.dest_level,
546 .baseArrayLayer = dst_base_layer,
547 .layerCount = num_layers,
548 },
549 .dstOffset =
550 {
551 .x = static_cast<s32>(copy_params.dest_x),
552 .y = static_cast<s32>(copy_params.dest_y),
553 .z = static_cast<s32>(dst_offset_z),
554 },
555 .extent =
556 {
557 .width = copy_params.width,
558 .height = copy_params.height,
559 .depth = extent_z,
560 },
561 };
523 562
524 const VkImage src_image = src_surface->GetImageHandle(); 563 const VkImage src_image = src_surface->GetImageHandle();
525 const VkImage dst_image = dst_surface->GetImageHandle(); 564 const VkImage dst_image = dst_surface->GetImageHandle();
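
The texture cache hunks show that nested structs can be designated-initialized inline, and that the copy coordinates need explicit s32 casts because VkOffset3D is signed while the guest copy parameters are unsigned. A simplified single-level sketch:

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // VkOffset3D members are int32_t, so each unsigned coordinate needs an
    // explicit cast to avoid a narrowing error in the braced initializer.
    VkImageCopy MakeColorCopy(uint32_t src_x, uint32_t src_y, uint32_t level,
                              uint32_t width, uint32_t height, uint32_t layers) {
        return {
            .srcSubresource =
                {
                    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                    .mipLevel = level,
                    .baseArrayLayer = 0,
                    .layerCount = layers,
                },
            .srcOffset = {.x = static_cast<int32_t>(src_x),
                          .y = static_cast<int32_t>(src_y),
                          .z = 0},
            .dstSubresource =
                {
                    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                    .mipLevel = level,
                    .baseArrayLayer = 0,
                    .layerCount = layers,
                },
            .dstOffset = {.x = 0, .y = 0, .z = 0},
            .extent = {.width = width, .height = height, .depth = 1},
        };
    }
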
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 051298cc8..14cac38ea 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -377,24 +377,26 @@ VkResult Free(VkDevice device, VkCommandPool handle, Span<VkCommandBuffer> buffe
377 377
378Instance Instance::Create(Span<const char*> layers, Span<const char*> extensions, 378Instance Instance::Create(Span<const char*> layers, Span<const char*> extensions,
379 InstanceDispatch& dld) noexcept { 379 InstanceDispatch& dld) noexcept {
380 VkApplicationInfo application_info; 380 static constexpr VkApplicationInfo application_info{
381 application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; 381 .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
382 application_info.pNext = nullptr; 382 .pNext = nullptr,
383 application_info.pApplicationName = "yuzu Emulator"; 383 .pApplicationName = "yuzu Emulator",
384 application_info.applicationVersion = VK_MAKE_VERSION(0, 1, 0); 384 .applicationVersion = VK_MAKE_VERSION(0, 1, 0),
385 application_info.pEngineName = "yuzu Emulator"; 385 .pEngineName = "yuzu Emulator",
386 application_info.engineVersion = VK_MAKE_VERSION(0, 1, 0); 386 .engineVersion = VK_MAKE_VERSION(0, 1, 0),
387 application_info.apiVersion = VK_API_VERSION_1_1; 387 .apiVersion = VK_API_VERSION_1_1,
388 388 };
389 VkInstanceCreateInfo ci; 389
390 ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO; 390 const VkInstanceCreateInfo ci{
391 ci.pNext = nullptr; 391 .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
392 ci.flags = 0; 392 .pNext = nullptr,
393 ci.pApplicationInfo = &application_info; 393 .flags = 0,
394 ci.enabledLayerCount = layers.size(); 394 .pApplicationInfo = &application_info,
395 ci.ppEnabledLayerNames = layers.data(); 395 .enabledLayerCount = layers.size(),
396 ci.enabledExtensionCount = extensions.size(); 396 .ppEnabledLayerNames = layers.data(),
397 ci.ppEnabledExtensionNames = extensions.data(); 397 .enabledExtensionCount = extensions.size(),
398 .ppEnabledExtensionNames = extensions.data(),
399 };
398 400
399 VkInstance instance; 401 VkInstance instance;
400 if (dld.vkCreateInstance(&ci, nullptr, &instance) != VK_SUCCESS) { 402 if (dld.vkCreateInstance(&ci, nullptr, &instance) != VK_SUCCESS) {
@@ -425,19 +427,20 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
 
 DebugCallback Instance::TryCreateDebugCallback(
     PFN_vkDebugUtilsMessengerCallbackEXT callback) noexcept {
-    VkDebugUtilsMessengerCreateInfoEXT ci;
-    ci.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
-    ci.pNext = nullptr;
-    ci.flags = 0;
-    ci.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
-                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
-                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
-                         VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
-    ci.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
-                     VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
-                     VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
-    ci.pfnUserCallback = callback;
-    ci.pUserData = nullptr;
+    const VkDebugUtilsMessengerCreateInfoEXT ci{
+        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+        .pNext = nullptr,
+        .flags = 0,
+        .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
+                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT |
+                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
+        .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                       VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+                       VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+        .pfnUserCallback = callback,
+        .pUserData = nullptr,
+    };
 
     VkDebugUtilsMessengerEXT messenger;
     if (dld->vkCreateDebugUtilsMessengerEXT(handle, &ci, nullptr, &messenger) != VK_SUCCESS) {
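
Note the distinction the conversion preserves: VkApplicationInfo above became
static constexpr because every field is a compile-time constant, while this
create info stays a local const, since .pfnUserCallback = callback captures a
run-time argument. A small sketch of that rule, with illustrative names only:

// Illustrative names; not wrapper code.
using Callback = void (*)();

struct Config {
    int id;
    Callback cb;
};

// Every field is known at compile time, so the object can be constexpr and is
// materialized once in read-only storage rather than rebuilt on each call.
constexpr Config fixed{.id = 1, .cb = nullptr};

Config Make(Callback cb) {
    // cb is only known at run time, so this object can be const at most.
    const Config cfg{.id = 2, .cb = cb};
    return cfg;
}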
@@ -468,12 +471,13 @@ DescriptorSets DescriptorPool::Allocate(const VkDescriptorSetAllocateInfo& ai) c
 }
 
 CommandBuffers CommandPool::Allocate(std::size_t num_buffers, VkCommandBufferLevel level) const {
-    VkCommandBufferAllocateInfo ai;
-    ai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
-    ai.pNext = nullptr;
-    ai.commandPool = handle;
-    ai.level = level;
-    ai.commandBufferCount = static_cast<u32>(num_buffers);
+    const VkCommandBufferAllocateInfo ai{
+        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+        .pNext = nullptr,
+        .commandPool = handle,
+        .level = level,
+        .commandBufferCount = static_cast<u32>(num_buffers),
+    };
 
     std::unique_ptr buffers = std::make_unique<VkCommandBuffer[]>(num_buffers);
     switch (const VkResult result = dld->vkAllocateCommandBuffers(owner, &ai, buffers.get())) {
@@ -497,17 +501,18 @@ std::vector<VkImage> SwapchainKHR::GetImages() const {
 Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
                       Span<const char*> enabled_extensions, const void* next,
                       DeviceDispatch& dld) noexcept {
-    VkDeviceCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
-    ci.pNext = next;
-    ci.flags = 0;
-    ci.queueCreateInfoCount = queues_ci.size();
-    ci.pQueueCreateInfos = queues_ci.data();
-    ci.enabledLayerCount = 0;
-    ci.ppEnabledLayerNames = nullptr;
-    ci.enabledExtensionCount = enabled_extensions.size();
-    ci.ppEnabledExtensionNames = enabled_extensions.data();
-    ci.pEnabledFeatures = nullptr;
+    const VkDeviceCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+        .pNext = next,
+        .flags = 0,
+        .queueCreateInfoCount = queues_ci.size(),
+        .pQueueCreateInfos = queues_ci.data(),
+        .enabledLayerCount = 0,
+        .ppEnabledLayerNames = nullptr,
+        .enabledExtensionCount = enabled_extensions.size(),
+        .ppEnabledExtensionNames = enabled_extensions.data(),
+        .pEnabledFeatures = nullptr,
+    };
 
     VkDevice device;
     if (dld.vkCreateDevice(physical_device, &ci, nullptr, &device) != VK_SUCCESS) {
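
Device::Create forwards its next argument straight into .pNext, which is how
callers chain extension feature structures into device creation. A hedged
sketch of building such a chain (the specific feature structs here are
illustrative and not taken from this diff):

#include <vulkan/vulkan.h>

// storage16 is owned by the caller and must outlive the vkCreateDevice call.
VkPhysicalDeviceFeatures2 MakeFeatureChain(VkPhysicalDevice16BitStorageFeatures& storage16) {
    storage16 = VkPhysicalDevice16BitStorageFeatures{
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES,
        .pNext = nullptr,
        .storageBuffer16BitAccess = VK_TRUE,
    };
    return VkPhysicalDeviceFeatures2{
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &storage16, // head of the chain; pass its address as the next parameter
    };
}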
@@ -548,10 +553,11 @@ ImageView Device::CreateImageView(const VkImageViewCreateInfo& ci) const {
 }
 
 Semaphore Device::CreateSemaphore() const {
-    VkSemaphoreCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
+    static constexpr VkSemaphoreCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+    };
 
     VkSemaphore object;
     Check(dld->vkCreateSemaphore(handle, &ci, nullptr, &object));
@@ -639,10 +645,12 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
 }
 
 Event Device::CreateEvent() const {
-    VkEventCreateInfo ci;
-    ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
-    ci.pNext = nullptr;
-    ci.flags = 0;
+    static constexpr VkEventCreateInfo ci{
+        .sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
+        .pNext = nullptr,
+        .flags = 0,
+    };
+
     VkEvent object;
     Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
     return Event(object, handle, *dld);
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index b7608fc7b..015a789d6 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -209,11 +209,11 @@ private:
         }
 
         // Remove them from the cache
-        const auto is_removed = [&removed_shaders](std::unique_ptr<T>& shader) {
+        const auto is_removed = [&removed_shaders](const std::unique_ptr<T>& shader) {
             return std::find(removed_shaders.begin(), removed_shaders.end(), shader.get()) !=
                    removed_shaders.end();
         };
-        storage.erase(std::remove_if(storage.begin(), storage.end(), is_removed), storage.end());
+        std::erase_if(storage, is_removed);
     }
 
     /// @brief Creates a new entry in the lookup cache and returns its pointer
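
std::erase_if (C++20) collapses the erase-remove idiom above into one call and
also returns how many elements were removed. A self-contained sketch of the
equivalence, with illustrative values:

#include <cassert>
#include <vector>

int main() {
    std::vector<int> storage{1, 2, 3, 4, 5, 6};
    const auto is_removed = [](int v) { return v % 2 == 0; };

    // Pre-C++20 spelling of the same operation:
    //   storage.erase(std::remove_if(storage.begin(), storage.end(), is_removed),
    //                 storage.end());
    const auto erased = std::erase_if(storage, is_removed);

    assert(erased == 3);
    assert((storage == std::vector<int>{1, 3, 5}));
    return 0;
}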