-rwxr-xr-x [-rw-r--r--]  .ci/scripts/windows/docker.sh | 8
m---------  externals/cubeb | 0
m---------  externals/dynarmic | 0
m---------  externals/sirit | 0
-rw-r--r--  src/core/arm/dynarmic/arm_dynarmic_64.cpp | 7
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.cpp | 5
-rw-r--r--  src/core/arm/unicorn/arm_unicorn.h | 7
-rw-r--r--  src/core/hle/kernel/handle_table.cpp | 2
-rw-r--r--  src/core/hle/kernel/mutex.cpp | 5
-rw-r--r--  src/core/hle/kernel/physical_core.cpp | 4
-rw-r--r--  src/core/hle/kernel/process_capability.cpp | 30
-rw-r--r--  src/core/hle/kernel/readable_event.cpp | 3
-rw-r--r--  src/core/hle/kernel/resource_limit.cpp | 2
-rw-r--r--  src/core/hle/kernel/svc.cpp | 11
-rw-r--r--  src/core/hle/kernel/thread.cpp | 2
-rw-r--r--  src/core/hle/service/acc/acc.cpp | 46
-rw-r--r--  src/core/hle/service/acc/acc.h | 3
-rw-r--r--  src/core/hle/service/acc/acc_su.cpp | 2
-rw-r--r--  src/core/hle/service/acc/acc_u0.cpp | 2
-rw-r--r--  src/core/hle/service/acc/acc_u1.cpp | 2
-rw-r--r--  src/core/hle/service/am/am.cpp | 40
-rw-r--r--  src/core/hle/service/am/am.h | 3
-rw-r--r--  src/core/hle/service/caps/caps_su.cpp | 12
-rw-r--r--  src/core/hle/service/caps/caps_su.h | 3
-rw-r--r--  src/core/hle/service/filesystem/fsp_srv.cpp | 4
-rw-r--r--  src/core/hle/service/glue/errors.h | 8
-rw-r--r--  src/core/hle/service/nim/nim.cpp | 70
-rw-r--r--  src/core/hle/service/ns/ns.cpp | 8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h | 5
-rw-r--r--  src/core/hle/service/ptm/psm.cpp | 21
-rw-r--r--  src/core/hle/service/set/set.cpp | 1
-rw-r--r--  src/core/hle/service/sm/sm.cpp | 12
-rw-r--r--  src/core/hle/service/vi/vi.cpp | 9
-rw-r--r--  src/video_core/CMakeLists.txt | 2
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h | 1
-rw-r--r--  src/video_core/engines/maxwell_3d.h | 1
-rw-r--r--  src/video_core/engines/shader_bytecode.h | 4
-rw-r--r--  src/video_core/fence_manager.h | 6
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp | 79
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h | 3
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 82
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.cpp | 62
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_decompiler.h | 33
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/fixed_pipeline_state.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.cpp | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_buffer_cache.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_descriptor_pool.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_device.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp | 74
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h | 10
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp | 22
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_sampler_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.cpp | 83
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_decompiler.h | 4
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_shader_util.h | 1
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 3
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 6
-rw-r--r--  src/video_core/renderer_vulkan/vk_update_descriptor.h | 1
-rw-r--r--  src/video_core/shader/control_flow.cpp | 12
-rw-r--r--  src/video_core/shader/decode.cpp | 30
-rw-r--r--  src/video_core/shader/decode/arithmetic_integer.cpp | 31
-rw-r--r--  src/video_core/shader/decode/image.cpp | 18
-rw-r--r--  src/video_core/shader/decode/register_set_predicate.cpp | 52
-rw-r--r--  src/video_core/shader/decode/texture.cpp | 192
-rw-r--r--  src/video_core/shader/memory_util.cpp | 77
-rw-r--r--  src/video_core/shader/memory_util.h | 47
-rw-r--r--  src/video_core/shader/node.h | 131
-rw-r--r--  src/video_core/shader/shader_ir.h | 37
-rw-r--r--  src/video_core/shader/track.cpp | 20
-rw-r--r--  src/video_core/texture_cache/surface_params.cpp | 10
-rw-r--r--  src/video_core/texture_cache/texture_cache.h | 70
-rw-r--r--  src/yuzu/main.cpp | 4
90 files changed, 866 insertions, 726 deletions
diff --git a/.ci/scripts/windows/docker.sh b/.ci/scripts/windows/docker.sh
index beb554b65..a55541e10 100644..100755
--- a/.ci/scripts/windows/docker.sh
+++ b/.ci/scripts/windows/docker.sh
@@ -29,7 +29,13 @@ echo 'Prepare binaries...'
 cd ..
 mkdir package
 
-QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/'
+if [ -d "/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/" ]; then
+    QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt5/plugins/platforms/'
+else
+    #fallback to qt
+    QT_PLATFORM_DLL_PATH='/usr/x86_64-w64-mingw32/lib/qt/plugins/platforms/'
+fi
+
 find build/ -name "yuzu*.exe" -exec cp {} 'package' \;
 
 # copy Qt plugins
diff --git a/externals/cubeb b/externals/cubeb
-Subproject 6f2420de8f155b10330cf973900ac7bdbfee589
+Subproject 616d773441b5355800ce64197a699e6cd6b3617
diff --git a/externals/dynarmic b/externals/dynarmic
-Subproject a3cd05577c9b6c51f0f345d0e915b6feab68fe1
+Subproject e7166e8ba74d7b9c85e87afc0aaf667e7e84cfe
diff --git a/externals/sirit b/externals/sirit
-Subproject a712959f1e373a33b48042b5934e288a243d595
+Subproject 414fc4dbd28d8fe48f735a0c389db8a234f733c
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 65cbfe5e6..337b97be9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -185,10 +185,9 @@ void ARM_Dynarmic_64::Step() {
 
 ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
                                  std::size_t core_index)
-    : ARM_Interface{system},
-      cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system},
-      core_index{core_index}, exclusive_monitor{
-                                  dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
+    : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)),
+      inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index},
+      exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
 
 ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
 
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index b96583123..e40e9626a 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -62,8 +62,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
     return false;
 }
 
-ARM_Unicorn::ARM_Unicorn(System& system) : ARM_Interface{system} {
-    CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc));
+ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} {
+    const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
+    CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
 
     auto fpv = 3 << 20;
     CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv));
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index f30d13cb6..725c65085 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -15,7 +15,12 @@ class System;
 
 class ARM_Unicorn final : public ARM_Interface {
 public:
-    explicit ARM_Unicorn(System& system);
+    enum class Arch {
+        AArch32, // 32-bit ARM
+        AArch64, // 64-bit ARM
+    };
+
+    explicit ARM_Unicorn(System& system, Arch architecture);
     ~ARM_Unicorn() override;
 
     void SetPC(u64 pc) override;
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index e441a27fc..35448b576 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -30,6 +30,7 @@ HandleTable::~HandleTable() = default;
 
 ResultCode HandleTable::SetSize(s32 handle_table_size) {
     if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
+        LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
         return ERR_OUT_OF_MEMORY;
     }
 
@@ -80,6 +81,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
 
 ResultCode HandleTable::Close(Handle handle) {
     if (!IsValid(handle)) {
+        LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
         return ERR_INVALID_HANDLE;
     }
 
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index eff4e45b0..7869eb32b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -7,6 +7,7 @@
 #include <vector>
 
 #include "common/assert.h"
+#include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
@@ -67,6 +68,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
                              Handle requesting_thread_handle) {
     // The mutex address must be 4-byte aligned
     if ((address % sizeof(u32)) != 0) {
+        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
         return ERR_INVALID_ADDRESS;
     }
 
@@ -88,6 +90,8 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
     }
 
     if (holding_thread == nullptr) {
+        LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}",
+                  holding_thread_handle);
         return ERR_INVALID_HANDLE;
     }
 
@@ -109,6 +113,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
 ResultCode Mutex::Release(VAddr address) {
     // The mutex address must be 4-byte aligned
     if ((address % sizeof(u32)) != 0) {
+        LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
         return ERR_INVALID_ADDRESS;
     }
 
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index aa2787467..a15011076 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -27,7 +27,9 @@ PhysicalCore::PhysicalCore(Core::System& system, std::size_t id,
         std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
 
 #else
-    arm_interface = std::make_shared<Core::ARM_Unicorn>(system);
+    using Core::ARM_Unicorn;
+    arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
+    arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
     LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
 #endif
 
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 48e5ae682..63880f13d 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include "common/bit_util.h"
+#include "common/logging/log.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/memory/page_table.h"
@@ -119,22 +120,30 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
             // The MapPhysical type uses two descriptor flags for its parameters.
             // If there's only one, then there's a problem.
             if (i >= num_capabilities) {
+                LOG_ERROR(Kernel, "Invalid combination! i={}", i);
                 return ERR_INVALID_COMBINATION;
             }
 
             const auto size_flags = capabilities[i];
             if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
+                LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
                 return ERR_INVALID_COMBINATION;
             }
 
             const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
             if (result.IsError()) {
+                LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
+                          descriptor, size_flags);
                 return result;
             }
         } else {
             const auto result =
                 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
             if (result.IsError()) {
+                LOG_ERROR(
+                    Kernel,
+                    "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
+                    set_flags, set_svc_bits, descriptor);
                 return result;
             }
         }
@@ -162,6 +171,9 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
     const u32 flag_length = GetFlagBitOffset(type);
     const u32 set_flag = 1U << flag_length;
     if ((set_flag & set_flags & InitializeOnceMask) != 0) {
+        LOG_ERROR(Kernel,
+                  "Attempted to initialize flags that may only be initialized once. set_flags={}",
+                  set_flags);
         return ERR_INVALID_COMBINATION;
     }
     set_flags |= set_flag;
@@ -187,6 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
         break;
     }
 
+    LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type));
     return ERR_INVALID_CAPABILITY_DESCRIPTOR;
 }
 
@@ -208,23 +221,31 @@ void ProcessCapabilities::Clear() {
 
 ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
     if (priority_mask != 0 || core_mask != 0) {
+        LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
+                  priority_mask, core_mask);
         return ERR_INVALID_CAPABILITY_DESCRIPTOR;
     }
 
     const u32 core_num_min = (flags >> 16) & 0xFF;
     const u32 core_num_max = (flags >> 24) & 0xFF;
     if (core_num_min > core_num_max) {
+        LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
+                  core_num_min, core_num_max);
         return ERR_INVALID_COMBINATION;
     }
 
     const u32 priority_min = (flags >> 10) & 0x3F;
     const u32 priority_max = (flags >> 4) & 0x3F;
     if (priority_min > priority_max) {
+        LOG_ERROR(Kernel,
+                  "Priority min is greater than priority max! priority_min={}, priority_max={}",
+                  core_num_min, priority_max);
         return ERR_INVALID_COMBINATION;
     }
 
     // The switch only has 4 usable cores.
     if (core_num_max >= 4) {
+        LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
         return ERR_INVALID_PROCESSOR_ID;
     }
 
@@ -259,6 +280,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
     }
 
     if (svc_number >= svc_capabilities.size()) {
+        LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
         return ERR_OUT_OF_RANGE;
     }
 
@@ -295,6 +317,8 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
         // emulate that, it's sufficient to mark every interrupt as defined.
 
         if (interrupt >= interrupt_capabilities.size()) {
+            LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
+                      interrupt);
             return ERR_OUT_OF_RANGE;
         }
 
@@ -307,6 +331,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
 ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
     const u32 reserved = flags >> 17;
     if (reserved != 0) {
+        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
         return ERR_RESERVED_VALUE;
     }
 
@@ -324,6 +349,9 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
     const u32 major_version = kernel_version >> 19;
 
     if (major_version != 0 || flags < 0x80000) {
+        LOG_ERROR(Kernel,
+                  "Kernel version is non zero or flags are too small! major_version={}, flags={}",
+                  major_version, flags);
         return ERR_INVALID_CAPABILITY_DESCRIPTOR;
     }
 
@@ -334,6 +362,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
 ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
     const u32 reserved = flags >> 26;
     if (reserved != 0) {
+        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
         return ERR_RESERVED_VALUE;
     }
 
@@ -344,6 +373,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
 ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
     const u32 reserved = flags >> 19;
     if (reserved != 0) {
+        LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
         return ERR_RESERVED_VALUE;
     }
 
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 9d3d3a81b..e2a404d07 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -4,6 +4,7 @@
 
 #include <algorithm>
 #include "common/assert.h"
+#include "common/logging/log.h"
 #include "core/hle/kernel/errors.h"
 #include "core/hle/kernel/object.h"
 #include "core/hle/kernel/readable_event.h"
@@ -35,6 +36,8 @@ void ReadableEvent::Clear() {
 
 ResultCode ReadableEvent::Reset() {
     if (!is_signaled) {
+        LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
+                  GetObjectId(), GetTypeName(), GetName());
         return ERR_INVALID_STATE;
     }
 
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 96e5b9892..d9beaa3a4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -69,6 +69,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
         limit[index] = value;
         return RESULT_SUCCESS;
     } else {
+        LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
+                  static_cast<u32>(resource), value, index);
         return ERR_INVALID_STATE;
     }
 }
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 25b4a23b4..4ae4529f5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -685,6 +685,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
     case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
     case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
         if (info_sub_id != 0) {
+            LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+                      info_sub_id);
             return ERR_INVALID_ENUM_VALUE;
         }
 
@@ -692,6 +694,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
             system.Kernel().CurrentProcess()->GetHandleTable();
         const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle));
         if (!process) {
+            LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
+                      info_id, info_sub_id, handle);
             return ERR_INVALID_HANDLE;
         }
 
@@ -773,7 +777,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
             break;
         }
 
-        LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
+        LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
         return ERR_INVALID_ENUM_VALUE;
     }
 
@@ -783,10 +787,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
 
     case GetInfoType::RegisterResourceLimit: {
         if (handle != 0) {
+            LOG_ERROR(Kernel, "Handle is non zero! handle={:08X}", handle);
             return ERR_INVALID_HANDLE;
         }
 
         if (info_sub_id != 0) {
+            LOG_ERROR(Kernel, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
+                      info_sub_id);
             return ERR_INVALID_COMBINATION;
         }
 
@@ -866,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
     }
 
     default:
-        LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id);
+        LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
         return ERR_INVALID_ENUM_VALUE;
     }
 }
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index a919750a6..db7f379ac 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -423,6 +423,8 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
     if (new_core == THREADPROCESSORID_DONT_UPDATE) {
         new_core = use_override ? ideal_core_override : ideal_core;
         if ((new_affinity_mask & (1ULL << new_core)) == 0) {
+            LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
+                      new_core, new_affinity_mask);
             return ERR_INVALID_COMBINATION;
         }
     }
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index cfac8ca9a..9a7992f58 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -319,46 +319,37 @@ void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestCon
 
 void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
-    auto pid = rp.Pop<u64>();
 
-    LOG_DEBUG(Service_ACC, "called, process_id={}", pid);
+    LOG_DEBUG(Service_ACC, "called");
     IPC::ResponseBuilder rb{ctx, 2};
-    rb.Push(InitializeApplicationInfoBase(pid));
+    rb.Push(InitializeApplicationInfoBase());
 }
 
 void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
-    auto pid = rp.Pop<u64>();
 
-    LOG_WARNING(Service_ACC, "(Partial implementation) called, process_id={}", pid);
+    LOG_WARNING(Service_ACC, "(Partial implementation) called");
 
     // TODO(ogniK): We require checking if the user actually owns the title and what not. As of
     // currently, we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called
     // first then we do extra checks if the game is a digital copy.
 
     IPC::ResponseBuilder rb{ctx, 2};
-    rb.Push(InitializeApplicationInfoBase(pid));
+    rb.Push(InitializeApplicationInfoBase());
 }
 
-ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
+ResultCode Module::Interface::InitializeApplicationInfoBase() {
     if (application_info) {
         LOG_ERROR(Service_ACC, "Application already initialized");
         return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
     }
 
-    const auto& list = system.Kernel().GetProcessList();
-    const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) {
-        return process->GetProcessID() == process_id;
-    });
-
-    if (iter == list.end()) {
-        LOG_ERROR(Service_ACC, "Failed to find process ID");
-        application_info.application_type = ApplicationType::Unknown;
-
-        return ERR_ACCOUNTINFO_BAD_APPLICATION;
-    }
-
-    const auto launch_property = system.GetARPManager().GetLaunchProperty((*iter)->GetTitleID());
+    // TODO(ogniK): This should be changed to reflect the target process for when we have multiple
+    // processes emulated. As we don't actually have pid support we should assume we're just using
+    // our own process
+    const auto& current_process = system.Kernel().CurrentProcess();
+    const auto launch_property =
+        system.GetARPManager().GetLaunchProperty(current_process->GetTitleID());
 
     if (launch_property.Failed()) {
         LOG_ERROR(Service_ACC, "Failed to get launch property");
@@ -372,10 +363,12 @@ ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
     case FileSys::StorageId::Host:
     case FileSys::StorageId::NandUser:
     case FileSys::StorageId::SdCard:
+    case FileSys::StorageId::None: // Yuzu specific, differs from hardware
         application_info.application_type = ApplicationType::Digital;
         break;
     default:
-        LOG_ERROR(Service_ACC, "Invalid game storage ID");
+        LOG_ERROR(Service_ACC, "Invalid game storage ID! storage_id={}",
+                  launch_property->base_game_storage_id);
         return ERR_ACCOUNTINFO_BAD_APPLICATION;
     }
 
@@ -428,6 +421,17 @@ void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) {
     rb.PushIpcInterface<IProfileEditor>(user_id, *profile_manager);
 }
 
+void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
+    LOG_DEBUG(Service_ACC, "called");
+
+    // All users should be qualified. We don't actually have parental control or anything to do with
+    // nintendo online currently. We're just going to assume the user running the game has access to
+    // the game regardless of parental control settings.
+    ctx.WriteBuffer(profile_manager->GetAllUsers());
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(RESULT_SUCCESS);
+}
+
 void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_ACC, "called");
     // A u8 is passed into this function which we can safely ignore. It's to determine if we have
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 7a7dc9ec6..74ca39d6e 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -33,9 +33,10 @@ public:
         void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
         void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
         void GetProfileEditor(Kernel::HLERequestContext& ctx);
+        void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
 
     private:
-        ResultCode InitializeApplicationInfoBase(u64 process_id);
+        ResultCode InitializeApplicationInfoBase();
 
         enum class ApplicationType : u32_le {
             GameCard = 0,
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index ae88deda5..2eefc6df5 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -35,7 +35,7 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
         {113, nullptr, "GetSaveDataThumbnailExistence"},
         {120, nullptr, "ListOpenUsersInApplication"},
         {130, nullptr, "ActivateOpenContextRetention"},
-        {140, nullptr, "ListQualifiedUsers"},
+        {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"},
         {150, nullptr, "AuthenticateApplicationAsync"},
         {190, nullptr, "GetUserLastOpenedApplication"},
         {191, nullptr, "ActivateOpenContextHolder"},
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index 0ac19f4ff..fb4e7e772 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -32,7 +32,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
         {130, nullptr, "LoadOpenContext"},
         {131, nullptr, "ListOpenContextStoredUsers"},
         {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"},
-        {141, nullptr, "ListQualifiedUsers"},
+        {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"},
         {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"},
     };
     // clang-format on
diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp
index 2b9c11928..9f29cdc82 100644
--- a/src/core/hle/service/acc/acc_u1.cpp
+++ b/src/core/hle/service/acc/acc_u1.cpp
@@ -34,7 +34,7 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
         {112, nullptr, "LoadSaveDataThumbnail"},
         {113, nullptr, "GetSaveDataThumbnailExistence"},
         {130, nullptr, "ActivateOpenContextRetention"},
-        {140, nullptr, "ListQualifiedUsers"},
+        {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"},
         {150, nullptr, "AuthenticateApplicationAsync"},
         {190, nullptr, "GetUserLastOpenedApplication"},
         {191, nullptr, "ActivateOpenContextHolder"},
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index bee4a9d3f..9450de6e9 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -43,20 +43,15 @@
 
 namespace Service::AM {
 
-constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 0x2};
-constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 0x3};
-constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 0x1F7};
+constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 2};
+constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 3};
+constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 503};
 
 enum class LaunchParameterKind : u32 {
     ApplicationSpecific = 1,
     AccountPreselectedUser = 2,
 };
 
-enum class VrMode : u8 {
-    Disabled = 0,
-    Enabled = 1,
-};
-
 constexpr u32 LAUNCH_PARAMETER_ACCOUNT_PRESELECTED_USER_MAGIC = 0xC79497CA;
 
 struct LaunchParameterAccountPreselectedUser {
@@ -685,27 +680,21 @@ void ICommonStateGetter::GetCurrentFocusState(Kernel::HLERequestContext& ctx) {
 }
 
 void ICommonStateGetter::IsVrModeEnabled(Kernel::HLERequestContext& ctx) {
-    LOG_WARNING(Service_AM, "(STUBBED) called");
+    LOG_DEBUG(Service_AM, "called");
 
     IPC::ResponseBuilder rb{ctx, 3};
     rb.Push(RESULT_SUCCESS);
-    rb.PushEnum(VrMode::Disabled);
+    rb.Push(vr_mode_state);
 }
 
 void ICommonStateGetter::SetVrModeEnabled(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
-    const auto is_vr_mode_enabled = rp.Pop<bool>();
+    vr_mode_state = rp.Pop<bool>();
 
-    LOG_WARNING(Service_AM, "(STUBBED) called. is_vr_mode_enabled={}", is_vr_mode_enabled);
+    LOG_WARNING(Service_AM, "VR Mode is {}", vr_mode_state ? "on" : "off");
 
     IPC::ResponseBuilder rb{ctx, 2};
-    if (!is_vr_mode_enabled) {
-        rb.Push(RESULT_SUCCESS);
-    } else {
-        // TODO: Find better error code for this
-        UNIMPLEMENTED_MSG("is_vr_mode_enabled={}", is_vr_mode_enabled);
-        rb.Push(RESULT_UNKNOWN);
-    }
+    rb.Push(RESULT_SUCCESS);
 }
 
 void ICommonStateGetter::SetLcdBacklighOffEnabled(Kernel::HLERequestContext& ctx) {
@@ -1169,7 +1158,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
         {121, nullptr, "ClearUserChannel"},
         {122, nullptr, "UnpopToUserChannel"},
         {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
-        {140, nullptr, "GetFriendInvitationStorageChannelEvent"},
+        {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
         {141, nullptr, "TryPopFromFriendInvitationStorageChannel"},
         {150, nullptr, "GetNotificationStorageChannelEvent"},
         {151, nullptr, "TryPopFromNotificationStorageChannel"},
@@ -1186,6 +1175,9 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
     auto& kernel = system.Kernel();
     gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair(
         kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent");
+
+    friend_invitation_storage_channel_event = Kernel::WritableEvent::CreateEventPair(
+        kernel, "IApplicationFunctions:FriendInvitationStorageChannelEvent");
 }
 
 IApplicationFunctions::~IApplicationFunctions() = default;
@@ -1500,6 +1492,14 @@ void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestCon
     rb.PushCopyObjects(gpu_error_detected_event.readable);
 }
 
+void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx) {
+    LOG_DEBUG(Service_AM, "called");
+
+    IPC::ResponseBuilder rb{ctx, 2, 1};
+    rb.Push(RESULT_SUCCESS);
+    rb.PushCopyObjects(friend_invitation_storage_channel_event.readable);
+}
+
 void InstallInterfaces(SM::ServiceManager& service_manager,
                        std::shared_ptr<NVFlinger::NVFlinger> nvflinger, Core::System& system) {
     auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel());
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 53cfce10f..dfa701d73 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -191,6 +191,7 @@ private:
 
     Core::System& system;
     std::shared_ptr<AppletMessageQueue> msg_queue;
+    bool vr_mode_state{};
 };
 
 class IStorageImpl {
@@ -280,10 +281,12 @@ private:
     void QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx);
     void QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx);
     void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx);
+    void GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx);
 
     bool launch_popped_application_specific = false;
     bool launch_popped_account_preselect = false;
     Kernel::EventPair gpu_error_detected_event;
+    Kernel::EventPair friend_invitation_storage_channel_event;
     Core::System& system;
 };
 
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index 2b4c2d808..e8b0698e8 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -2,6 +2,8 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/logging/log.h"
+#include "core/hle/ipc_helpers.h"
 #include "core/hle/service/caps/caps_su.h"
 
 namespace Service::Capture {
@@ -9,8 +11,11 @@ namespace Service::Capture {
 CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
     // clang-format off
     static const FunctionInfo functions[] = {
+        {32, &CAPS_SU::SetShimLibraryVersion, "SetShimLibraryVersion"},
         {201, nullptr, "SaveScreenShot"},
         {203, nullptr, "SaveScreenShotEx0"},
+        {205, nullptr, "SaveScreenShotEx1"},
+        {210, nullptr, "SaveScreenShotEx2"},
     };
     // clang-format on
 
@@ -19,4 +24,11 @@ CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
 
 CAPS_SU::~CAPS_SU() = default;
 
+void CAPS_SU::SetShimLibraryVersion(Kernel::HLERequestContext& ctx) {
+    LOG_WARNING(Service_Capture, "(STUBBED) called");
+
+    IPC::ResponseBuilder rb{ctx, 2};
+    rb.Push(RESULT_SUCCESS);
+}
+
 } // namespace Service::Capture
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index cb11f7c9a..c494d7c84 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -16,6 +16,9 @@ class CAPS_SU final : public ServiceFramework<CAPS_SU> {
 public:
     explicit CAPS_SU();
     ~CAPS_SU() override;
+
+private:
+    void SetShimLibraryVersion(Kernel::HLERequestContext& ctx);
 };
 
 } // namespace Service::Capture
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 6b9b4f3b9..f6503fe2f 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -316,8 +316,8 @@ public:
         {8, &IFileSystem::OpenFile, "OpenFile"},
         {9, &IFileSystem::OpenDirectory, "OpenDirectory"},
         {10, &IFileSystem::Commit, "Commit"},
-        {11, nullptr, "GetFreeSpaceSize"},
-        {12, nullptr, "GetTotalSpaceSize"},
+        {11, &IFileSystem::GetFreeSpaceSize, "GetFreeSpaceSize"},
+        {12, &IFileSystem::GetTotalSpaceSize, "GetTotalSpaceSize"},
         {13, &IFileSystem::CleanDirectoryRecursively, "CleanDirectoryRecursively"},
         {14, nullptr, "GetFileTimeStampRaw"},
         {15, nullptr, "QueryEntry"},
diff --git a/src/core/hle/service/glue/errors.h b/src/core/hle/service/glue/errors.h
index c2874c585..f6647f724 100644
--- a/src/core/hle/service/glue/errors.h
+++ b/src/core/hle/service/glue/errors.h
@@ -8,9 +8,9 @@
 
 namespace Service::Glue {
 
-constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 0x1E};
-constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 0x1F};
-constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 0x2A};
-constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 0x66};
+constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 30};
+constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 31};
+constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 42};
+constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 102};
 
 } // namespace Service::Glue
diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp
index e85f123e2..f19affce7 100644
--- a/src/core/hle/service/nim/nim.cpp
+++ b/src/core/hle/service/nim/nim.cpp
@@ -15,6 +15,66 @@
 
 namespace Service::NIM {
 
+class IShopServiceAsync final : public ServiceFramework<IShopServiceAsync> {
+public:
+    IShopServiceAsync() : ServiceFramework("IShopServiceAsync") {
+        // clang-format off
+        static const FunctionInfo functions[] = {
+            {0, nullptr, "Cancel"},
+            {1, nullptr, "GetSize"},
+            {2, nullptr, "Read"},
+            {3, nullptr, "GetErrorCode"},
+            {4, nullptr, "Request"},
+            {5, nullptr, "Prepare"},
+        };
+        // clang-format on
+
+        RegisterHandlers(functions);
+    }
+};
+
+class IShopServiceAccessor final : public ServiceFramework<IShopServiceAccessor> {
+public:
+    IShopServiceAccessor() : ServiceFramework("IShopServiceAccessor") {
+        // clang-format off
+        static const FunctionInfo functions[] = {
+            {0, &IShopServiceAccessor::CreateAsyncInterface, "CreateAsyncInterface"},
+        };
+        // clang-format on
+
+        RegisterHandlers(functions);
+    }
+
+private:
+    void CreateAsyncInterface(Kernel::HLERequestContext& ctx) {
+        LOG_WARNING(Service_NIM, "(STUBBED) called");
+        IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+        rb.Push(RESULT_SUCCESS);
+        rb.PushIpcInterface<IShopServiceAsync>();
+    }
+};
+
+class IShopServiceAccessServer final : public ServiceFramework<IShopServiceAccessServer> {
+public:
+    IShopServiceAccessServer() : ServiceFramework("IShopServiceAccessServer") {
+        // clang-format off
+        static const FunctionInfo functions[] = {
+            {0, &IShopServiceAccessServer::CreateAccessorInterface, "CreateAccessorInterface"},
+        };
+        // clang-format on
+
+        RegisterHandlers(functions);
+    }
+
+private:
+    void CreateAccessorInterface(Kernel::HLERequestContext& ctx) {
+        LOG_WARNING(Service_NIM, "(STUBBED) called");
+        IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+        rb.Push(RESULT_SUCCESS);
+        rb.PushIpcInterface<IShopServiceAccessor>();
+    }
+};
+
 class NIM final : public ServiceFramework<NIM> {
 public:
     explicit NIM() : ServiceFramework{"nim"} {
@@ -78,7 +138,7 @@ public:
     explicit NIM_ECA() : ServiceFramework{"nim:eca"} {
         // clang-format off
         static const FunctionInfo functions[] = {
-            {0, nullptr, "CreateServerInterface"},
+            {0, &NIM_ECA::CreateServerInterface, "CreateServerInterface"},
             {1, nullptr, "RefreshDebugAvailability"},
             {2, nullptr, "ClearDebugResponse"},
             {3, nullptr, "RegisterDebugResponse"},
@@ -87,6 +147,14 @@ public:
 
         RegisterHandlers(functions);
     }
+
+private:
+    void CreateServerInterface(Kernel::HLERequestContext& ctx) {
+        LOG_WARNING(Service_NIM, "(STUBBED) called");
+        IPC::ResponseBuilder rb{ctx, 2, 0, 1};
+        rb.Push(RESULT_SUCCESS);
+        rb.PushIpcInterface<IShopServiceAccessServer>();
+    }
 };
 
 class NIM_SHP final : public ServiceFramework<NIM_SHP> {
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index 8fb88990e..7e5ceccdb 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -371,10 +371,15 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
     // Convert to application language, get priority list
     const auto application_language = ConvertToApplicationLanguage(language_code);
     if (application_language == std::nullopt) {
+        LOG_ERROR(Service_NS, "Could not convert application language! language_code={}",
+                  language_code);
         return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
     }
     const auto priority_list = GetApplicationLanguagePriorityList(*application_language);
     if (!priority_list) {
+        LOG_ERROR(Service_NS,
+                  "Could not find application language priorities! application_language={}",
+                  *application_language);
         return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
     }
 
@@ -386,6 +391,8 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
         }
     }
 
+    LOG_ERROR(Service_NS, "Could not find a valid language! supported_languages={:08X}",
+              supported_languages);
     return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
 }
 
@@ -410,6 +417,7 @@ ResultVal<u64> IApplicationManagerInterface::ConvertApplicationLanguageToLanguag
     const auto language_code =
         ConvertToLanguageCode(static_cast<ApplicationLanguage>(application_language));
     if (language_code == std::nullopt) {
+        LOG_ERROR(Service_NS, "Language not found! application_language={}", application_language);
         return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
     }
 
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 642b0a2cb..07b644ec5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -159,9 +159,10 @@ private:
     static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size");
 
     struct IoctlGetGpuTime {
-        u64_le gpu_time;
+        u64_le gpu_time{};
+        INSERT_PADDING_WORDS(2);
     };
-    static_assert(sizeof(IoctlGetGpuTime) == 8, "IoctlGetGpuTime is incorrect size");
+    static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
 
     u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
                            std::vector<u8>& output2, IoctlVersion version);
diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp
index c2d5fda94..12d154ecf 100644
--- a/src/core/hle/service/ptm/psm.cpp
+++ b/src/core/hle/service/ptm/psm.cpp
@@ -12,9 +12,6 @@
 
 namespace Service::PSM {
 
-constexpr u32 BATTERY_FULLY_CHARGED = 100; // 100% Full
-constexpr u32 BATTERY_CURRENTLY_CHARGING = 1; // Plugged into an official dock
-
 class PSM final : public ServiceFramework<PSM> {
 public:
     explicit PSM() : ServiceFramework{"psm"} {
@@ -48,20 +45,30 @@ public:
 
 private:
     void GetBatteryChargePercentage(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_PSM, "(STUBBED) called");
+        LOG_DEBUG(Service_PSM, "called");
 
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
-        rb.Push<u32>(BATTERY_FULLY_CHARGED);
+        rb.Push<u32>(battery_charge_percentage);
     }
 
     void GetChargerType(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_PSM, "(STUBBED) called");
+        LOG_DEBUG(Service_PSM, "called");
 
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
-        rb.Push<u32>(BATTERY_CURRENTLY_CHARGING);
+        rb.PushEnum(charger_type);
     }
+
+    enum class ChargerType : u32 {
+        Unplugged = 0,
+        RegularCharger = 1,
+        LowPowerCharger = 2,
+        Unknown = 3,
+    };
+
+    u32 battery_charge_percentage{100}; // 100%
+    ChargerType charger_type{ChargerType::RegularCharger};
 };
 
 void InstallInterfaces(SM::ServiceManager& sm) {
diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp
index 9e12c76fc..f3b4b286c 100644
--- a/src/core/hle/service/set/set.cpp
+++ b/src/core/hle/service/set/set.cpp
@@ -67,6 +67,7 @@ void SET::MakeLanguageCode(Kernel::HLERequestContext& ctx) {
     const auto index = rp.Pop<u32>();
 
     if (index >= available_language_codes.size()) {
+        LOG_ERROR(Service_SET, "Invalid language code index! index={}", index);
         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(ERR_INVALID_LANGUAGE);
         return;
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 88909504d..6ada13be4 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -28,9 +28,11 @@ void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) {
28 28
29static ResultCode ValidateServiceName(const std::string& name) { 29static ResultCode ValidateServiceName(const std::string& name) {
30 if (name.size() <= 0 || name.size() > 8) { 30 if (name.size() <= 0 || name.size() > 8) {
31 LOG_ERROR(Service_SM, "Invalid service name! service={}", name);
31 return ERR_INVALID_NAME; 32 return ERR_INVALID_NAME;
32 } 33 }
33 if (name.find('\0') != std::string::npos) { 34 if (name.find('\0') != std::string::npos) {
35 LOG_ERROR(Service_SM, "A non-null-terminated service name was passed");
34 return ERR_INVALID_NAME; 36 return ERR_INVALID_NAME;
35 } 37 }
36 return RESULT_SUCCESS; 38 return RESULT_SUCCESS;
@@ -51,8 +53,10 @@ ResultVal<std::shared_ptr<Kernel::ServerPort>> ServiceManager::RegisterService(
51 53
52 CASCADE_CODE(ValidateServiceName(name)); 54 CASCADE_CODE(ValidateServiceName(name));
53 55
54 if (registered_services.find(name) != registered_services.end()) 56 if (registered_services.find(name) != registered_services.end()) {
57 LOG_ERROR(Service_SM, "Service is already registered! service={}", name);
55 return ERR_ALREADY_REGISTERED; 58 return ERR_ALREADY_REGISTERED;
59 }
56 60
57 auto& kernel = Core::System::GetInstance().Kernel(); 61 auto& kernel = Core::System::GetInstance().Kernel();
58 auto [server_port, client_port] = 62 auto [server_port, client_port] =
@@ -66,9 +70,10 @@ ResultCode ServiceManager::UnregisterService(const std::string& name) {
66 CASCADE_CODE(ValidateServiceName(name)); 70 CASCADE_CODE(ValidateServiceName(name));
67 71
68 const auto iter = registered_services.find(name); 72 const auto iter = registered_services.find(name);
69 if (iter == registered_services.end()) 73 if (iter == registered_services.end()) {
74 LOG_ERROR(Service_SM, "Service is not registered! service={}", name);
70 return ERR_SERVICE_NOT_REGISTERED; 75 return ERR_SERVICE_NOT_REGISTERED;
71 76 }
72 registered_services.erase(iter); 77 registered_services.erase(iter);
73 return RESULT_SUCCESS; 78 return RESULT_SUCCESS;
74} 79}
@@ -79,6 +84,7 @@ ResultVal<std::shared_ptr<Kernel::ClientPort>> ServiceManager::GetServicePort(
79 CASCADE_CODE(ValidateServiceName(name)); 84 CASCADE_CODE(ValidateServiceName(name));
80 auto it = registered_services.find(name); 85 auto it = registered_services.find(name);
81 if (it == registered_services.end()) { 86 if (it == registered_services.end()) {
87 LOG_ERROR(Service_SM, "Service is not registered! service={}", name);
82 return ERR_SERVICE_NOT_REGISTERED; 88 return ERR_SERVICE_NOT_REGISTERED;
83 } 89 }
84 90
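Editor's note: the service-manager hunks above only add LOG_ERROR calls (and braces) on the existing failure paths; the validation rule itself is unchanged: a name must be 1 to 8 bytes long and must not contain an embedded null byte. A standalone sketch of that rule, returning bool instead of the ResultCode values used in the real function:

#include <string>

// Standalone restatement of ValidateServiceName's checks.
bool IsValidServiceName(const std::string& name) {
    if (name.empty() || name.size() > 8) {
        return false; // service names are limited to 8 bytes
    }
    if (name.find('\0') != std::string::npos) {
        return false; // embedded null bytes are rejected
    }
    return true;
}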
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 9390ca83d..46e14c2a3 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -867,6 +867,7 @@ private:
867 867
868 const auto layer_id = nv_flinger->CreateLayer(display); 868 const auto layer_id = nv_flinger->CreateLayer(display);
869 if (!layer_id) { 869 if (!layer_id) {
870 LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display);
870 IPC::ResponseBuilder rb{ctx, 2}; 871 IPC::ResponseBuilder rb{ctx, 2};
871 rb.Push(ERR_NOT_FOUND); 872 rb.Push(ERR_NOT_FOUND);
872 return; 873 return;
@@ -983,6 +984,7 @@ private:
983 984
984 const auto display_id = nv_flinger->OpenDisplay(name); 985 const auto display_id = nv_flinger->OpenDisplay(name);
985 if (!display_id) { 986 if (!display_id) {
987 LOG_ERROR(Service_VI, "Display not found! display_name={}", name);
986 IPC::ResponseBuilder rb{ctx, 2}; 988 IPC::ResponseBuilder rb{ctx, 2};
987 rb.Push(ERR_NOT_FOUND); 989 rb.Push(ERR_NOT_FOUND);
988 return; 990 return;
@@ -1082,6 +1084,7 @@ private:
1082 1084
1083 const auto display_id = nv_flinger->OpenDisplay(display_name); 1085 const auto display_id = nv_flinger->OpenDisplay(display_name);
1084 if (!display_id) { 1086 if (!display_id) {
1087 LOG_ERROR(Service_VI, "Display not found! display_name={}", display_name);
1085 IPC::ResponseBuilder rb{ctx, 2}; 1088 IPC::ResponseBuilder rb{ctx, 2};
1086 rb.Push(ERR_NOT_FOUND); 1089 rb.Push(ERR_NOT_FOUND);
1087 return; 1090 return;
@@ -1089,6 +1092,7 @@ private:
1089 1092
1090 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id); 1093 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id);
1091 if (!buffer_queue_id) { 1094 if (!buffer_queue_id) {
1095 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id);
1092 IPC::ResponseBuilder rb{ctx, 2}; 1096 IPC::ResponseBuilder rb{ctx, 2};
1093 rb.Push(ERR_NOT_FOUND); 1097 rb.Push(ERR_NOT_FOUND);
1094 return; 1098 return;
@@ -1124,6 +1128,7 @@ private:
1124 1128
1125 const auto layer_id = nv_flinger->CreateLayer(display_id); 1129 const auto layer_id = nv_flinger->CreateLayer(display_id);
1126 if (!layer_id) { 1130 if (!layer_id) {
1131 LOG_ERROR(Service_VI, "Layer not found! display_id={}", display_id);
1127 IPC::ResponseBuilder rb{ctx, 2}; 1132 IPC::ResponseBuilder rb{ctx, 2};
1128 rb.Push(ERR_NOT_FOUND); 1133 rb.Push(ERR_NOT_FOUND);
1129 return; 1134 return;
@@ -1131,6 +1136,7 @@ private:
1131 1136
1132 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id); 1137 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id);
1133 if (!buffer_queue_id) { 1138 if (!buffer_queue_id) {
1139 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id);
1134 IPC::ResponseBuilder rb{ctx, 2}; 1140 IPC::ResponseBuilder rb{ctx, 2};
1135 rb.Push(ERR_NOT_FOUND); 1141 rb.Push(ERR_NOT_FOUND);
1136 return; 1142 return;
@@ -1161,6 +1167,7 @@ private:
1161 1167
1162 const auto vsync_event = nv_flinger->FindVsyncEvent(display_id); 1168 const auto vsync_event = nv_flinger->FindVsyncEvent(display_id);
1163 if (!vsync_event) { 1169 if (!vsync_event) {
1170 LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id);
1164 IPC::ResponseBuilder rb{ctx, 2}; 1171 IPC::ResponseBuilder rb{ctx, 2};
1165 rb.Push(ERR_NOT_FOUND); 1172 rb.Push(ERR_NOT_FOUND);
1166 return; 1173 return;
@@ -1201,6 +1208,7 @@ private:
1201 case NintendoScaleMode::PreserveAspectRatio: 1208 case NintendoScaleMode::PreserveAspectRatio:
1202 return MakeResult(ConvertedScaleMode::PreserveAspectRatio); 1209 return MakeResult(ConvertedScaleMode::PreserveAspectRatio);
1203 default: 1210 default:
1211 LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode);
1204 return ERR_OPERATION_FAILED; 1212 return ERR_OPERATION_FAILED;
1205 } 1213 }
1206 } 1214 }
@@ -1257,6 +1265,7 @@ void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx,
1257 const auto policy = rp.PopEnum<Policy>(); 1265 const auto policy = rp.PopEnum<Policy>();
1258 1266
1259 if (!IsValidServiceAccess(permission, policy)) { 1267 if (!IsValidServiceAccess(permission, policy)) {
1268 LOG_ERROR(Service_VI, "Permission denied for policy {}", static_cast<u32>(policy));
1260 IPC::ResponseBuilder rb{ctx, 2}; 1269 IPC::ResponseBuilder rb{ctx, 2};
1261 rb.Push(ERR_PERMISSION_DENIED); 1270 rb.Push(ERR_PERMISSION_DENIED);
1262 return; 1271 return;
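Editor's note: each vi hunk above adds the same thing, a LOG_ERROR describing the failure immediately before the existing two-word error reply. The pattern, condensed from the OpenDisplay path above with the names used in the diff:

// Error-reply pattern used throughout these handlers: log the context, push
// the bare ResultCode, and return without any payload words.
if (!display_id) {
    LOG_ERROR(Service_VI, "Display not found! display_name={}", name);
    IPC::ResponseBuilder rb{ctx, 2}; // header only, no payload
    rb.Push(ERR_NOT_FOUND);
    return;
}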
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 8ede4ba9b..ff53282c9 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -124,6 +124,8 @@ add_library(video_core STATIC
124 shader/decode.cpp 124 shader/decode.cpp
125 shader/expr.cpp 125 shader/expr.cpp
126 shader/expr.h 126 shader/expr.h
127 shader/memory_util.cpp
128 shader/memory_util.h
127 shader/node_helper.cpp 129 shader/node_helper.cpp
128 shader/node_helper.h 130 shader/node_helper.h
129 shader/node.h 131 shader/node.h
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 510f11089..398f16181 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <list> 7#include <list>
9#include <memory> 8#include <memory>
10#include <mutex> 9#include <mutex>
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 3dfba8197..5e522e0d2 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -1179,6 +1179,7 @@ public:
1179 BitField<0, 1, u32> depth_range_0_1; 1179 BitField<0, 1, u32> depth_range_0_1;
1180 BitField<3, 1, u32> depth_clamp_near; 1180 BitField<3, 1, u32> depth_clamp_near;
1181 BitField<4, 1, u32> depth_clamp_far; 1181 BitField<4, 1, u32> depth_clamp_far;
1182 BitField<11, 1, u32> depth_clamp_disabled;
1182 } view_volume_clip_control; 1183 } view_volume_clip_control;
1183 1184
1184 INSERT_UNION_PADDING_WORDS(0x1F); 1185 INSERT_UNION_PADDING_WORDS(0x1F);
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index cde3a26b9..8dae754d4 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -814,6 +814,10 @@ union Instruction {
814 } alu_integer; 814 } alu_integer;
815 815
816 union { 816 union {
817 BitField<43, 1, u64> x;
818 } iadd;
819
820 union {
817 BitField<39, 1, u64> ftz; 821 BitField<39, 1, u64> ftz;
818 BitField<32, 1, u64> saturate; 822 BitField<32, 1, u64> saturate;
819 BitField<49, 2, HalfMerge> merge; 823 BitField<49, 2, HalfMerge> merge;
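Editor's note: the new iadd union exposes bit 43 of the instruction word, presumably the flag used by IADD's extended (.X) carry form. Pulling it out of a raw 64-bit opcode is the usual mask-and-shift that BitField<43, 1, u64> performs; a small sketch:

#include <cstdint>

// Equivalent of reading instr.iadd.x from a raw Maxwell instruction word.
constexpr bool IaddExtendedFlag(std::uint64_t instruction) {
    return ((instruction >> 43) & 1ULL) != 0;
}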
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index dabd1588c..8b2a6a42c 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -88,7 +88,8 @@ public:
88 } 88 }
89 PopAsyncFlushes(); 89 PopAsyncFlushes();
90 if (current_fence->IsSemaphore()) { 90 if (current_fence->IsSemaphore()) {
91 memory_manager.Write<u32>(current_fence->GetAddress(), current_fence->GetPayload()); 91 memory_manager.template Write<u32>(current_fence->GetAddress(),
92 current_fence->GetPayload());
92 } else { 93 } else {
93 gpu.IncrementSyncPoint(current_fence->GetPayload()); 94 gpu.IncrementSyncPoint(current_fence->GetPayload());
94 } 95 }
@@ -134,7 +135,8 @@ private:
134 } 135 }
135 PopAsyncFlushes(); 136 PopAsyncFlushes();
136 if (current_fence->IsSemaphore()) { 137 if (current_fence->IsSemaphore()) {
137 memory_manager.Write<u32>(current_fence->GetAddress(), current_fence->GetPayload()); 138 memory_manager.template Write<u32>(current_fence->GetAddress(),
139 current_fence->GetPayload());
138 } else { 140 } else {
139 gpu.IncrementSyncPoint(current_fence->GetPayload()); 141 gpu.IncrementSyncPoint(current_fence->GetPayload());
140 } 142 }
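Editor's note: the only change in the two hunks above is the added `template` keyword. Inside a class template, memory_manager has a type that depends on the template parameters, so Write<u32> is a dependent member name and, without the disambiguator, the `<` parses as a comparison on some compilers. A self-contained example of the same situation:

#include <cstdint>
#include <iostream>

struct MemoryManager {
    template <typename T>
    void Write(std::uint64_t addr, T value) {
        std::cout << "write " << sizeof(T) << " bytes at 0x" << std::hex << addr << '\n';
    }
};

template <typename TMemoryManager>
struct FenceSignaler {
    TMemoryManager memory_manager;

    void Signal(std::uint64_t address, std::uint32_t payload) {
        // "memory_manager.Write<std::uint32_t>(...)" would be parsed as a
        // comparison here, because Write is a dependent name.
        memory_manager.template Write<std::uint32_t>(address, payload);
    }
};

int main() {
    FenceSignaler<MemoryManager> signaler;
    signaler.Signal(0x1000, 1);
}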
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 6fe155bcc..725b4c32d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -59,14 +59,12 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
59template <typename Engine, typename Entry> 59template <typename Engine, typename Entry>
60Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, 60Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
61 ShaderType shader_type, std::size_t index = 0) { 61 ShaderType shader_type, std::size_t index = 0) {
62 if (entry.IsBindless()) { 62 if (entry.is_bindless) {
63 const Tegra::Texture::TextureHandle tex_handle = 63 const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
64 engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset());
65 return engine.GetTextureInfo(tex_handle); 64 return engine.GetTextureInfo(tex_handle);
66 } 65 }
67 const auto& gpu_profile = engine.AccessGuestDriverProfile(); 66 const auto& gpu_profile = engine.AccessGuestDriverProfile();
68 const u32 offset = 67 const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
69 entry.GetOffset() + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
70 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) { 68 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
71 return engine.GetStageTexture(shader_type, offset); 69 return engine.GetStageTexture(shader_type, offset);
72 } else { 70 } else {
@@ -348,7 +346,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
348 346
349 texture_cache.GuardRenderTargets(true); 347 texture_cache.GuardRenderTargets(true);
350 348
351 View depth_surface = texture_cache.GetDepthBufferSurface(); 349 View depth_surface = texture_cache.GetDepthBufferSurface(true);
352 350
353 const auto& regs = gpu.regs; 351 const auto& regs = gpu.regs;
354 UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); 352 UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -357,7 +355,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
357 FramebufferCacheKey key; 355 FramebufferCacheKey key;
358 const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); 356 const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
359 for (std::size_t index = 0; index < colors_count; ++index) { 357 for (std::size_t index = 0; index < colors_count; ++index) {
360 View color_surface{texture_cache.GetColorBufferSurface(index)}; 358 View color_surface{texture_cache.GetColorBufferSurface(index, true)};
361 if (!color_surface) { 359 if (!color_surface) {
362 continue; 360 continue;
363 } 361 }
@@ -381,28 +379,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
381 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); 379 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
382} 380}
383 381
384void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, 382void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) {
385 bool using_stencil_fb) {
386 auto& gpu = system.GPU().Maxwell3D(); 383 auto& gpu = system.GPU().Maxwell3D();
387 const auto& regs = gpu.regs; 384 const auto& regs = gpu.regs;
388 385
389 texture_cache.GuardRenderTargets(true); 386 texture_cache.GuardRenderTargets(true);
390 View color_surface; 387 View color_surface;
391 if (using_color_fb) { 388
389 if (using_color) {
390 // Determine if we have to preserve the contents.
391 // First, the contents must be preserved unless every clear mask is enabled.
392 bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G ||
393 !regs.clear_buffers.B || !regs.clear_buffers.A;
392 const std::size_t index = regs.clear_buffers.RT; 394 const std::size_t index = regs.clear_buffers.RT;
393 color_surface = texture_cache.GetColorBufferSurface(index); 395 if (regs.clear_flags.scissor) {
396 // Then we have to confirm scissor testing clears the whole image.
397 const auto& scissor = regs.scissor_test[0];
398 preserve_contents |= scissor.min_x > 0;
399 preserve_contents |= scissor.min_y > 0;
400 preserve_contents |= scissor.max_x < regs.rt[index].width;
401 preserve_contents |= scissor.max_y < regs.rt[index].height;
402 }
403
404 color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents);
394 texture_cache.MarkColorBufferInUse(index); 405 texture_cache.MarkColorBufferInUse(index);
395 } 406 }
407
396 View depth_surface; 408 View depth_surface;
397 if (using_depth_fb || using_stencil_fb) { 409 if (using_depth_stencil) {
398 depth_surface = texture_cache.GetDepthBufferSurface(); 410 bool preserve_contents = false;
411 if (regs.clear_flags.scissor) {
412 // For depth-stencil clears we only have to confirm the scissor test covers the whole image.
413 const auto& scissor = regs.scissor_test[0];
414 preserve_contents |= scissor.min_x > 0;
415 preserve_contents |= scissor.min_y > 0;
416 preserve_contents |= scissor.max_x < regs.zeta_width;
417 preserve_contents |= scissor.max_y < regs.zeta_height;
418 }
419
420 depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
399 texture_cache.MarkDepthBufferInUse(); 421 texture_cache.MarkDepthBufferInUse();
400 } 422 }
401 texture_cache.GuardRenderTargets(false); 423 texture_cache.GuardRenderTargets(false);
402 424
403 FramebufferCacheKey key; 425 FramebufferCacheKey key;
404 key.colors[0] = color_surface; 426 key.colors[0] = std::move(color_surface);
405 key.zeta = depth_surface; 427 key.zeta = std::move(depth_surface);
406 428
407 state_tracker.NotifyFramebuffer(); 429 state_tracker.NotifyFramebuffer();
408 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); 430 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
@@ -422,8 +444,7 @@ void RasterizerOpenGL::Clear() {
422 if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || 444 if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
423 regs.clear_buffers.A) { 445 regs.clear_buffers.A) {
424 use_color = true; 446 use_color = true;
425 } 447
426 if (use_color) {
427 state_tracker.NotifyColorMask0(); 448 state_tracker.NotifyColorMask0();
428 glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0, 449 glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
429 regs.clear_buffers.B != 0, regs.clear_buffers.A != 0); 450 regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
@@ -461,7 +482,7 @@ void RasterizerOpenGL::Clear() {
461 482
462 UNIMPLEMENTED_IF(regs.clear_flags.viewport); 483 UNIMPLEMENTED_IF(regs.clear_flags.viewport);
463 484
464 ConfigureClearFramebuffer(use_color, use_depth, use_stencil); 485 ConfigureClearFramebuffer(use_color, use_depth || use_stencil);
465 486
466 if (use_color) { 487 if (use_color) {
467 glClearBufferfv(GL_COLOR, 0, regs.clear_color); 488 glClearBufferfv(GL_COLOR, 0, regs.clear_color);
@@ -833,9 +854,9 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
833 854
834 u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer; 855 u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
835 for (const auto& entry : shader->GetEntries().global_memory_entries) { 856 for (const auto& entry : shader->GetEntries().global_memory_entries) {
836 const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()}; 857 const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset};
837 const auto gpu_addr{memory_manager.Read<u64>(addr)}; 858 const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)};
838 const auto size{memory_manager.Read<u32>(addr + 8)}; 859 const u32 size{memory_manager.Read<u32>(addr + 8)};
839 SetupGlobalMemory(binding++, entry, gpu_addr, size); 860 SetupGlobalMemory(binding++, entry, gpu_addr, size);
840 } 861 }
841} 862}
@@ -847,7 +868,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
847 868
848 u32 binding = 0; 869 u32 binding = 0;
849 for (const auto& entry : kernel->GetEntries().global_memory_entries) { 870 for (const auto& entry : kernel->GetEntries().global_memory_entries) {
850 const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()}; 871 const auto addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset};
851 const auto gpu_addr{memory_manager.Read<u64>(addr)}; 872 const auto gpu_addr{memory_manager.Read<u64>(addr)};
852 const auto size{memory_manager.Read<u32>(addr + 8)}; 873 const auto size{memory_manager.Read<u32>(addr + 8)};
853 SetupGlobalMemory(binding++, entry, gpu_addr, size); 874 SetupGlobalMemory(binding++, entry, gpu_addr, size);
@@ -858,7 +879,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
858 GPUVAddr gpu_addr, std::size_t size) { 879 GPUVAddr gpu_addr, std::size_t size) {
859 const auto alignment{device.GetShaderStorageBufferAlignment()}; 880 const auto alignment{device.GetShaderStorageBufferAlignment()};
860 const auto [ssbo, buffer_offset] = 881 const auto [ssbo, buffer_offset] =
861 buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten()); 882 buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.is_written);
862 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset, 883 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
863 static_cast<GLsizeiptr>(size)); 884 static_cast<GLsizeiptr>(size));
864} 885}
@@ -869,7 +890,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
869 u32 binding = device.GetBaseBindings(stage_index).sampler; 890 u32 binding = device.GetBaseBindings(stage_index).sampler;
870 for (const auto& entry : shader->GetEntries().samplers) { 891 for (const auto& entry : shader->GetEntries().samplers) {
871 const auto shader_type = static_cast<ShaderType>(stage_index); 892 const auto shader_type = static_cast<ShaderType>(stage_index);
872 for (std::size_t i = 0; i < entry.Size(); ++i) { 893 for (std::size_t i = 0; i < entry.size; ++i) {
873 const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i); 894 const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);
874 SetupTexture(binding++, texture, entry); 895 SetupTexture(binding++, texture, entry);
875 } 896 }
@@ -881,7 +902,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
881 const auto& compute = system.GPU().KeplerCompute(); 902 const auto& compute = system.GPU().KeplerCompute();
882 u32 binding = 0; 903 u32 binding = 0;
883 for (const auto& entry : kernel->GetEntries().samplers) { 904 for (const auto& entry : kernel->GetEntries().samplers) {
884 for (std::size_t i = 0; i < entry.Size(); ++i) { 905 for (std::size_t i = 0; i < entry.size; ++i) {
885 const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i); 906 const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
886 SetupTexture(binding++, texture, entry); 907 SetupTexture(binding++, texture, entry);
887 } 908 }
@@ -938,7 +959,7 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
938 if (!tic.IsBuffer()) { 959 if (!tic.IsBuffer()) {
939 view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source); 960 view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
940 } 961 }
941 if (entry.IsWritten()) { 962 if (entry.is_written) {
942 view->MarkAsModified(texture_cache.Tick()); 963 view->MarkAsModified(texture_cache.Tick());
943 } 964 }
944 glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE, 965 glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE,
@@ -999,11 +1020,7 @@ void RasterizerOpenGL::SyncDepthClamp() {
999 } 1020 }
1000 flags[Dirty::DepthClampEnabled] = false; 1021 flags[Dirty::DepthClampEnabled] = false;
1001 1022
1002 const auto& state = gpu.regs.view_volume_clip_control; 1023 oglEnable(GL_DEPTH_CLAMP, gpu.regs.view_volume_clip_control.depth_clamp_disabled == 0);
1003 UNIMPLEMENTED_IF_MSG(state.depth_clamp_far != state.depth_clamp_near,
1004 "Unimplemented depth clamp separation!");
1005
1006 oglEnable(GL_DEPTH_CLAMP, state.depth_clamp_far || state.depth_clamp_near);
1007} 1024}
1008 1025
1009void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { 1026void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
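Editor's note: the clear path above decides whether the cached render target has to be preserved before being reused. If any color channel is masked off, or scissoring leaves part of the target untouched, the old contents still matter; only a full-target clear with all masks enabled may discard them. A compact sketch of that decision with illustrative names (the real code reads Maxwell3D registers):

struct ClearState {
    bool mask_r, mask_g, mask_b, mask_a; // color write masks active for the clear
    bool scissor_enabled;
    unsigned min_x, min_y, max_x, max_y; // scissor rectangle
};

bool MustPreserveContents(const ClearState& s, unsigned width, unsigned height) {
    // If any channel is excluded from the clear, the old values survive there.
    bool preserve = !s.mask_r || !s.mask_g || !s.mask_b || !s.mask_a;
    if (s.scissor_enabled) {
        // If the scissor box does not cover the whole target, the outside survives.
        preserve |= s.min_x > 0 || s.min_y > 0 || s.max_x < width || s.max_y < height;
    }
    return preserve;
}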
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index ebd2173eb..87249fb6f 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -95,7 +95,8 @@ private:
95 /// Configures the color and depth framebuffer states. 95 /// Configures the color and depth framebuffer states.
96 void ConfigureFramebuffers(); 96 void ConfigureFramebuffers();
97 97
98 void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb); 98 /// Configures the color and depth framebuffer for clearing.
99 void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
99 100
100 /// Configures the current constbuffers to use for the draw command. 101 /// Configures the current constbuffers to use for the draw command.
101 void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader); 102 void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader);
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index f63156b8d..9759a7078 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -10,8 +10,6 @@
10#include <thread> 10#include <thread>
11#include <unordered_set> 11#include <unordered_set>
12 12
13#include <boost/functional/hash.hpp>
14
15#include "common/alignment.h" 13#include "common/alignment.h"
16#include "common/assert.h" 14#include "common/assert.h"
17#include "common/logging/log.h" 15#include "common/logging/log.h"
@@ -28,76 +26,26 @@
28#include "video_core/renderer_opengl/gl_shader_disk_cache.h" 26#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
29#include "video_core/renderer_opengl/gl_state_tracker.h" 27#include "video_core/renderer_opengl/gl_state_tracker.h"
30#include "video_core/renderer_opengl/utils.h" 28#include "video_core/renderer_opengl/utils.h"
29#include "video_core/shader/memory_util.h"
31#include "video_core/shader/registry.h" 30#include "video_core/shader/registry.h"
32#include "video_core/shader/shader_ir.h" 31#include "video_core/shader/shader_ir.h"
33 32
34namespace OpenGL { 33namespace OpenGL {
35 34
36using Tegra::Engines::ShaderType; 35using Tegra::Engines::ShaderType;
36using VideoCommon::Shader::GetShaderAddress;
37using VideoCommon::Shader::GetShaderCode;
38using VideoCommon::Shader::GetUniqueIdentifier;
39using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
37using VideoCommon::Shader::ProgramCode; 40using VideoCommon::Shader::ProgramCode;
38using VideoCommon::Shader::Registry; 41using VideoCommon::Shader::Registry;
39using VideoCommon::Shader::ShaderIR; 42using VideoCommon::Shader::ShaderIR;
43using VideoCommon::Shader::STAGE_MAIN_OFFSET;
40 44
41namespace { 45namespace {
42 46
43constexpr u32 STAGE_MAIN_OFFSET = 10;
44constexpr u32 KERNEL_MAIN_OFFSET = 0;
45
46constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{}; 47constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
47 48
48/// Gets the address for the specified shader stage program
49GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
50 const auto& gpu{system.GPU().Maxwell3D()};
51 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
52 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
53}
54
55/// Gets if the current instruction offset is a scheduler instruction
56constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
57 // Sched instructions appear once every 4 instructions.
58 constexpr std::size_t SchedPeriod = 4;
59 const std::size_t absolute_offset = offset - main_offset;
60 return (absolute_offset % SchedPeriod) == 0;
61}
62
63/// Calculates the size of a program stream
64std::size_t CalculateProgramSize(const ProgramCode& program) {
65 constexpr std::size_t start_offset = 10;
66 // This is the encoded version of BRA that jumps to itself. All Nvidia
67 // shaders end with one.
68 constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
69 constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
70 std::size_t offset = start_offset;
71 while (offset < program.size()) {
72 const u64 instruction = program[offset];
73 if (!IsSchedInstruction(offset, start_offset)) {
74 if ((instruction & mask) == self_jumping_branch) {
75 // End on Maxwell's "nop" instruction
76 break;
77 }
78 if (instruction == 0) {
79 break;
80 }
81 }
82 offset++;
83 }
84 // The last instruction is included in the program size
85 return std::min(offset + 1, program.size());
86}
87
88/// Gets the shader program code from memory for the specified address
89ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
90 const u8* host_ptr) {
91 ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
92 ASSERT_OR_EXECUTE(host_ptr != nullptr, {
93 std::fill(code.begin(), code.end(), 0);
94 return code;
95 });
96 memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
97 code.resize(CalculateProgramSize(code));
98 return code;
99}
100
101/// Gets the shader type from a Maxwell program type 49/// Gets the shader type from a Maxwell program type
102constexpr GLenum GetGLShaderType(ShaderType shader_type) { 50constexpr GLenum GetGLShaderType(ShaderType shader_type) {
103 switch (shader_type) { 51 switch (shader_type) {
@@ -114,17 +62,6 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) {
114 } 62 }
115} 63}
116 64
117/// Hashes one (or two) program streams
118u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
119 const ProgramCode& code_b = {}) {
120 u64 unique_identifier = boost::hash_value(code);
121 if (is_a) {
122 // VertexA programs include two programs
123 boost::hash_combine(unique_identifier, boost::hash_value(code_b));
124 }
125 return unique_identifier;
126}
127
128constexpr const char* GetShaderTypeName(ShaderType shader_type) { 65constexpr const char* GetShaderTypeName(ShaderType shader_type) {
129 switch (shader_type) { 66 switch (shader_type) {
130 case ShaderType::Vertex: 67 case ShaderType::Vertex:
@@ -456,11 +393,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
456 const auto host_ptr{memory_manager.GetPointer(address)}; 393 const auto host_ptr{memory_manager.GetPointer(address)};
457 394
458 // No shader found - create a new one 395 // No shader found - create a new one
459 ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)}; 396 ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)};
460 ProgramCode code_b; 397 ProgramCode code_b;
461 if (program == Maxwell::ShaderProgram::VertexA) { 398 if (program == Maxwell::ShaderProgram::VertexA) {
462 const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; 399 const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
463 code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b)); 400 const u8* host_ptr_b = memory_manager.GetPointer(address_b);
401 code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
464 } 402 }
465 403
466 const auto unique_identifier = GetUniqueIdentifier( 404 const auto unique_identifier = GetUniqueIdentifier(
@@ -498,7 +436,7 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
498 436
499 const auto host_ptr{memory_manager.GetPointer(code_addr)}; 437 const auto host_ptr{memory_manager.GetPointer(code_addr)};
500 // No kernel found, create a new one 438 // No kernel found, create a new one
501 auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; 439 auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
502 const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)}; 440 const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
503 441
504 const ShaderParameters params{system, disk_cache, device, 442 const ShaderParameters params{system, disk_cache, device,
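Editor's note: the deleted helpers (GetShaderAddress, IsSchedInstruction, CalculateProgramSize, GetShaderCode, GetUniqueIdentifier) now come from video_core/shader/memory_util.h so the OpenGL and Vulkan caches share one copy; the extra bool passed to GetShaderCode selects the compute start offset. For reference, a sketch of the size scan as it appeared in the deleted lines, with the compute/graphics distinction folded in; the shared memory_util version may differ in detail:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

using ProgramCode = std::vector<std::uint64_t>;

// Sched instructions appear once every 4 instructions after the main offset.
constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
    constexpr std::size_t sched_period = 4;
    return ((offset - main_offset) % sched_period) == 0;
}

// Scan until the self-jumping BRA (Maxwell's "nop") or a zero instruction.
std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
    const std::size_t start_offset = is_compute ? 0 : 10;
    constexpr std::uint64_t self_jumping_branch = 0xE2400FFFFF07000FULL;
    constexpr std::uint64_t mask = 0xFFFFFFFFFF7FFFFFULL;
    std::size_t offset = start_offset;
    while (offset < program.size()) {
        const std::uint64_t instruction = program[offset];
        if (!IsSchedInstruction(offset, start_offset)) {
            if ((instruction & mask) == self_jumping_branch || instruction == 0) {
                break;
            }
        }
        ++offset;
    }
    // The terminating instruction is counted as part of the program.
    return std::min(offset + 1, program.size());
}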
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 0cd3ad7e1..99fd4ae2c 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -870,13 +870,13 @@ private:
870 for (const auto& sampler : ir.GetSamplers()) { 870 for (const auto& sampler : ir.GetSamplers()) {
871 const std::string name = GetSampler(sampler); 871 const std::string name = GetSampler(sampler);
872 const std::string description = fmt::format("layout (binding = {}) uniform", binding); 872 const std::string description = fmt::format("layout (binding = {}) uniform", binding);
873 binding += sampler.IsIndexed() ? sampler.Size() : 1; 873 binding += sampler.is_indexed ? sampler.size : 1;
874 874
875 std::string sampler_type = [&]() { 875 std::string sampler_type = [&]() {
876 if (sampler.IsBuffer()) { 876 if (sampler.is_buffer) {
877 return "samplerBuffer"; 877 return "samplerBuffer";
878 } 878 }
879 switch (sampler.GetType()) { 879 switch (sampler.type) {
880 case Tegra::Shader::TextureType::Texture1D: 880 case Tegra::Shader::TextureType::Texture1D:
881 return "sampler1D"; 881 return "sampler1D";
882 case Tegra::Shader::TextureType::Texture2D: 882 case Tegra::Shader::TextureType::Texture2D:
@@ -890,17 +890,17 @@ private:
890 return "sampler2D"; 890 return "sampler2D";
891 } 891 }
892 }(); 892 }();
893 if (sampler.IsArray()) { 893 if (sampler.is_array) {
894 sampler_type += "Array"; 894 sampler_type += "Array";
895 } 895 }
896 if (sampler.IsShadow()) { 896 if (sampler.is_shadow) {
897 sampler_type += "Shadow"; 897 sampler_type += "Shadow";
898 } 898 }
899 899
900 if (!sampler.IsIndexed()) { 900 if (!sampler.is_indexed) {
901 code.AddLine("{} {} {};", description, sampler_type, name); 901 code.AddLine("{} {} {};", description, sampler_type, name);
902 } else { 902 } else {
903 code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.Size()); 903 code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.size);
904 } 904 }
905 } 905 }
906 if (!ir.GetSamplers().empty()) { 906 if (!ir.GetSamplers().empty()) {
@@ -946,14 +946,14 @@ private:
946 u32 binding = device.GetBaseBindings(stage).image; 946 u32 binding = device.GetBaseBindings(stage).image;
947 for (const auto& image : ir.GetImages()) { 947 for (const auto& image : ir.GetImages()) {
948 std::string qualifier = "coherent volatile"; 948 std::string qualifier = "coherent volatile";
949 if (image.IsRead() && !image.IsWritten()) { 949 if (image.is_read && !image.is_written) {
950 qualifier += " readonly"; 950 qualifier += " readonly";
951 } else if (image.IsWritten() && !image.IsRead()) { 951 } else if (image.is_written && !image.is_read) {
952 qualifier += " writeonly"; 952 qualifier += " writeonly";
953 } 953 }
954 954
955 const char* format = image.IsAtomic() ? "r32ui, " : ""; 955 const char* format = image.is_atomic ? "r32ui, " : "";
956 const char* type_declaration = GetImageTypeDeclaration(image.GetType()); 956 const char* type_declaration = GetImageTypeDeclaration(image.type);
957 code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++, 957 code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++,
958 qualifier, type_declaration, GetImage(image)); 958 qualifier, type_declaration, GetImage(image));
959 } 959 }
@@ -1337,8 +1337,8 @@ private:
1337 ASSERT(meta); 1337 ASSERT(meta);
1338 1338
1339 const std::size_t count = operation.GetOperandsCount(); 1339 const std::size_t count = operation.GetOperandsCount();
1340 const bool has_array = meta->sampler.IsArray(); 1340 const bool has_array = meta->sampler.is_array;
1341 const bool has_shadow = meta->sampler.IsShadow(); 1341 const bool has_shadow = meta->sampler.is_shadow;
1342 1342
1343 std::string expr = "texture" + function_suffix; 1343 std::string expr = "texture" + function_suffix;
1344 if (!meta->aoffi.empty()) { 1344 if (!meta->aoffi.empty()) {
@@ -1346,7 +1346,7 @@ private:
1346 } else if (!meta->ptp.empty()) { 1346 } else if (!meta->ptp.empty()) {
1347 expr += "Offsets"; 1347 expr += "Offsets";
1348 } 1348 }
1349 if (!meta->sampler.IsIndexed()) { 1349 if (!meta->sampler.is_indexed) {
1350 expr += '(' + GetSampler(meta->sampler) + ", "; 1350 expr += '(' + GetSampler(meta->sampler) + ", ";
1351 } else { 1351 } else {
1352 expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], "; 1352 expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], ";
@@ -1870,6 +1870,14 @@ private:
1870 return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type); 1870 return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type);
1871 } 1871 }
1872 1872
1873 Expression LogicalAddCarry(Operation operation) {
1874 const std::string carry = code.GenerateTemporary();
1875 code.AddLine("uint {};", carry);
1876 code.AddLine("uaddCarry({}, {}, {});", VisitOperand(operation, 0).AsUint(),
1877 VisitOperand(operation, 1).AsUint(), carry);
1878 return {fmt::format("({} != 0)", carry), Type::Bool};
1879 }
1880
1873 Expression LogicalFIsNan(Operation operation) { 1881 Expression LogicalFIsNan(Operation operation) {
1874 return GenerateUnary(operation, "isnan", Type::Bool, Type::Float); 1882 return GenerateUnary(operation, "isnan", Type::Bool, Type::Float);
1875 } 1883 }
@@ -1974,7 +1982,7 @@ private:
1974 1982
1975 std::string expr = GenerateTexture( 1983 std::string expr = GenerateTexture(
1976 operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}}); 1984 operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
1977 if (meta->sampler.IsShadow()) { 1985 if (meta->sampler.is_shadow) {
1978 expr = "vec4(" + expr + ')'; 1986 expr = "vec4(" + expr + ')';
1979 } 1987 }
1980 return {expr + GetSwizzle(meta->element), Type::Float}; 1988 return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1986,7 +1994,7 @@ private:
1986 1994
1987 std::string expr = GenerateTexture( 1995 std::string expr = GenerateTexture(
1988 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}}); 1996 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
1989 if (meta->sampler.IsShadow()) { 1997 if (meta->sampler.is_shadow) {
1990 expr = "vec4(" + expr + ')'; 1998 expr = "vec4(" + expr + ')';
1991 } 1999 }
1992 return {expr + GetSwizzle(meta->element), Type::Float}; 2000 return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1995,11 +2003,11 @@ private:
1995 Expression TextureGather(Operation operation) { 2003 Expression TextureGather(Operation operation) {
1996 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 2004 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1997 2005
1998 const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int; 2006 const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
1999 const bool separate_dc = meta.sampler.IsShadow(); 2007 const bool separate_dc = meta.sampler.is_shadow;
2000 2008
2001 std::vector<TextureIR> ir; 2009 std::vector<TextureIR> ir;
2002 if (meta.sampler.IsShadow()) { 2010 if (meta.sampler.is_shadow) {
2003 ir = {TextureOffset{}}; 2011 ir = {TextureOffset{}};
2004 } else { 2012 } else {
2005 ir = {TextureOffset{}, TextureArgument{type, meta.component}}; 2013 ir = {TextureOffset{}, TextureArgument{type, meta.component}};
@@ -2044,7 +2052,7 @@ private:
2044 constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"}; 2052 constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
2045 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 2053 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
2046 ASSERT(meta); 2054 ASSERT(meta);
2047 UNIMPLEMENTED_IF(meta->sampler.IsArray()); 2055 UNIMPLEMENTED_IF(meta->sampler.is_array);
2048 const std::size_t count = operation.GetOperandsCount(); 2056 const std::size_t count = operation.GetOperandsCount();
2049 2057
2050 std::string expr = "texelFetch("; 2058 std::string expr = "texelFetch(";
@@ -2065,7 +2073,7 @@ private:
2065 } 2073 }
2066 expr += ')'; 2074 expr += ')';
2067 2075
2068 if (meta->lod && !meta->sampler.IsBuffer()) { 2076 if (meta->lod && !meta->sampler.is_buffer) {
2069 expr += ", "; 2077 expr += ", ";
2070 expr += Visit(meta->lod).AsInt(); 2078 expr += Visit(meta->lod).AsInt();
2071 } 2079 }
@@ -2076,12 +2084,10 @@ private:
2076 } 2084 }
2077 2085
2078 Expression TextureGradient(Operation operation) { 2086 Expression TextureGradient(Operation operation) {
2079 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 2087 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
2080 ASSERT(meta);
2081
2082 std::string expr = 2088 std::string expr =
2083 GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}}); 2089 GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
2084 return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; 2090 return {std::move(expr) + GetSwizzle(meta.element), Type::Float};
2085 } 2091 }
2086 2092
2087 Expression ImageLoad(Operation operation) { 2093 Expression ImageLoad(Operation operation) {
@@ -2441,6 +2447,8 @@ private:
2441 &GLSLDecompiler::LogicalNotEqual<Type::Uint>, 2447 &GLSLDecompiler::LogicalNotEqual<Type::Uint>,
2442 &GLSLDecompiler::LogicalGreaterEqual<Type::Uint>, 2448 &GLSLDecompiler::LogicalGreaterEqual<Type::Uint>,
2443 2449
2450 &GLSLDecompiler::LogicalAddCarry,
2451
2444 &GLSLDecompiler::Logical2HLessThan<false>, 2452 &GLSLDecompiler::Logical2HLessThan<false>,
2445 &GLSLDecompiler::Logical2HEqual<false>, 2453 &GLSLDecompiler::Logical2HEqual<false>,
2446 &GLSLDecompiler::Logical2HLessEqual<false>, 2454 &GLSLDecompiler::Logical2HLessEqual<false>,
@@ -2598,11 +2606,11 @@ private:
2598 } 2606 }
2599 2607
2600 std::string GetSampler(const Sampler& sampler) const { 2608 std::string GetSampler(const Sampler& sampler) const {
2601 return AppendSuffix(static_cast<u32>(sampler.GetIndex()), "sampler"); 2609 return AppendSuffix(sampler.index, "sampler");
2602 } 2610 }
2603 2611
2604 std::string GetImage(const Image& image) const { 2612 std::string GetImage(const Image& image) const {
2605 return AppendSuffix(static_cast<u32>(image.GetIndex()), "image"); 2613 return AppendSuffix(image.index, "image");
2606 } 2614 }
2607 2615
2608 std::string AppendSuffix(u32 index, std::string_view name) const { 2616 std::string AppendSuffix(u32 index, std::string_view name) const {
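Editor's note: LogicalAddCarry above emits GLSL uaddCarry, whose third argument receives 1 when the 32-bit unsigned addition wraps, and then compares that temporary against zero to form the boolean. The same predicate in plain C++, since an unsigned sum wraps exactly when it ends up smaller than either operand:

#include <cstdint>

// Carry-out of a 32-bit unsigned addition, mirroring the generated uaddCarry check.
constexpr bool AddCarry(std::uint32_t a, std::uint32_t b) {
    return static_cast<std::uint32_t>(a + b) < a;
}

static_assert(AddCarry(0xFFFF'FFFFu, 1u), "wrap-around produces a carry");
static_assert(!AddCarry(2u, 3u), "small sums do not");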
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index e7dbd810c..e8a178764 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -33,36 +33,19 @@ public:
33 } 33 }
34 34
35private: 35private:
36 u32 index{}; 36 u32 index = 0;
37}; 37};
38 38
39class GlobalMemoryEntry { 39struct GlobalMemoryEntry {
40public: 40 constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
41 explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read, bool is_written) 41 bool is_written)
42 : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{ 42 : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
43 is_written} {} 43 is_written} {}
44 44
45 u32 GetCbufIndex() const { 45 u32 cbuf_index = 0;
46 return cbuf_index; 46 u32 cbuf_offset = 0;
47 } 47 bool is_read = false;
48 48 bool is_written = false;
49 u32 GetCbufOffset() const {
50 return cbuf_offset;
51 }
52
53 bool IsRead() const {
54 return is_read;
55 }
56
57 bool IsWritten() const {
58 return is_written;
59 }
60
61private:
62 u32 cbuf_index{};
63 u32 cbuf_offset{};
64 bool is_read{};
65 bool is_written{};
66}; 49};
67 50
68struct ShaderEntries { 51struct ShaderEntries {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index a7f256ff9..648b1e71b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -81,7 +81,7 @@ void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept {
81 primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0); 81 primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
82 cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0); 82 cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
83 depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0); 83 depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
84 depth_clamp_enable.Assign(clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1 ? 1 : 0); 84 depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
85 ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0); 85 ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
86 cull_face.Assign(PackCullFace(regs.cull_face)); 86 cull_face.Assign(PackCullFace(regs.cull_face));
87 front_face.Assign(packed_front_face); 87 front_face.Assign(packed_front_face);
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 77188b862..8652067a7 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -153,7 +153,7 @@ struct FixedPipelineState {
153 BitField<4, 1, u32> primitive_restart_enable; 153 BitField<4, 1, u32> primitive_restart_enable;
154 BitField<5, 1, u32> cull_enable; 154 BitField<5, 1, u32> cull_enable;
155 BitField<6, 1, u32> depth_bias_enable; 155 BitField<6, 1, u32> depth_bias_enable;
156 BitField<7, 1, u32> depth_clamp_enable; 156 BitField<7, 1, u32> depth_clamp_disabled;
157 BitField<8, 1, u32> ndc_minus_one_to_one; 157 BitField<8, 1, u32> ndc_minus_one_to_one;
158 BitField<9, 2, u32> cull_face; 158 BitField<9, 2, u32> cull_face;
159 BitField<11, 1, u32> front_face; 159 BitField<11, 1, u32> front_face;
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 04532f8f8..59b441943 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -12,15 +12,12 @@
12 12
13#include <fmt/format.h> 13#include <fmt/format.h>
14 14
15#include "common/assert.h"
16#include "common/dynamic_library.h" 15#include "common/dynamic_library.h"
17#include "common/logging/log.h" 16#include "common/logging/log.h"
18#include "common/telemetry.h" 17#include "common/telemetry.h"
19#include "core/core.h" 18#include "core/core.h"
20#include "core/core_timing.h" 19#include "core/core_timing.h"
21#include "core/frontend/emu_window.h" 20#include "core/frontend/emu_window.h"
22#include "core/memory.h"
23#include "core/perf_stats.h"
24#include "core/settings.h" 21#include "core/settings.h"
25#include "core/telemetry_session.h" 22#include "core/telemetry_session.h"
26#include "video_core/gpu.h" 23#include "video_core/gpu.h"
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 18270909b..522b5bff8 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <optional>
9#include <string> 8#include <string>
10#include <vector> 9#include <vector>
11 10
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 5eb544aea..243640fab 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <memory> 7#include <memory>
9#include <tuple> 8#include <tuple>
10 9
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 81e1de2be..5b494da8c 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -5,11 +5,7 @@
5#include <algorithm> 5#include <algorithm>
6#include <cstring> 6#include <cstring>
7#include <memory> 7#include <memory>
8#include <optional>
9#include <tuple>
10 8
11#include "common/assert.h"
12#include "common/bit_util.h"
13#include "core/core.h" 9#include "core/core.h"
14#include "video_core/renderer_vulkan/vk_buffer_cache.h" 10#include "video_core/renderer_vulkan/vk_buffer_cache.h"
15#include "video_core/renderer_vulkan/vk_device.h" 11#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3cd2e2774..a2d0b42b1 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -5,14 +5,11 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <unordered_map>
9#include <vector>
10 8
11#include "common/common_types.h" 9#include "common/common_types.h"
12#include "video_core/buffer_cache/buffer_cache.h" 10#include "video_core/buffer_cache/buffer_cache.h"
13#include "video_core/rasterizer_cache.h" 11#include "video_core/rasterizer_cache.h"
14#include "video_core/renderer_vulkan/vk_memory_manager.h" 12#include "video_core/renderer_vulkan/vk_memory_manager.h"
15#include "video_core/renderer_vulkan/vk_resource_manager.h"
16#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 13#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
17#include "video_core/renderer_vulkan/vk_stream_buffer.h" 14#include "video_core/renderer_vulkan/vk_stream_buffer.h"
18#include "video_core/renderer_vulkan/wrapper.h" 15#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7b0268033..da71e710c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -6,7 +6,7 @@
6#include <memory> 6#include <memory>
7#include <optional> 7#include <optional>
8#include <utility> 8#include <utility>
9#include <vector> 9
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/assert.h" 11#include "common/assert.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 26bf834de..230b526bc 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -6,7 +6,7 @@
6 6
7#include <optional> 7#include <optional>
8#include <utility> 8#include <utility>
9#include <vector> 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
12#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 12#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 52566bb79..8e1b46277 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,14 +2,12 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <memory>
6#include <vector> 5#include <vector>
7 6
8#include "video_core/renderer_vulkan/vk_compute_pipeline.h" 7#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
9#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 8#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
10#include "video_core/renderer_vulkan/vk_device.h" 9#include "video_core/renderer_vulkan/vk_device.h"
11#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 10#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
12#include "video_core/renderer_vulkan/vk_resource_manager.h"
13#include "video_core/renderer_vulkan/vk_scheduler.h" 11#include "video_core/renderer_vulkan/vk_scheduler.h"
14#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 12#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
15#include "video_core/renderer_vulkan/vk_update_descriptor.h" 13#include "video_core/renderer_vulkan/vk_update_descriptor.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 33b9af29e..6e2f22a4a 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,8 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
8
9#include "common/common_types.h" 7#include "common/common_types.h"
10#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 8#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
11#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 9#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index e9d528aa6..890fd52cf 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -2,7 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <memory>
6#include <vector> 5#include <vector>
7 6
8#include "common/common_types.h" 7#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index ab40c70f0..9efa66bef 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -4,10 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
8#include <vector> 7#include <vector>
9 8
10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/vk_resource_manager.h" 9#include "video_core/renderer_vulkan/vk_resource_manager.h"
12#include "video_core/renderer_vulkan/wrapper.h" 10#include "video_core/renderer_vulkan/wrapper.h"
13 11
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index e90c76492..0e4bbca97 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -4,7 +4,6 @@
4 4
5#include <bitset> 5#include <bitset>
6#include <chrono> 6#include <chrono>
7#include <cstdlib>
8#include <optional> 7#include <optional>
9#include <string_view> 8#include <string_view>
10#include <thread> 9#include <thread>
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 45bd1fc6c..1ac981974 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -6,7 +6,6 @@
6#include <cstring> 6#include <cstring>
7#include <vector> 7#include <vector>
8 8
9#include "common/assert.h"
10#include "common/common_types.h" 9#include "common/common_types.h"
11#include "common/microprofile.h" 10#include "common/microprofile.h"
12#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 11#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -249,7 +248,7 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
249 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; 248 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
250 rasterization_ci.pNext = nullptr; 249 rasterization_ci.pNext = nullptr;
251 rasterization_ci.flags = 0; 250 rasterization_ci.flags = 0;
252 rasterization_ci.depthClampEnable = rs.depth_clamp_enable; 251 rasterization_ci.depthClampEnable = rs.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE;
253 rasterization_ci.rasterizerDiscardEnable = VK_FALSE; 252 rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
254 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL; 253 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
255 rasterization_ci.cullMode = 254 rasterization_ci.cullMode =
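Editor's note: both backends now consume the new register bit directly. view_volume_clip_control bit 11 stores "depth clamp disabled", so OpenGL turns GL_DEPTH_CLAMP on and Vulkan sets depthClampEnable only while that bit is clear, replacing the old near/far heuristic. The inversion in isolation:

#include <cstdint>

// Bit 11 of view_volume_clip_control means "depth clamping disabled";
// the host APIs want the positive "enabled" flag instead.
constexpr bool DepthClampEnabled(std::uint32_t view_volume_clip_control) {
    const bool disabled = ((view_volume_clip_control >> 11) & 1u) != 0;
    return !disabled;
}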
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 7aba70960..a1d699a6c 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -5,16 +5,13 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <memory>
9#include <optional> 8#include <optional>
10#include <unordered_map>
11#include <vector> 9#include <vector>
12 10
13#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
14#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 12#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
15#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 13#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
16#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 14#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
17#include "video_core/renderer_vulkan/vk_resource_manager.h"
18#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 15#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
19#include "video_core/renderer_vulkan/wrapper.h" 16#include "video_core/renderer_vulkan/wrapper.h"
20 17
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index e6d4adc92..fe45ed269 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -22,17 +22,22 @@
22#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 22#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
23#include "video_core/renderer_vulkan/vk_rasterizer.h" 23#include "video_core/renderer_vulkan/vk_rasterizer.h"
24#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 24#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
25#include "video_core/renderer_vulkan/vk_resource_manager.h"
26#include "video_core/renderer_vulkan/vk_scheduler.h" 25#include "video_core/renderer_vulkan/vk_scheduler.h"
27#include "video_core/renderer_vulkan/vk_update_descriptor.h" 26#include "video_core/renderer_vulkan/vk_update_descriptor.h"
28#include "video_core/renderer_vulkan/wrapper.h" 27#include "video_core/renderer_vulkan/wrapper.h"
29#include "video_core/shader/compiler_settings.h" 28#include "video_core/shader/compiler_settings.h"
29#include "video_core/shader/memory_util.h"
30 30
31namespace Vulkan { 31namespace Vulkan {
32 32
33MICROPROFILE_DECLARE(Vulkan_PipelineCache); 33MICROPROFILE_DECLARE(Vulkan_PipelineCache);
34 34
35using Tegra::Engines::ShaderType; 35using Tegra::Engines::ShaderType;
36using VideoCommon::Shader::GetShaderAddress;
37using VideoCommon::Shader::GetShaderCode;
38using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
39using VideoCommon::Shader::ProgramCode;
40using VideoCommon::Shader::STAGE_MAIN_OFFSET;
36 41
37namespace { 42namespace {
38 43
@@ -45,60 +50,6 @@ constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
45constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ 50constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
46 VideoCommon::Shader::CompileDepth::FullDecompile}; 51 VideoCommon::Shader::CompileDepth::FullDecompile};
47 52
48/// Gets the address for the specified shader stage program
49GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
50 const auto& gpu{system.GPU().Maxwell3D()};
51 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
52 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
53}
54
55/// Gets if the current instruction offset is a scheduler instruction
56constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
57 // Sched instructions appear once every 4 instructions.
58 constexpr std::size_t SchedPeriod = 4;
59 const std::size_t absolute_offset = offset - main_offset;
60 return (absolute_offset % SchedPeriod) == 0;
61}
62
63/// Calculates the size of a program stream
64std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
65 const std::size_t start_offset = is_compute ? 0 : 10;
66 // This is the encoded version of BRA that jumps to itself. All Nvidia
67 // shaders end with one.
68 constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
69 constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
70 std::size_t offset = start_offset;
71 while (offset < program.size()) {
72 const u64 instruction = program[offset];
73 if (!IsSchedInstruction(offset, start_offset)) {
74 if ((instruction & mask) == self_jumping_branch) {
75 // End on Maxwell's "nop" instruction
76 break;
77 }
78 if (instruction == 0) {
79 break;
80 }
81 }
82 ++offset;
83 }
84 // The last instruction is included in the program size
85 return std::min(offset + 1, program.size());
86}
87
88/// Gets the shader program code from memory for the specified address
89ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
90 const u8* host_ptr, bool is_compute) {
91 ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
92 ASSERT_OR_EXECUTE(host_ptr != nullptr, {
93 std::fill(program_code.begin(), program_code.end(), 0);
94 return program_code;
95 });
96 memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
97 program_code.size() * sizeof(u64));
98 program_code.resize(CalculateProgramSize(program_code, is_compute));
99 return program_code;
100}
101
102constexpr std::size_t GetStageFromProgram(std::size_t program) { 53constexpr std::size_t GetStageFromProgram(std::size_t program) {
103 return program == 0 ? 0 : program - 1; 54 return program == 0 ? 0 : program - 1;
104} 55}
@@ -133,7 +84,7 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
133 u32 count = 1; 84 u32 count = 1;
134 if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) { 85 if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
135 // Combined image samplers can be arrayed. 86 // Combined image samplers can be arrayed.
136 count = container[i].Size(); 87 count = container[i].size;
137 } 88 }
138 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back(); 89 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
139 entry.binding = binding++; 90 entry.binding = binding++;
@@ -230,9 +181,9 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
230 const auto host_ptr{memory_manager.GetPointer(program_addr)}; 181 const auto host_ptr{memory_manager.GetPointer(program_addr)};
231 182
232 // No shader found - create a new one 183 // No shader found - create a new one
233 constexpr u32 stage_offset = 10; 184 constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
234 const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1); 185 const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
235 auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false); 186 ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
236 187
237 shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr, 188 shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
238 std::move(code), stage_offset); 189 std::move(code), stage_offset);
@@ -288,11 +239,10 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
288 // No shader found - create a new one 239 // No shader found - create a new one
289 const auto host_ptr = memory_manager.GetPointer(program_addr); 240 const auto host_ptr = memory_manager.GetPointer(program_addr);
290 241
291 auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true); 242 ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
292 constexpr u32 kernel_main_offset = 0;
293 shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute, 243 shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
294 program_addr, *cpu_addr, std::move(code), 244 program_addr, *cpu_addr, std::move(code),
295 kernel_main_offset); 245 KERNEL_MAIN_OFFSET);
296 if (cpu_addr) { 246 if (cpu_addr) {
297 Register(shader); 247 Register(shader);
298 } else { 248 } else {
@@ -411,7 +361,7 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
411 361
412 if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) { 362 if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
413 for (u32 i = 0; i < count; ++i) { 363 for (u32 i = 0; i < count; ++i) {
414 const u32 num_samplers = container[i].Size(); 364 const u32 num_samplers = container[i].size;
415 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 365 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
416 entry.dstBinding = binding; 366 entry.dstBinding = binding;
417 entry.dstArrayElement = 0; 367 entry.dstArrayElement = 0;
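
Note on the hunks above: vk_pipeline_cache.cpp drops its private copies of GetShaderAddress, IsSchedInstruction, CalculateProgramSize and GetShaderCode and pulls them from the new video_core/shader/memory_util.h instead, together with the STAGE_MAIN_OFFSET/KERNEL_MAIN_OFFSET constants that replace the hard-coded 10 and 0. A rough sketch of what that shared header presumably declares, inferred only from the using-declarations and call sites visible in this diff (the real upstream signatures may differ):

// Sketch only, assuming the project's common types (u8, u32, u64, GPUVAddr) and the
// Core/Tegra classes already used by the call sites above.
#include <vector>

namespace VideoCommon::Shader {

using ProgramCode = std::vector<u64>;

// Graphics shaders start after a 10-word header, compute kernels at word 0; these
// constants replace the old stage_offset/kernel_main_offset literals.
constexpr u32 STAGE_MAIN_OFFSET = 10;
constexpr u32 KERNEL_MAIN_OFFSET = 0;

/// Resolves the GPU virtual address of a shader stage from the Maxwell3D registers.
GPUVAddr GetShaderAddress(Core::System& system,
                          Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);

/// Reads the program stream from guest memory and trims it to its real size,
/// stopping at the self-jumping BRA that terminates every NVIDIA shader.
ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
                          const u8* host_ptr, bool is_compute);

} // namespace VideoCommon::Shader
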
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 84d26b822..0b5796fef 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -21,12 +21,11 @@
21#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 21#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
22#include "video_core/renderer_vulkan/vk_graphics_pipeline.h" 22#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
23#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 23#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
24#include "video_core/renderer_vulkan/vk_resource_manager.h"
25#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 24#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
26#include "video_core/renderer_vulkan/wrapper.h" 25#include "video_core/renderer_vulkan/wrapper.h"
26#include "video_core/shader/memory_util.h"
27#include "video_core/shader/registry.h" 27#include "video_core/shader/registry.h"
28#include "video_core/shader/shader_ir.h" 28#include "video_core/shader/shader_ir.h"
29#include "video_core/surface.h"
30 29
31namespace Core { 30namespace Core {
32class System; 31class System;
@@ -46,8 +45,6 @@ class CachedShader;
46using Shader = std::shared_ptr<CachedShader>; 45using Shader = std::shared_ptr<CachedShader>;
47using Maxwell = Tegra::Engines::Maxwell3D::Regs; 46using Maxwell = Tegra::Engines::Maxwell3D::Regs;
48 47
49using ProgramCode = std::vector<u64>;
50
51struct GraphicsPipelineCacheKey { 48struct GraphicsPipelineCacheKey {
52 FixedPipelineState fixed_state; 49 FixedPipelineState fixed_state;
53 RenderPassParams renderpass_params; 50 RenderPassParams renderpass_params;
@@ -108,7 +105,8 @@ namespace Vulkan {
108class CachedShader final : public RasterizerCacheObject { 105class CachedShader final : public RasterizerCacheObject {
109public: 106public:
110 explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, 107 explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
111 VAddr cpu_addr, ProgramCode program_code, u32 main_offset); 108 VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code,
109 u32 main_offset);
112 ~CachedShader(); 110 ~CachedShader();
113 111
114 GPUVAddr GetGpuAddr() const { 112 GPUVAddr GetGpuAddr() const {
@@ -140,7 +138,7 @@ private:
140 Tegra::Engines::ShaderType stage); 138 Tegra::Engines::ShaderType stage);
141 139
142 GPUVAddr gpu_addr{}; 140 GPUVAddr gpu_addr{};
143 ProgramCode program_code; 141 VideoCommon::Shader::ProgramCode program_code;
144 VideoCommon::Shader::Registry registry; 142 VideoCommon::Shader::Registry registry;
145 VideoCommon::Shader::ShaderIR shader_ir; 143 VideoCommon::Shader::ShaderIR shader_ir;
146 ShaderEntries entries; 144 ShaderEntries entries;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 813f7c162..bc91c48cc 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -4,7 +4,6 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include <cstddef> 6#include <cstddef>
7#include <cstdint>
8#include <utility> 7#include <utility>
9#include <vector> 8#include <vector>
10 9
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b63784f4b..40119e6d3 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <cstddef> 7#include <cstddef>
8#include <cstdint>
9#include <memory> 8#include <memory>
10#include <utility> 9#include <utility>
11#include <vector> 10#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index c821b1229..722fde384 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -9,14 +9,12 @@
9#include <vector> 9#include <vector>
10 10
11#include <boost/container/static_vector.hpp> 11#include <boost/container/static_vector.hpp>
12#include <boost/functional/hash.hpp>
13 12
14#include "common/alignment.h" 13#include "common/alignment.h"
15#include "common/assert.h" 14#include "common/assert.h"
16#include "common/logging/log.h" 15#include "common/logging/log.h"
17#include "common/microprofile.h" 16#include "common/microprofile.h"
18#include "core/core.h" 17#include "core/core.h"
19#include "core/memory.h"
20#include "core/settings.h" 18#include "core/settings.h"
21#include "video_core/engines/kepler_compute.h" 19#include "video_core/engines/kepler_compute.h"
22#include "video_core/engines/maxwell_3d.h" 20#include "video_core/engines/maxwell_3d.h"
@@ -119,14 +117,13 @@ template <typename Engine, typename Entry>
119Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, 117Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
120 std::size_t stage, std::size_t index = 0) { 118 std::size_t stage, std::size_t index = 0) {
121 const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage); 119 const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
122 if (entry.IsBindless()) { 120 if (entry.is_bindless) {
123 const Tegra::Texture::TextureHandle tex_handle = 121 const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
124 engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
125 return engine.GetTextureInfo(tex_handle); 122 return engine.GetTextureInfo(tex_handle);
126 } 123 }
127 const auto& gpu_profile = engine.AccessGuestDriverProfile(); 124 const auto& gpu_profile = engine.AccessGuestDriverProfile();
128 const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize()); 125 const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
129 const u32 offset = entry.GetOffset() + entry_offset; 126 const u32 offset = entry.offset + entry_offset;
130 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) { 127 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
131 return engine.GetStageTexture(stage_type, offset); 128 return engine.GetStageTexture(stage_type, offset);
132 } else { 129 } else {
@@ -656,7 +653,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
656 Texceptions texceptions; 653 Texceptions texceptions;
657 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { 654 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
658 if (update_rendertargets) { 655 if (update_rendertargets) {
659 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt); 656 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
660 } 657 }
661 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { 658 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
662 texceptions[rt] = true; 659 texceptions[rt] = true;
@@ -664,7 +661,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
664 } 661 }
665 662
666 if (update_rendertargets) { 663 if (update_rendertargets) {
667 zeta_attachment = texture_cache.GetDepthBufferSurface(); 664 zeta_attachment = texture_cache.GetDepthBufferSurface(true);
668 } 665 }
669 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { 666 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
670 texceptions[ZETA_TEXCEPTION_INDEX] = true; 667 texceptions[ZETA_TEXCEPTION_INDEX] = true;
@@ -896,6 +893,9 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
896 893
897void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params, 894void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params,
898 bool is_indexed) { 895 bool is_indexed) {
896 if (params.num_vertices == 0) {
897 return;
898 }
899 const auto& regs = system.GPU().Maxwell3D().regs; 899 const auto& regs = system.GPU().Maxwell3D().regs;
900 switch (regs.draw.topology) { 900 switch (regs.draw.topology) {
901 case Maxwell::PrimitiveTopology::Quads: { 901 case Maxwell::PrimitiveTopology::Quads: {
@@ -971,7 +971,7 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
971 MICROPROFILE_SCOPE(Vulkan_Textures); 971 MICROPROFILE_SCOPE(Vulkan_Textures);
972 const auto& gpu = system.GPU().Maxwell3D(); 972 const auto& gpu = system.GPU().Maxwell3D();
973 for (const auto& entry : entries.samplers) { 973 for (const auto& entry : entries.samplers) {
974 for (std::size_t i = 0; i < entry.Size(); ++i) { 974 for (std::size_t i = 0; i < entry.size; ++i) {
975 const auto texture = GetTextureInfo(gpu, entry, stage, i); 975 const auto texture = GetTextureInfo(gpu, entry, stage, i);
976 SetupTexture(texture, entry); 976 SetupTexture(texture, entry);
977 } 977 }
@@ -1023,7 +1023,7 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
1023 MICROPROFILE_SCOPE(Vulkan_Textures); 1023 MICROPROFILE_SCOPE(Vulkan_Textures);
1024 const auto& gpu = system.GPU().KeplerCompute(); 1024 const auto& gpu = system.GPU().KeplerCompute();
1025 for (const auto& entry : entries.samplers) { 1025 for (const auto& entry : entries.samplers) {
1026 for (std::size_t i = 0; i < entry.Size(); ++i) { 1026 for (std::size_t i = 0; i < entry.size; ++i) {
1027 const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i); 1027 const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
1028 SetupTexture(texture, entry); 1028 SetupTexture(texture, entry);
1029 } 1029 }
@@ -1105,7 +1105,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
1105void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) { 1105void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
1106 auto view = texture_cache.GetImageSurface(tic, entry); 1106 auto view = texture_cache.GetImageSurface(tic, entry);
1107 1107
1108 if (entry.IsWritten()) { 1108 if (entry.is_written) {
1109 view->MarkAsModified(texture_cache.Tick()); 1109 view->MarkAsModified(texture_cache.Tick());
1110 } 1110 }
1111 1111
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index d41a7929e..703a094c3 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -14,7 +14,6 @@
14#include <boost/functional/hash.hpp> 14#include <boost/functional/hash.hpp>
15 15
16#include "common/common_types.h" 16#include "common/common_types.h"
17#include "video_core/memory_manager.h"
18#include "video_core/rasterizer_accelerated.h" 17#include "video_core/rasterizer_accelerated.h"
19#include "video_core/rasterizer_interface.h" 18#include "video_core/rasterizer_interface.h"
20#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 19#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 07bbcf520..2687d8d95 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -2,11 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <cstring>
6#include <optional>
7#include <unordered_map> 5#include <unordered_map>
8 6
9#include "common/assert.h"
10#include "video_core/renderer_vulkan/maxwell_to_vk.h" 7#include "video_core/renderer_vulkan/maxwell_to_vk.h"
11#include "video_core/renderer_vulkan/vk_sampler_cache.h" 8#include "video_core/renderer_vulkan/vk_sampler_cache.h"
12#include "video_core/renderer_vulkan/wrapper.h" 9#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index ae7ba3eb5..82ec9180e 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -8,7 +8,6 @@
8#include <thread> 8#include <thread>
9#include <utility> 9#include <utility>
10 10
11#include "common/assert.h"
12#include "common/microprofile.h" 11#include "common/microprofile.h"
13#include "video_core/renderer_vulkan/vk_device.h" 12#include "video_core/renderer_vulkan/vk_device.h"
14#include "video_core/renderer_vulkan/vk_query_cache.h" 13#include "video_core/renderer_vulkan/vk_query_cache.h"
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 82a8adc69..970a65566 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -7,7 +7,6 @@
7#include <atomic> 7#include <atomic>
8#include <condition_variable> 8#include <condition_variable>
9#include <memory> 9#include <memory>
10#include <optional>
11#include <stack> 10#include <stack>
12#include <thread> 11#include <thread>
13#include <utility> 12#include <utility>
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index aaa138f52..18678968c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -103,8 +103,8 @@ struct GenericVaryingDescription {
103}; 103};
104 104
105spv::Dim GetSamplerDim(const Sampler& sampler) { 105spv::Dim GetSamplerDim(const Sampler& sampler) {
106 ASSERT(!sampler.IsBuffer()); 106 ASSERT(!sampler.is_buffer);
107 switch (sampler.GetType()) { 107 switch (sampler.type) {
108 case Tegra::Shader::TextureType::Texture1D: 108 case Tegra::Shader::TextureType::Texture1D:
109 return spv::Dim::Dim1D; 109 return spv::Dim::Dim1D;
110 case Tegra::Shader::TextureType::Texture2D: 110 case Tegra::Shader::TextureType::Texture2D:
@@ -114,13 +114,13 @@ spv::Dim GetSamplerDim(const Sampler& sampler) {
114 case Tegra::Shader::TextureType::TextureCube: 114 case Tegra::Shader::TextureType::TextureCube:
115 return spv::Dim::Cube; 115 return spv::Dim::Cube;
116 default: 116 default:
117 UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<u32>(sampler.GetType())); 117 UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<int>(sampler.type));
118 return spv::Dim::Dim2D; 118 return spv::Dim::Dim2D;
119 } 119 }
120} 120}
121 121
122std::pair<spv::Dim, bool> GetImageDim(const Image& image) { 122std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
123 switch (image.GetType()) { 123 switch (image.type) {
124 case Tegra::Shader::ImageType::Texture1D: 124 case Tegra::Shader::ImageType::Texture1D:
125 return {spv::Dim::Dim1D, false}; 125 return {spv::Dim::Dim1D, false};
126 case Tegra::Shader::ImageType::TextureBuffer: 126 case Tegra::Shader::ImageType::TextureBuffer:
@@ -134,7 +134,7 @@ std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
134 case Tegra::Shader::ImageType::Texture3D: 134 case Tegra::Shader::ImageType::Texture3D:
135 return {spv::Dim::Dim3D, false}; 135 return {spv::Dim::Dim3D, false};
136 default: 136 default:
137 UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType())); 137 UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<int>(image.type));
138 return {spv::Dim::Dim2D, false}; 138 return {spv::Dim::Dim2D, false};
139 } 139 }
140} 140}
@@ -879,11 +879,11 @@ private:
879 879
880 u32 DeclareTexelBuffers(u32 binding) { 880 u32 DeclareTexelBuffers(u32 binding) {
881 for (const auto& sampler : ir.GetSamplers()) { 881 for (const auto& sampler : ir.GetSamplers()) {
882 if (!sampler.IsBuffer()) { 882 if (!sampler.is_buffer) {
883 continue; 883 continue;
884 } 884 }
885 ASSERT(!sampler.IsArray()); 885 ASSERT(!sampler.is_array);
886 ASSERT(!sampler.IsShadow()); 886 ASSERT(!sampler.is_shadow);
887 887
888 constexpr auto dim = spv::Dim::Buffer; 888 constexpr auto dim = spv::Dim::Buffer;
889 constexpr int depth = 0; 889 constexpr int depth = 0;
@@ -894,23 +894,23 @@ private:
894 const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); 894 const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
895 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); 895 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
896 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 896 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
897 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); 897 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
898 Decorate(id, spv::Decoration::Binding, binding++); 898 Decorate(id, spv::Decoration::Binding, binding++);
899 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 899 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
900 900
901 texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id}); 901 texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
902 } 902 }
903 return binding; 903 return binding;
904 } 904 }
905 905
906 u32 DeclareSamplers(u32 binding) { 906 u32 DeclareSamplers(u32 binding) {
907 for (const auto& sampler : ir.GetSamplers()) { 907 for (const auto& sampler : ir.GetSamplers()) {
908 if (sampler.IsBuffer()) { 908 if (sampler.is_buffer) {
909 continue; 909 continue;
910 } 910 }
911 const auto dim = GetSamplerDim(sampler); 911 const auto dim = GetSamplerDim(sampler);
912 const int depth = sampler.IsShadow() ? 1 : 0; 912 const int depth = sampler.is_shadow ? 1 : 0;
913 const int arrayed = sampler.IsArray() ? 1 : 0; 913 const int arrayed = sampler.is_array ? 1 : 0;
914 constexpr bool ms = false; 914 constexpr bool ms = false;
915 constexpr int sampled = 1; 915 constexpr int sampled = 1;
916 constexpr auto format = spv::ImageFormat::Unknown; 916 constexpr auto format = spv::ImageFormat::Unknown;
@@ -918,17 +918,17 @@ private:
918 const Id sampler_type = TypeSampledImage(image_type); 918 const Id sampler_type = TypeSampledImage(image_type);
919 const Id sampler_pointer_type = 919 const Id sampler_pointer_type =
920 TypePointer(spv::StorageClass::UniformConstant, sampler_type); 920 TypePointer(spv::StorageClass::UniformConstant, sampler_type);
921 const Id type = sampler.IsIndexed() 921 const Id type = sampler.is_indexed
922 ? TypeArray(sampler_type, Constant(t_uint, sampler.Size())) 922 ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
923 : sampler_type; 923 : sampler_type;
924 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type); 924 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
925 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 925 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
926 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); 926 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
927 Decorate(id, spv::Decoration::Binding, binding++); 927 Decorate(id, spv::Decoration::Binding, binding++);
928 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 928 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
929 929
930 sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type, 930 sampled_images.emplace(
931 sampler_pointer_type, id}); 931 sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
932 } 932 }
933 return binding; 933 return binding;
934 } 934 }
@@ -943,17 +943,17 @@ private:
943 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {}); 943 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
944 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); 944 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
945 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 945 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
946 AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex()))); 946 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
947 947
948 Decorate(id, spv::Decoration::Binding, binding++); 948 Decorate(id, spv::Decoration::Binding, binding++);
949 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 949 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
950 if (image.IsRead() && !image.IsWritten()) { 950 if (image.is_read && !image.is_written) {
951 Decorate(id, spv::Decoration::NonWritable); 951 Decorate(id, spv::Decoration::NonWritable);
952 } else if (image.IsWritten() && !image.IsRead()) { 952 } else if (image.is_written && !image.is_read) {
953 Decorate(id, spv::Decoration::NonReadable); 953 Decorate(id, spv::Decoration::NonReadable);
954 } 954 }
955 955
956 images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id}); 956 images.emplace(image.index, StorageImage{image_type, id});
957 } 957 }
958 return binding; 958 return binding;
959 } 959 }
@@ -1584,6 +1584,15 @@ private:
1584 return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat}; 1584 return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat};
1585 } 1585 }
1586 1586
1587 Expression LogicalAddCarry(Operation operation) {
1588 const Id op_a = AsUint(Visit(operation[0]));
1589 const Id op_b = AsUint(Visit(operation[1]));
1590
1591 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
1592 const Id carry = OpCompositeExtract(t_uint, result, 1);
1593 return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
1594 }
1595
1587 Expression LogicalAssign(Operation operation) { 1596 Expression LogicalAssign(Operation operation) {
1588 const Node& dest = operation[0]; 1597 const Node& dest = operation[0];
1589 const Node& src = operation[1]; 1598 const Node& src = operation[1];
@@ -1611,11 +1620,11 @@ private:
1611 1620
1612 Id GetTextureSampler(Operation operation) { 1621 Id GetTextureSampler(Operation operation) {
1613 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1622 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1614 ASSERT(!meta.sampler.IsBuffer()); 1623 ASSERT(!meta.sampler.is_buffer);
1615 1624
1616 const auto& entry = sampled_images.at(meta.sampler.GetIndex()); 1625 const auto& entry = sampled_images.at(meta.sampler.index);
1617 Id sampler = entry.variable; 1626 Id sampler = entry.variable;
1618 if (meta.sampler.IsIndexed()) { 1627 if (meta.sampler.is_indexed) {
1619 const Id index = AsInt(Visit(meta.index)); 1628 const Id index = AsInt(Visit(meta.index));
1620 sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index); 1629 sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
1621 } 1630 }
@@ -1624,8 +1633,8 @@ private:
1624 1633
1625 Id GetTextureImage(Operation operation) { 1634 Id GetTextureImage(Operation operation) {
1626 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1635 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1627 const u32 index = meta.sampler.GetIndex(); 1636 const u32 index = meta.sampler.index;
1628 if (meta.sampler.IsBuffer()) { 1637 if (meta.sampler.is_buffer) {
1629 const auto& entry = texel_buffers.at(index); 1638 const auto& entry = texel_buffers.at(index);
1630 return OpLoad(entry.image_type, entry.image); 1639 return OpLoad(entry.image_type, entry.image);
1631 } else { 1640 } else {
@@ -1636,7 +1645,7 @@ private:
1636 1645
1637 Id GetImage(Operation operation) { 1646 Id GetImage(Operation operation) {
1638 const auto& meta = std::get<MetaImage>(operation.GetMeta()); 1647 const auto& meta = std::get<MetaImage>(operation.GetMeta());
1639 const auto entry = images.at(meta.image.GetIndex()); 1648 const auto entry = images.at(meta.image.index);
1640 return OpLoad(entry.image_type, entry.image); 1649 return OpLoad(entry.image_type, entry.image);
1641 } 1650 }
1642 1651
@@ -1652,7 +1661,7 @@ private:
1652 } 1661 }
1653 if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) { 1662 if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
1654 // Add array coordinate for textures 1663 // Add array coordinate for textures
1655 if (meta->sampler.IsArray()) { 1664 if (meta->sampler.is_array) {
1656 Id array = AsInt(Visit(meta->array)); 1665 Id array = AsInt(Visit(meta->array));
1657 if (type == Type::Float) { 1666 if (type == Type::Float) {
1658 array = OpConvertSToF(t_float, array); 1667 array = OpConvertSToF(t_float, array);
@@ -1758,7 +1767,7 @@ private:
1758 operands.push_back(GetOffsetCoordinates(operation)); 1767 operands.push_back(GetOffsetCoordinates(operation));
1759 } 1768 }
1760 1769
1761 if (meta.sampler.IsShadow()) { 1770 if (meta.sampler.is_shadow) {
1762 const Id dref = AsFloat(Visit(meta.depth_compare)); 1771 const Id dref = AsFloat(Visit(meta.depth_compare));
1763 return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands), 1772 return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
1764 Type::Float}; 1773 Type::Float};
@@ -1773,7 +1782,7 @@ private:
1773 1782
1774 const Id coords = GetCoordinates(operation, Type::Float); 1783 const Id coords = GetCoordinates(operation, Type::Float);
1775 Id texture{}; 1784 Id texture{};
1776 if (meta.sampler.IsShadow()) { 1785 if (meta.sampler.is_shadow) {
1777 texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, 1786 texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
1778 AsFloat(Visit(meta.depth_compare))); 1787 AsFloat(Visit(meta.depth_compare)));
1779 } else { 1788 } else {
@@ -1800,8 +1809,8 @@ private:
1800 } 1809 }
1801 1810
1802 const Id lod = AsUint(Visit(operation[0])); 1811 const Id lod = AsUint(Visit(operation[0]));
1803 const std::size_t coords_count = [&]() { 1812 const std::size_t coords_count = [&meta] {
1804 switch (const auto type = meta.sampler.GetType(); type) { 1813 switch (const auto type = meta.sampler.type) {
1805 case Tegra::Shader::TextureType::Texture1D: 1814 case Tegra::Shader::TextureType::Texture1D:
1806 return 1; 1815 return 1;
1807 case Tegra::Shader::TextureType::Texture2D: 1816 case Tegra::Shader::TextureType::Texture2D:
@@ -1810,7 +1819,7 @@ private:
1810 case Tegra::Shader::TextureType::Texture3D: 1819 case Tegra::Shader::TextureType::Texture3D:
1811 return 3; 1820 return 3;
1812 default: 1821 default:
1813 UNREACHABLE_MSG("Invalid texture type={}", static_cast<u32>(type)); 1822 UNREACHABLE_MSG("Invalid texture type={}", static_cast<int>(type));
1814 return 2; 1823 return 2;
1815 } 1824 }
1816 }(); 1825 }();
@@ -1853,7 +1862,7 @@ private:
1853 const Id image = GetTextureImage(operation); 1862 const Id image = GetTextureImage(operation);
1854 const Id coords = GetCoordinates(operation, Type::Int); 1863 const Id coords = GetCoordinates(operation, Type::Int);
1855 Id fetch; 1864 Id fetch;
1856 if (meta.lod && !meta.sampler.IsBuffer()) { 1865 if (meta.lod && !meta.sampler.is_buffer) {
1857 fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod, 1866 fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod,
1858 AsInt(Visit(meta.lod))); 1867 AsInt(Visit(meta.lod)));
1859 } else { 1868 } else {
@@ -2518,6 +2527,8 @@ private:
2518 &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, 2527 &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>,
2519 &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, 2528 &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>,
2520 2529
2530 &SPIRVDecompiler::LogicalAddCarry,
2531
2521 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, 2532 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
2522 &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, 2533 &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
2523 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, 2534 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
@@ -2969,7 +2980,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
2969 entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written); 2980 entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written);
2970 } 2981 }
2971 for (const auto& sampler : ir.GetSamplers()) { 2982 for (const auto& sampler : ir.GetSamplers()) {
2972 if (sampler.IsBuffer()) { 2983 if (sampler.is_buffer) {
2973 entries.texel_buffers.emplace_back(sampler); 2984 entries.texel_buffers.emplace_back(sampler);
2974 } else { 2985 } else {
2975 entries.samplers.emplace_back(sampler); 2986 entries.samplers.emplace_back(sampler);
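
Most of the mechanical churn in vk_shader_decompiler.cpp (and in vk_rasterizer.cpp above and the shader decoders below) comes from the sampler and image metadata switching from accessor methods (GetIndex(), IsBuffer(), IsShadow(), ...) to plain public data members. Purely for orientation, this is the approximate shape those entries take after the change, reconstructed from nothing more than the member names used in this diff; field order, defaults and any members the commit does not touch are guesses:

// Illustrative reconstruction, not the real shader_ir headers.
#include "common/common_types.h"
#include "video_core/engines/shader_bytecode.h"

struct Sampler {
    u32 index = 0;        // Binding index assigned by the shader IR
    u32 offset = 0;       // Offset of the handle inside the bound constant buffer
    u32 buffer = 0;       // Constant buffer index, meaningful for bindless samplers
    u32 size = 1;         // Array length, greater than 1 only for indexed samplers
    Tegra::Shader::TextureType type{};
    bool is_array = false;
    bool is_shadow = false;
    bool is_buffer = false;   // Texel buffer rather than a sampled image
    bool is_indexed = false;
    bool is_bindless = false;
};

struct Image {
    u32 index = 0;
    u32 offset = 0;
    u32 buffer = 0;
    Tegra::Shader::ImageType type{};
    bool is_bindless = false;
    bool is_read = false;
    bool is_written = false;  // Drives the NonWritable/NonReadable decorations above
};
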
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index ffea4709e..f4c05ac3c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -5,11 +5,7 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <bitset>
9#include <memory>
10#include <set> 8#include <set>
11#include <type_traits>
12#include <utility>
13#include <vector> 9#include <vector>
14 10
15#include "common/common_types.h" 11#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index 784839327..112df9c71 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -4,8 +4,7 @@
4 4
5#include <cstring> 5#include <cstring>
6#include <memory> 6#include <memory>
7#include <vector> 7
8#include "common/alignment.h"
9#include "common/assert.h" 8#include "common/assert.h"
10#include "common/common_types.h" 9#include "common/common_types.h"
11#include "video_core/renderer_vulkan/vk_device.h" 10#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index be38d6697..d1d3f3cae 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <vector>
8#include "common/common_types.h" 7#include "common/common_types.h"
9#include "video_core/renderer_vulkan/wrapper.h" 8#include "video_core/renderer_vulkan/wrapper.h"
10 9
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index faf6418fd..3c4901437 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -5,8 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <climits> 7#include <climits>
8#include <unordered_map>
9#include <utility>
10#include <vector> 8#include <vector>
11 9
12#include "common/common_types.h" 10#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index de4c23120..55f43e61b 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,11 +10,9 @@
10#include <variant> 10#include <variant>
11#include <vector> 11#include <vector>
12 12
13#include "common/alignment.h"
14#include "common/assert.h" 13#include "common/assert.h"
15#include "common/common_types.h" 14#include "common/common_types.h"
16#include "core/core.h" 15#include "core/core.h"
17#include "core/memory.h"
18#include "video_core/engines/maxwell_3d.h" 16#include "video_core/engines/maxwell_3d.h"
19#include "video_core/morton.h" 17#include "video_core/morton.h"
20#include "video_core/renderer_vulkan/maxwell_to_vk.h" 18#include "video_core/renderer_vulkan/maxwell_to_vk.h"
@@ -26,7 +24,6 @@
26#include "video_core/renderer_vulkan/vk_texture_cache.h" 24#include "video_core/renderer_vulkan/vk_texture_cache.h"
27#include "video_core/renderer_vulkan/wrapper.h" 25#include "video_core/renderer_vulkan/wrapper.h"
28#include "video_core/surface.h" 26#include "video_core/surface.h"
29#include "video_core/textures/convert.h"
30 27
31namespace Vulkan { 28namespace Vulkan {
32 29
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 115595f28..f211ccb1e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,19 +7,13 @@
7#include <memory> 7#include <memory>
8#include <unordered_map> 8#include <unordered_map>
9 9
10#include "common/assert.h"
11#include "common/common_types.h" 10#include "common/common_types.h"
12#include "common/logging/log.h"
13#include "common/math_util.h"
14#include "video_core/gpu.h"
15#include "video_core/rasterizer_cache.h"
16#include "video_core/renderer_vulkan/vk_image.h" 11#include "video_core/renderer_vulkan/vk_image.h"
17#include "video_core/renderer_vulkan/vk_memory_manager.h" 12#include "video_core/renderer_vulkan/vk_memory_manager.h"
18#include "video_core/renderer_vulkan/vk_scheduler.h" 13#include "video_core/renderer_vulkan/vk_scheduler.h"
19#include "video_core/renderer_vulkan/wrapper.h" 14#include "video_core/renderer_vulkan/wrapper.h"
20#include "video_core/texture_cache/surface_base.h" 15#include "video_core/texture_cache/surface_base.h"
21#include "video_core/texture_cache/texture_cache.h" 16#include "video_core/texture_cache/texture_cache.h"
22#include "video_core/textures/decoders.h"
23 17
24namespace Core { 18namespace Core {
25class System; 19class System;
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 6ba2c9997..cc7e3dff4 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <type_traits>
8#include <variant> 7#include <variant>
9#include <boost/container/static_vector.hpp> 8#include <boost/container/static_vector.hpp>
10 9
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index e00a3fb70..8d86020f6 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -13,6 +13,7 @@
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "video_core/shader/ast.h" 14#include "video_core/shader/ast.h"
15#include "video_core/shader/control_flow.h" 15#include "video_core/shader/control_flow.h"
16#include "video_core/shader/memory_util.h"
16#include "video_core/shader/registry.h" 17#include "video_core/shader/registry.h"
17#include "video_core/shader/shader_ir.h" 18#include "video_core/shader/shader_ir.h"
18 19
@@ -115,17 +116,6 @@ Pred GetPredicate(u32 index, bool negated) {
115 return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL)); 116 return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
116} 117}
117 118
118/**
119 * Returns whether the instruction at the specified offset is a 'sched' instruction.
120 * Sched instructions always appear before a sequence of 3 instructions.
121 */
122constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
123 constexpr u32 SchedPeriod = 4;
124 u32 absolute_offset = offset - main_offset;
125
126 return (absolute_offset % SchedPeriod) == 0;
127}
128
129enum class ParseResult : u32 { 119enum class ParseResult : u32 {
130 ControlCaught, 120 ControlCaught,
131 BlockEnd, 121 BlockEnd,
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 87ac9ac6c..a75a5cc63 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -13,6 +13,7 @@
13#include "video_core/engines/shader_bytecode.h" 13#include "video_core/engines/shader_bytecode.h"
14#include "video_core/engines/shader_header.h" 14#include "video_core/engines/shader_header.h"
15#include "video_core/shader/control_flow.h" 15#include "video_core/shader/control_flow.h"
16#include "video_core/shader/memory_util.h"
16#include "video_core/shader/node_helper.h" 17#include "video_core/shader/node_helper.h"
17#include "video_core/shader/shader_ir.h" 18#include "video_core/shader/shader_ir.h"
18 19
@@ -23,17 +24,6 @@ using Tegra::Shader::OpCode;
23 24
24namespace { 25namespace {
25 26
26/**
27 * Returns whether the instruction at the specified offset is a 'sched' instruction.
28 * Sched instructions always appear before a sequence of 3 instructions.
29 */
30constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
31 constexpr u32 SchedPeriod = 4;
32 u32 absolute_offset = offset - main_offset;
33
34 return (absolute_offset % SchedPeriod) == 0;
35}
36
37void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver, 27void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
38 const std::list<Sampler>& used_samplers) { 28 const std::list<Sampler>& used_samplers) {
39 if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) { 29 if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
@@ -42,11 +32,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
42 u32 count{}; 32 u32 count{};
43 std::vector<u32> bound_offsets; 33 std::vector<u32> bound_offsets;
44 for (const auto& sampler : used_samplers) { 34 for (const auto& sampler : used_samplers) {
45 if (sampler.IsBindless()) { 35 if (sampler.is_bindless) {
46 continue; 36 continue;
47 } 37 }
48 ++count; 38 ++count;
49 bound_offsets.emplace_back(sampler.GetOffset()); 39 bound_offsets.emplace_back(sampler.offset);
50 } 40 }
51 if (count > 1) { 41 if (count > 1) {
52 gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets)); 42 gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
@@ -56,14 +46,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
56std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce, 46std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
57 VideoCore::GuestDriverProfile& gpu_driver, 47 VideoCore::GuestDriverProfile& gpu_driver,
58 const std::list<Sampler>& used_samplers) { 48 const std::list<Sampler>& used_samplers) {
59 const u32 base_offset = sampler_to_deduce.GetOffset(); 49 const u32 base_offset = sampler_to_deduce.offset;
60 u32 max_offset{std::numeric_limits<u32>::max()}; 50 u32 max_offset{std::numeric_limits<u32>::max()};
61 for (const auto& sampler : used_samplers) { 51 for (const auto& sampler : used_samplers) {
62 if (sampler.IsBindless()) { 52 if (sampler.is_bindless) {
63 continue; 53 continue;
64 } 54 }
65 if (sampler.GetOffset() > base_offset) { 55 if (sampler.offset > base_offset) {
66 max_offset = std::min(sampler.GetOffset(), max_offset); 56 max_offset = std::min(sampler.offset, max_offset);
67 } 57 }
68 } 58 }
69 if (max_offset == std::numeric_limits<u32>::max()) { 59 if (max_offset == std::numeric_limits<u32>::max()) {
@@ -363,14 +353,14 @@ void ShaderIR::PostDecode() {
363 return; 353 return;
364 } 354 }
365 for (auto& sampler : used_samplers) { 355 for (auto& sampler : used_samplers) {
366 if (!sampler.IsIndexed()) { 356 if (!sampler.is_indexed) {
367 continue; 357 continue;
368 } 358 }
369 if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) { 359 if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
370 sampler.SetSize(*size); 360 sampler.size = *size;
371 } else { 361 } else {
372 LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler"); 362 LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
373 sampler.SetSize(1); 363 sampler.size = 1;
374 } 364 }
375 } 365 }
376} 366}
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 9af8c606d..a041519b7 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -35,15 +35,38 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
35 case OpCode::Id::IADD_C: 35 case OpCode::Id::IADD_C:
36 case OpCode::Id::IADD_R: 36 case OpCode::Id::IADD_R:
37 case OpCode::Id::IADD_IMM: { 37 case OpCode::Id::IADD_IMM: {
38 UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented"); 38 UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD.SAT");
39 UNIMPLEMENTED_IF_MSG(instr.iadd.x && instr.generates_cc, "IADD.X Rd.CC");
39 40
40 op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); 41 op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true);
41 op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); 42 op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true);
42 43
43 const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); 44 Node value = Operation(OperationCode::UAdd, op_a, op_b);
44 45
45 SetInternalFlagsFromInteger(bb, value, instr.generates_cc); 46 if (instr.iadd.x) {
46 SetRegister(bb, instr.gpr0, value); 47 Node carry = GetInternalFlag(InternalFlag::Carry);
48 Node x = Operation(OperationCode::Select, std::move(carry), Immediate(1), Immediate(0));
49 value = Operation(OperationCode::UAdd, std::move(value), std::move(x));
50 }
51
52 if (instr.generates_cc) {
53 const Node i0 = Immediate(0);
54
55 Node zero = Operation(OperationCode::LogicalIEqual, value, i0);
56 Node sign = Operation(OperationCode::LogicalILessThan, value, i0);
57 Node carry = Operation(OperationCode::LogicalAddCarry, op_a, op_b);
58
59 Node pos_a = Operation(OperationCode::LogicalIGreaterThan, op_a, i0);
60 Node pos_b = Operation(OperationCode::LogicalIGreaterThan, op_b, i0);
61 Node pos = Operation(OperationCode::LogicalAnd, std::move(pos_a), std::move(pos_b));
62 Node overflow = Operation(OperationCode::LogicalAnd, pos, sign);
63
64 SetInternalFlag(bb, InternalFlag::Zero, std::move(zero));
65 SetInternalFlag(bb, InternalFlag::Sign, std::move(sign));
66 SetInternalFlag(bb, InternalFlag::Carry, std::move(carry));
67 SetInternalFlag(bb, InternalFlag::Overflow, std::move(overflow));
68 }
69 SetRegister(bb, instr.gpr0, std::move(value));
47 break; 70 break;
48 } 71 }
49 case OpCode::Id::IADD3_C: 72 case OpCode::Id::IADD3_C:
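
The IADD rewrite above stops going through SetInternalFlagsFromInteger and encodes the four condition-code flags explicitly: Zero and Sign from the result, Carry through the new LogicalAddCarry operation (lowered to OpIAddCarry plus a carry != 0 test in the SPIR-V decompiler earlier in this diff), and an Overflow term that only covers the positive plus positive gives negative case. A host-side C++ analogue of that flag computation, for illustration only:

// Not emulator code; mirrors the IR operations emitted by the new IADD path.
#include <cstdint>

struct CCFlags {
    bool zero, sign, carry, overflow;
};

CCFlags AddWithFlags(uint32_t a, uint32_t b, uint32_t& result) {
    result = a + b;                               // OperationCode::UAdd
    CCFlags cc{};
    cc.zero = result == 0;                        // LogicalIEqual(value, 0)
    cc.sign = static_cast<int32_t>(result) < 0;   // LogicalILessThan(value, 0)
    cc.carry = result < a;                        // LogicalAddCarry(op_a, op_b)
    cc.overflow = static_cast<int32_t>(a) > 0 &&  // pos_a && pos_b && sign
                  static_cast<int32_t>(b) > 0 && cc.sign;
    return cc;
}

For example, AddWithFlags(0xFFFFFFFFu, 1u, r) yields zero = true, carry = true and overflow = false, which is what the IR above would set for the same inputs.
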
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 85ee9aa5e..60b6ad72a 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
485Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) { 485Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
486 const auto offset = static_cast<u32>(image.index.Value()); 486 const auto offset = static_cast<u32>(image.index.Value());
487 487
488 const auto it = 488 const auto it = std::find_if(std::begin(used_images), std::end(used_images),
489 std::find_if(std::begin(used_images), std::end(used_images), 489 [offset](const Image& entry) { return entry.offset == offset; });
490 [offset](const Image& entry) { return entry.GetOffset() == offset; });
491 if (it != std::end(used_images)) { 490 if (it != std::end(used_images)) {
492 ASSERT(!it->IsBindless() && it->GetType() == it->GetType()); 491 ASSERT(!it->is_bindless && it->type == type);
493 return *it; 492 return *it;
494 } 493 }
495 494
@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
505 const auto buffer = std::get<1>(result); 504 const auto buffer = std::get<1>(result);
506 const auto offset = std::get<2>(result); 505 const auto offset = std::get<2>(result);
507 506
508 const auto it = 507 const auto it = std::find_if(std::begin(used_images), std::end(used_images),
509 std::find_if(std::begin(used_images), std::end(used_images), 508 [buffer, offset](const Image& entry) {
510 [buffer = buffer, offset = offset](const Image& entry) { 509 return entry.buffer == buffer && entry.offset == offset;
511 return entry.GetBuffer() == buffer && entry.GetOffset() == offset; 510 });
512 });
513 if (it != std::end(used_images)) { 511 if (it != std::end(used_images)) {
514 ASSERT(it->IsBindless() && it->GetType() == it->GetType()); 512 ASSERT(it->is_bindless && it->type == type);
515 return *it; 513 return *it;
516 } 514 }
517 515
diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp
index 8d54cce34..6116c31aa 100644
--- a/src/video_core/shader/decode/register_set_predicate.cpp
+++ b/src/video_core/shader/decode/register_set_predicate.cpp
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <utility>
6
5#include "common/assert.h" 7#include "common/assert.h"
6#include "common/common_types.h" 8#include "common/common_types.h"
7#include "video_core/engines/shader_bytecode.h" 9#include "video_core/engines/shader_bytecode.h"
@@ -10,20 +12,20 @@
10 12
11namespace VideoCommon::Shader { 13namespace VideoCommon::Shader {
12 14
15using std::move;
13using Tegra::Shader::Instruction; 16using Tegra::Shader::Instruction;
14using Tegra::Shader::OpCode; 17using Tegra::Shader::OpCode;
15 18
16namespace { 19namespace {
17constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7; 20constexpr u64 NUM_CONDITION_CODES = 4;
18} 21constexpr u64 NUM_PREDICATES = 7;
22} // namespace
19 23
20u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) { 24u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
21 const Instruction instr = {program_code[pc]}; 25 const Instruction instr = {program_code[pc]};
22 const auto opcode = OpCode::Decode(instr); 26 const auto opcode = OpCode::Decode(instr);
23 27
24 UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr); 28 Node apply_mask = [this, opcode, instr] {
25
26 const Node apply_mask = [&] {
27 switch (opcode->get().GetId()) { 29 switch (opcode->get().GetId()) {
28 case OpCode::Id::R2P_IMM: 30 case OpCode::Id::R2P_IMM:
29 case OpCode::Id::P2R_IMM: 31 case OpCode::Id::P2R_IMM:
@@ -34,39 +36,43 @@ u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
34 } 36 }
35 }(); 37 }();
36 38
37 const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8; 39 const u32 offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
40
41 const bool cc = instr.p2r_r2p.mode == Tegra::Shader::R2pMode::Cc;
42 const u64 num_entries = cc ? NUM_CONDITION_CODES : NUM_PREDICATES;
43 const auto get_entry = [this, cc](u64 entry) {
44 return cc ? GetInternalFlag(static_cast<InternalFlag>(entry)) : GetPredicate(entry);
45 };
38 46
39 switch (opcode->get().GetId()) { 47 switch (opcode->get().GetId()) {
40 case OpCode::Id::R2P_IMM: { 48 case OpCode::Id::R2P_IMM: {
41 const Node mask = GetRegister(instr.gpr8); 49 Node mask = GetRegister(instr.gpr8);
42 50
43 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { 51 for (u64 entry = 0; entry < num_entries; ++entry) {
44 const auto shift = static_cast<u32>(pred); 52 const u32 shift = static_cast<u32>(entry);
45 53
46 const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); 54 Node apply = BitfieldExtract(apply_mask, shift, 1);
47 const Node condition = 55 Node condition = Operation(OperationCode::LogicalUNotEqual, apply, Immediate(0));
48 Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
49 56
50 const Node value_compare = BitfieldExtract(mask, offset + shift, 1); 57 Node compare = BitfieldExtract(mask, offset + shift, 1);
51 const Node value = 58 Node value = Operation(OperationCode::LogicalUNotEqual, move(compare), Immediate(0));
52 Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0));
53 59
54 const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); 60 Node code = Operation(OperationCode::LogicalAssign, get_entry(entry), move(value));
55 bb.push_back(Conditional(condition, {code})); 61 bb.push_back(Conditional(condition, {move(code)}));
56 } 62 }
57 break; 63 break;
58 } 64 }
59 case OpCode::Id::P2R_IMM: { 65 case OpCode::Id::P2R_IMM: {
60 Node value = Immediate(0); 66 Node value = Immediate(0);
61 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { 67 for (u64 entry = 0; entry < num_entries; ++entry) {
62 Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred), 68 Node bit = Operation(OperationCode::Select, get_entry(entry), Immediate(1U << entry),
63 Immediate(0)); 69 Immediate(0));
64 value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit)); 70 value = Operation(OperationCode::UBitwiseOr, move(value), move(bit));
65 } 71 }
66 value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask); 72 value = Operation(OperationCode::UBitwiseAnd, move(value), apply_mask);
67 value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8); 73 value = BitfieldInsert(GetRegister(instr.gpr8), move(value), offset, 8);
68 74
69 SetRegister(bb, instr.gpr0, std::move(value)); 75 SetRegister(bb, instr.gpr0, move(value));
70 break; 76 break;
71 } 77 }
72 default: 78 default:
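The rework above drops the UNIMPLEMENTED_IF on R2pMode::Cc: the decoder now chooses between the 4 condition codes and the 7 programmable predicates once, before the opcode switch, and reads entries through a single get_entry lambda. A minimal standalone sketch of that selection, using plain stand-in types rather than the real Node/ShaderIR machinery:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Stand-ins; only the up-front mode selection mirrors the diff above.
    enum class R2pMode { Pr, Cc };

    constexpr std::uint64_t NUM_CONDITION_CODES = 4;
    constexpr std::uint64_t NUM_PREDICATES = 7;

    int main() {
        const R2pMode mode = R2pMode::Cc;

        // Decide once which register file is addressed and how entries are
        // read, instead of branching on the mode inside every loop iteration.
        const bool cc = mode == R2pMode::Cc;
        const std::uint64_t num_entries = cc ? NUM_CONDITION_CODES : NUM_PREDICATES;
        const auto get_entry = [cc](std::uint64_t entry) -> std::string {
            return (cc ? "internal flag " : "predicate ") + std::to_string(entry);
        };

        for (std::uint64_t entry = 0; entry < num_entries; ++entry) {
            std::cout << get_entry(entry) << '\n';
        }
        return 0;
    }

Hoisting the mode check keeps the per-entry loop identical for both register files.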
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index e68f1d305..8f0bb996e 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -139,15 +139,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
139 } 139 }
140 const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); 140 const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
141 141
142 const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare, false}; 142 SamplerInfo info;
143 const Sampler& sampler = *GetSampler(instr.sampler, info); 143 info.is_shadow = is_depth_compare;
144 const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
144 145
145 Node4 values; 146 Node4 values;
146 for (u32 element = 0; element < values.size(); ++element) { 147 for (u32 element = 0; element < values.size(); ++element) {
147 auto coords_copy = coords; 148 MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
148 MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {}, 149 {}, {}, component, element, {}};
149 {}, {}, component, element, {}}; 150 values[element] = Operation(OperationCode::TextureGather, meta, coords);
150 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
151 } 151 }
152 152
153 if (instr.tld4s.fp16_flag) { 153 if (instr.tld4s.fp16_flag) {
@@ -165,18 +165,20 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
165 "AOFFI is not implemented"); 165 "AOFFI is not implemented");
166 166
167 const bool is_array = instr.txd.is_array != 0; 167 const bool is_array = instr.txd.is_array != 0;
168 u64 base_reg = instr.gpr8.Value();
169 const auto derivate_reg = instr.gpr20.Value(); 168 const auto derivate_reg = instr.gpr20.Value();
170 const auto texture_type = instr.txd.texture_type.Value(); 169 const auto texture_type = instr.txd.texture_type.Value();
171 const auto coord_count = GetCoordCount(texture_type); 170 const auto coord_count = GetCoordCount(texture_type);
172 Node index_var{}; 171 u64 base_reg = instr.gpr8.Value();
173 const Sampler* sampler = 172 Node index_var;
174 is_bindless 173 SamplerInfo info;
175 ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false, false}}) 174 info.type = texture_type;
176 : GetSampler(instr.sampler, {{texture_type, is_array, false, false}}); 175 info.is_array = is_array;
176 const std::optional<Sampler> sampler = is_bindless
177 ? GetBindlessSampler(base_reg, info, index_var)
178 : GetSampler(instr.sampler, info);
177 Node4 values; 179 Node4 values;
178 if (sampler == nullptr) { 180 if (!sampler) {
179 std::generate(values.begin(), values.end(), [] { return Immediate(0); }); 181 std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
180 WriteTexInstructionFloat(bb, instr, values); 182 WriteTexInstructionFloat(bb, instr, values);
181 break; 183 break;
182 } 184 }
@@ -214,14 +216,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
214 is_bindless = true; 216 is_bindless = true;
215 [[fallthrough]]; 217 [[fallthrough]];
216 case OpCode::Id::TXQ: { 218 case OpCode::Id::TXQ: {
217 // TODO: The new commits on the texture refactor, change the way samplers work. 219 Node index_var;
218 // Sadly, not all texture instructions specify the type of texture their sampler 220 const std::optional<Sampler> sampler = is_bindless
219 // uses. This must be fixed at a later instance. 221 ? GetBindlessSampler(instr.gpr8, {}, index_var)
220 Node index_var{}; 222 : GetSampler(instr.sampler, {});
221 const Sampler* sampler = 223
222 is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler); 224 if (!sampler) {
223
224 if (sampler == nullptr) {
225 u32 indexer = 0; 225 u32 indexer = 0;
226 for (u32 element = 0; element < 4; ++element) { 226 for (u32 element = 0; element < 4; ++element) {
227 if (!instr.txq.IsComponentEnabled(element)) { 227 if (!instr.txq.IsComponentEnabled(element)) {
@@ -267,12 +267,17 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
267 UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), 267 UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
268 "NDV is not implemented"); 268 "NDV is not implemented");
269 269
270 auto texture_type = instr.tmml.texture_type.Value(); 270 const auto texture_type = instr.tmml.texture_type.Value();
271 Node index_var{}; 271 const bool is_array = instr.tmml.array != 0;
272 const Sampler* sampler = 272 SamplerInfo info;
273 is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler); 273 info.type = texture_type;
274 274 info.is_array = is_array;
275 if (sampler == nullptr) { 275 Node index_var;
276 const std::optional<Sampler> sampler =
277 is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
278 : GetSampler(instr.sampler, info);
279
280 if (!sampler) {
276 u32 indexer = 0; 281 u32 indexer = 0;
277 for (u32 element = 0; element < 2; ++element) { 282 for (u32 element = 0; element < 2; ++element) {
278 if (!instr.tmml.IsComponentEnabled(element)) { 283 if (!instr.tmml.IsComponentEnabled(element)) {
@@ -299,12 +304,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
299 coords.push_back(GetRegister(instr.gpr8.Value() + 1)); 304 coords.push_back(GetRegister(instr.gpr8.Value() + 1));
300 break; 305 break;
301 default: 306 default:
302 UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); 307 UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
303 308
304 // Fallback to interpreting as a 2D texture for now 309 // Fallback to interpreting as a 2D texture for now
305 coords.push_back(GetRegister(instr.gpr8.Value() + 0)); 310 coords.push_back(GetRegister(instr.gpr8.Value() + 0));
306 coords.push_back(GetRegister(instr.gpr8.Value() + 1)); 311 coords.push_back(GetRegister(instr.gpr8.Value() + 1));
307 texture_type = TextureType::Texture2D;
308 } 312 }
309 u32 indexer = 0; 313 u32 indexer = 0;
310 for (u32 element = 0; element < 2; ++element) { 314 for (u32 element = 0; element < 2; ++element) {
@@ -353,98 +357,103 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
353 return pc; 357 return pc;
354} 358}
355 359
356ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset, 360ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
357 std::optional<u32> buffer) { 361 std::optional<u32> buffer) {
358 if (sampler_info) { 362 if (info.IsComplete()) {
359 return *sampler_info; 363 return info;
360 } 364 }
361 const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset) 365 const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
362 : registry.ObtainBoundSampler(offset); 366 : registry.ObtainBoundSampler(offset);
363 if (!sampler) { 367 if (!sampler) {
364 LOG_WARNING(HW_GPU, "Unknown sampler info"); 368 LOG_WARNING(HW_GPU, "Unknown sampler info");
365 return SamplerInfo{TextureType::Texture2D, false, false, false}; 369 info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
366 } 370 info.is_array = info.is_array.value_or(false);
367 return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0, 371 info.is_shadow = info.is_shadow.value_or(false);
368 sampler->is_buffer != 0}; 372 info.is_buffer = info.is_buffer.value_or(false);
373 return info;
374 }
375 info.type = info.type.value_or(sampler->texture_type);
376 info.is_array = info.is_array.value_or(sampler->is_array != 0);
377 info.is_shadow = info.is_shadow.value_or(sampler->is_shadow != 0);
378 info.is_buffer = info.is_buffer.value_or(sampler->is_buffer != 0);
379 return info;
369} 380}
370 381
371const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, 382std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
372 std::optional<SamplerInfo> sampler_info) { 383 SamplerInfo sampler_info) {
373 const auto offset = static_cast<u32>(sampler.index.Value()); 384 const auto offset = static_cast<u32>(sampler.index.Value());
374 const auto info = GetSamplerInfo(sampler_info, offset); 385 const auto info = GetSamplerInfo(sampler_info, offset);
375 386
376 // If this sampler has already been used, return the existing mapping. 387 // If this sampler has already been used, return the existing mapping.
377 const auto it = 388 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
378 std::find_if(used_samplers.begin(), used_samplers.end(), 389 [offset](const Sampler& entry) { return entry.offset == offset; });
379 [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
380 if (it != used_samplers.end()) { 390 if (it != used_samplers.end()) {
381 ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && 391 ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
382 it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer); 392 it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
383 return &*it; 393 return *it;
384 } 394 }
385 395
386 // Otherwise create a new mapping for this sampler 396 // Otherwise create a new mapping for this sampler
387 const auto next_index = static_cast<u32>(used_samplers.size()); 397 const auto next_index = static_cast<u32>(used_samplers.size());
388 return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow, 398 return used_samplers.emplace_back(next_index, offset, *info.type, *info.is_array,
389 info.is_buffer, false); 399 *info.is_shadow, *info.is_buffer, false);
390} 400}
391 401
392const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var, 402std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
393 std::optional<SamplerInfo> sampler_info) { 403 Node& index_var) {
394 const Node sampler_register = GetRegister(reg); 404 const Node sampler_register = GetRegister(reg);
395 const auto [base_node, tracked_sampler_info] = 405 const auto [base_node, tracked_sampler_info] =
396 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size())); 406 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
397 ASSERT(base_node != nullptr); 407 ASSERT(base_node != nullptr);
398 if (base_node == nullptr) { 408 if (base_node == nullptr) {
399 return nullptr; 409 return std::nullopt;
400 } 410 }
401 411
402 if (const auto bindless_sampler_info = 412 if (const auto bindless_sampler_info =
403 std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) { 413 std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
404 const u32 buffer = bindless_sampler_info->GetIndex(); 414 const u32 buffer = bindless_sampler_info->GetIndex();
405 const u32 offset = bindless_sampler_info->GetOffset(); 415 const u32 offset = bindless_sampler_info->GetOffset();
406 const auto info = GetSamplerInfo(sampler_info, offset, buffer); 416 info = GetSamplerInfo(info, offset, buffer);
407 417
408 // If this sampler has already been used, return the existing mapping. 418 // If this sampler has already been used, return the existing mapping.
409 const auto it = 419 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
410 std::find_if(used_samplers.begin(), used_samplers.end(), 420 [buffer = buffer, offset = offset](const Sampler& entry) {
411 [buffer = buffer, offset = offset](const Sampler& entry) { 421 return entry.buffer == buffer && entry.offset == offset;
412 return entry.GetBuffer() == buffer && entry.GetOffset() == offset; 422 });
413 });
414 if (it != used_samplers.end()) { 423 if (it != used_samplers.end()) {
415 ASSERT(it->IsBindless() && it->GetType() == info.type && 424 ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
416 it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow); 425 it->is_shadow == info.is_shadow);
417 return &*it; 426 return *it;
418 } 427 }
419 428
420 // Otherwise create a new mapping for this sampler 429 // Otherwise create a new mapping for this sampler
421 const auto next_index = static_cast<u32>(used_samplers.size()); 430 const auto next_index = static_cast<u32>(used_samplers.size());
422 return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array, 431 return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
423 info.is_shadow, info.is_buffer, false); 432 *info.is_shadow, *info.is_buffer, false);
424 } else if (const auto array_sampler_info = 433 }
425 std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) { 434 if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
426 const u32 base_offset = array_sampler_info->GetBaseOffset() / 4; 435 const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
427 index_var = GetCustomVariable(array_sampler_info->GetIndexVar()); 436 index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
428 const auto info = GetSamplerInfo(sampler_info, base_offset); 437 info = GetSamplerInfo(info, base_offset);
429 438
430 // If this sampler has already been used, return the existing mapping. 439 // If this sampler has already been used, return the existing mapping.
431 const auto it = std::find_if( 440 const auto it = std::find_if(
432 used_samplers.begin(), used_samplers.end(), 441 used_samplers.begin(), used_samplers.end(),
433 [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; }); 442 [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
434 if (it != used_samplers.end()) { 443 if (it != used_samplers.end()) {
435 ASSERT(!it->IsBindless() && it->GetType() == info.type && 444 ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
436 it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow && 445 it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
437 it->IsBuffer() == info.is_buffer && it->IsIndexed()); 446 it->is_indexed);
438 return &*it; 447 return *it;
439 } 448 }
440 449
441 uses_indexed_samplers = true; 450 uses_indexed_samplers = true;
442 // Otherwise create a new mapping for this sampler 451 // Otherwise create a new mapping for this sampler
443 const auto next_index = static_cast<u32>(used_samplers.size()); 452 const auto next_index = static_cast<u32>(used_samplers.size());
444 return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array, 453 return used_samplers.emplace_back(next_index, base_offset, *info.type, *info.is_array,
445 info.is_shadow, info.is_buffer, true); 454 *info.is_shadow, *info.is_buffer, true);
446 } 455 }
447 return nullptr; 456 return std::nullopt;
448} 457}
449 458
450void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { 459void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
@@ -529,10 +538,16 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
529 ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow, 538 ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
530 "Illegal texture type"); 539 "Illegal texture type");
531 540
532 const SamplerInfo info{texture_type, is_array, is_shadow, false}; 541 SamplerInfo info;
542 info.type = texture_type;
543 info.is_array = is_array;
544 info.is_shadow = is_shadow;
545 info.is_buffer = false;
546
533 Node index_var; 547 Node index_var;
534 const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info) 548 const std::optional<Sampler> sampler = is_bindless
535 : GetSampler(instr.sampler, info); 549 ? GetBindlessSampler(*bindless_reg, info, index_var)
550 : GetSampler(instr.sampler, info);
536 if (!sampler) { 551 if (!sampler) {
537 return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)}; 552 return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
538 } 553 }
@@ -683,12 +698,17 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
683 698
684 u64 parameter_register = instr.gpr20.Value(); 699 u64 parameter_register = instr.gpr20.Value();
685 700
686 const SamplerInfo info{texture_type, is_array, depth_compare, false}; 701 SamplerInfo info;
687 Node index_var{}; 702 info.type = texture_type;
688 const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info) 703 info.is_array = is_array;
689 : GetSampler(instr.sampler, info); 704 info.is_shadow = depth_compare;
705
706 Node index_var;
707 const std::optional<Sampler> sampler =
708 is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
709 : GetSampler(instr.sampler, info);
690 Node4 values; 710 Node4 values;
691 if (sampler == nullptr) { 711 if (!sampler) {
692 for (u32 element = 0; element < values.size(); ++element) { 712 for (u32 element = 0; element < values.size(); ++element) {
693 values[element] = Immediate(0); 713 values[element] = Immediate(0);
694 } 714 }
@@ -743,12 +763,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
743 // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr}; 763 // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
744 // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr}; 764 // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
745 765
746 const auto& sampler = *GetSampler(instr.sampler); 766 const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
747 767
748 Node4 values; 768 Node4 values;
749 for (u32 element = 0; element < values.size(); ++element) { 769 for (u32 element = 0; element < values.size(); ++element) {
750 auto coords_copy = coords; 770 auto coords_copy = coords;
751 MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}}; 771 MetaTexture meta{*sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
752 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 772 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
753 } 773 }
754 774
@@ -756,7 +776,11 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
756} 776}
757 777
758Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { 778Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
759 const Sampler& sampler = *GetSampler(instr.sampler); 779 SamplerInfo info;
780 info.type = texture_type;
781 info.is_array = is_array;
782 info.is_shadow = false;
783 const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
760 784
761 const std::size_t type_coord_count = GetCoordCount(texture_type); 785 const std::size_t type_coord_count = GetCoordCount(texture_type);
762 const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; 786 const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
@@ -784,7 +808,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
784 Node4 values; 808 Node4 values;
785 for (u32 element = 0; element < values.size(); ++element) { 809 for (u32 element = 0; element < values.size(); ++element) {
786 auto coords_copy = coords; 810 auto coords_copy = coords;
787 MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}}; 811 MetaTexture meta{*sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
788 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 812 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
789 } 813 }
790 return values; 814 return values;
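Throughout this file the sampler helpers now return std::optional<Sampler> by value rather than a pointer into used_samplers, so failure checks become `if (!sampler)` and uses dereference with `*sampler`. A hedged sketch of the find-or-create pattern with simplified stand-in types (not the real Sampler from node.h):

    #include <algorithm>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Simplified stand-in for the sampler entries kept by the shader IR.
    struct Sampler {
        std::uint32_t index = 0;
        std::uint32_t offset = 0;
        bool is_shadow = false;
    };

    // Return an existing mapping by value when the offset was seen before,
    // otherwise append a new entry. std::nullopt is reserved for failure paths
    // (e.g. an untrackable bindless handle), which this sketch does not model.
    std::optional<Sampler> GetSampler(std::vector<Sampler>& used_samplers,
                                      std::uint32_t offset, bool is_shadow) {
        const auto it = std::find_if(
            used_samplers.begin(), used_samplers.end(),
            [offset](const Sampler& entry) { return entry.offset == offset; });
        if (it != used_samplers.end()) {
            return *it;
        }
        const auto next_index = static_cast<std::uint32_t>(used_samplers.size());
        return used_samplers.emplace_back(Sampler{next_index, offset, is_shadow});
    }

    int main() {
        std::vector<Sampler> used_samplers;
        const auto a = GetSampler(used_samplers, 8, false); // creates index 0
        const auto b = GetSampler(used_samplers, 8, false); // reuses it
        return (a && b && a->index == b->index) ? 0 : 1;
    }

Returning a copy avoids the dangling-pointer hazard of handing out a `const Sampler*` into a std::vector that may later reallocate; the struct is small enough that the copy is cheap.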
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
new file mode 100644
index 000000000..074f21691
--- /dev/null
+++ b/src/video_core/shader/memory_util.cpp
@@ -0,0 +1,77 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstddef>
7
8#include <boost/container_hash/hash.hpp>
9
10#include "common/common_types.h"
11#include "core/core.h"
12#include "video_core/engines/maxwell_3d.h"
13#include "video_core/memory_manager.h"
14#include "video_core/shader/memory_util.h"
15#include "video_core/shader/shader_ir.h"
16
17namespace VideoCommon::Shader {
18
19GPUVAddr GetShaderAddress(Core::System& system,
20 Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
21 const auto& gpu{system.GPU().Maxwell3D()};
22 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
23 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
24}
25
26bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
27 // Sched instructions appear once every 4 instructions.
28 constexpr std::size_t SchedPeriod = 4;
29 const std::size_t absolute_offset = offset - main_offset;
30 return (absolute_offset % SchedPeriod) == 0;
31}
32
33std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
34 // This is the encoded version of BRA that jumps to itself. All Nvidia
35 // shaders end with one.
36 static constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
37 static constexpr u64 MASK = 0xFFFFFFFFFF7FFFFFULL;
38
39 const std::size_t start_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
40 std::size_t offset = start_offset;
41 while (offset < program.size()) {
42 const u64 instruction = program[offset];
43 if (!IsSchedInstruction(offset, start_offset)) {
44 if ((instruction & MASK) == SELF_JUMPING_BRANCH) {
45 // End on Maxwell's "nop" instruction
46 break;
47 }
48 if (instruction == 0) {
49 break;
50 }
51 }
52 ++offset;
53 }
54 // The last instruction is included in the program size
55 return std::min(offset + 1, program.size());
56}
57
58ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
59 const u8* host_ptr, bool is_compute) {
60 ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
61 ASSERT_OR_EXECUTE(host_ptr != nullptr, { return code; });
62 memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
63 code.resize(CalculateProgramSize(code, is_compute));
64 return code;
65}
66
67u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
68 const ProgramCode& code_b) {
69 u64 unique_identifier = boost::hash_value(code);
70 if (is_a) {
71 // VertexA programs include two programs
72 boost::hash_combine(unique_identifier, boost::hash_value(code_b));
73 }
74 return unique_identifier;
75}
76
77} // namespace VideoCommon::Shader
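The new file also centralises the shader-stream hashing used by the shader caches. A short usage sketch of the same boost::container_hash approach; the Hash wrapper and its pointer parameter are illustrative only, standing in for GetUniqueIdentifier's is_a/code_b pair:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    #include <boost/container_hash/hash.hpp>

    using ProgramCode = std::vector<std::uint64_t>;

    // Hash the main program stream and, for a VertexA program, fold the second
    // stream into the same seed, as GetUniqueIdentifier does above.
    std::size_t Hash(const ProgramCode& code, const ProgramCode* code_b = nullptr) {
        std::size_t unique_identifier = boost::hash_value(code);
        if (code_b != nullptr) {
            boost::hash_combine(unique_identifier, boost::hash_value(*code_b));
        }
        return unique_identifier;
    }

    int main() {
        const ProgramCode a{0xE2400FFFFF07000FULL, 0};
        const ProgramCode b{0x5C98078000870001ULL};
        std::cout << std::hex << Hash(a) << ' ' << Hash(a, &b) << '\n';
        return 0;
    }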
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
new file mode 100644
index 000000000..be90d24fd
--- /dev/null
+++ b/src/video_core/shader/memory_util.h
@@ -0,0 +1,47 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <cstddef>
8#include <vector>
9
10#include "common/common_types.h"
11#include "video_core/engines/maxwell_3d.h"
12#include "video_core/engines/shader_type.h"
13
14namespace Core {
15class System;
16}
17
18namespace Tegra {
19class MemoryManager;
20}
21
22namespace VideoCommon::Shader {
23
24using ProgramCode = std::vector<u64>;
25
26constexpr u32 STAGE_MAIN_OFFSET = 10;
27constexpr u32 KERNEL_MAIN_OFFSET = 0;
28
29/// Gets the address for the specified shader stage program
30GPUVAddr GetShaderAddress(Core::System& system,
31 Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);
32
33/// Gets if the current instruction offset is a scheduler instruction
34bool IsSchedInstruction(std::size_t offset, std::size_t main_offset);
35
36/// Calculates the size of a program stream
37std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute);
38
39/// Gets the shader program code from memory for the specified address
40ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
41 const u8* host_ptr, bool is_compute);
42
43/// Hashes one (or two) program streams
44u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
45 const ProgramCode& code_b = {});
46
47} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3eee961f5..601c822d2 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -132,6 +132,8 @@ enum class OperationCode {
132 LogicalUNotEqual, /// (uint a, uint b) -> bool 132 LogicalUNotEqual, /// (uint a, uint b) -> bool
133 LogicalUGreaterEqual, /// (uint a, uint b) -> bool 133 LogicalUGreaterEqual, /// (uint a, uint b) -> bool
134 134
135 LogicalAddCarry, /// (uint a, uint b) -> bool
136
135 Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 137 Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
136 Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 138 Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
137 Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 139 Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
@@ -265,76 +267,30 @@ class ArraySamplerNode;
265using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>; 267using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
266using TrackSampler = std::shared_ptr<TrackSamplerData>; 268using TrackSampler = std::shared_ptr<TrackSamplerData>;
267 269
268class Sampler { 270struct Sampler {
269public: 271 /// Bound samplers constructor
270 /// This constructor is for bound samplers
271 constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type, 272 constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
272 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed) 273 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
273 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow}, 274 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
274 is_buffer{is_buffer}, is_indexed{is_indexed} {} 275 is_buffer{is_buffer}, is_indexed{is_indexed} {}
275 276
276 /// This constructor is for bindless samplers 277 /// Bindless samplers constructor
277 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type, 278 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
278 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed) 279 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
279 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array}, 280 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
280 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {} 281 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
281 282
282 constexpr u32 GetIndex() const { 283 u32 index = 0; ///< Emulated index given for this sampler.

283 return index; 284 u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
284 } 285 u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
285 286 u32 size = 1; ///< Size of the sampler.
286 constexpr u32 GetOffset() const {
287 return offset;
288 }
289
290 constexpr u32 GetBuffer() const {
291 return buffer;
292 }
293
294 constexpr Tegra::Shader::TextureType GetType() const {
295 return type;
296 }
297
298 constexpr bool IsArray() const {
299 return is_array;
300 }
301
302 constexpr bool IsShadow() const {
303 return is_shadow;
304 }
305
306 constexpr bool IsBuffer() const {
307 return is_buffer;
308 }
309
310 constexpr bool IsBindless() const {
311 return is_bindless;
312 }
313
314 constexpr bool IsIndexed() const {
315 return is_indexed;
316 }
317
318 constexpr u32 Size() const {
319 return size;
320 }
321
322 constexpr void SetSize(u32 new_size) {
323 size = new_size;
324 }
325
326private:
327 u32 index{}; ///< Emulated index given for this sampler.
328 u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
329 u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
330 u32 size{1}; ///< Size of the sampler.
331 287
332 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) 288 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
333 bool is_array{}; ///< Whether the texture is being sampled as an array texture or not. 289 bool is_array = false; ///< Whether the texture is being sampled as an array texture or not.
334 bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not. 290 bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not.
335 bool is_buffer{}; ///< Whether the texture is a texture buffer without sampler. 291 bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler.
336 bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not. 292 bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
337 bool is_indexed{}; ///< Whether this sampler is an indexed array of textures. 293 bool is_indexed = false; ///< Whether this sampler is an indexed array of textures.
338}; 294};
339 295
340/// Represents a tracked bindless sampler into a direct const buffer 296/// Represents a tracked bindless sampler into a direct const buffer
@@ -379,13 +335,13 @@ private:
379 u32 offset; 335 u32 offset;
380}; 336};
381 337
382class Image final { 338struct Image {
383public: 339public:
384 /// This constructor is for bound images 340 /// Bound images constructor
385 constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type) 341 constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
386 : index{index}, offset{offset}, type{type} {} 342 : index{index}, offset{offset}, type{type} {}
387 343
388 /// This constructor is for bindless samplers 344 /// Bindless samplers constructor
389 constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type) 345 constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
390 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {} 346 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
391 347
@@ -403,53 +359,20 @@ public:
403 is_atomic = true; 359 is_atomic = true;
404 } 360 }
405 361
406 constexpr u32 GetIndex() const { 362 u32 index = 0;
407 return index; 363 u32 offset = 0;
408 } 364 u32 buffer = 0;
409
410 constexpr u32 GetOffset() const {
411 return offset;
412 }
413
414 constexpr u32 GetBuffer() const {
415 return buffer;
416 }
417
418 constexpr Tegra::Shader::ImageType GetType() const {
419 return type;
420 }
421
422 constexpr bool IsBindless() const {
423 return is_bindless;
424 }
425
426 constexpr bool IsWritten() const {
427 return is_written;
428 }
429
430 constexpr bool IsRead() const {
431 return is_read;
432 }
433
434 constexpr bool IsAtomic() const {
435 return is_atomic;
436 }
437
438private:
439 u32 index{};
440 u32 offset{};
441 u32 buffer{};
442 365
443 Tegra::Shader::ImageType type{}; 366 Tegra::Shader::ImageType type{};
444 bool is_bindless{}; 367 bool is_bindless = false;
445 bool is_written{}; 368 bool is_written = false;
446 bool is_read{}; 369 bool is_read = false;
447 bool is_atomic{}; 370 bool is_atomic = false;
448}; 371};
449 372
450struct GlobalMemoryBase { 373struct GlobalMemoryBase {
451 u32 cbuf_index{}; 374 u32 cbuf_index = 0;
452 u32 cbuf_offset{}; 375 u32 cbuf_offset = 0;
453 376
454 bool operator<(const GlobalMemoryBase& rhs) const { 377 bool operator<(const GlobalMemoryBase& rhs) const {
455 return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset); 378 return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
@@ -463,7 +386,7 @@ struct MetaArithmetic {
463 386
464/// Parameters describing a texture sampler 387/// Parameters describing a texture sampler
465struct MetaTexture { 388struct MetaTexture {
466 const Sampler& sampler; 389 Sampler sampler;
467 Node array; 390 Node array;
468 Node depth_compare; 391 Node depth_compare;
469 std::vector<Node> aoffi; 392 std::vector<Node> aoffi;
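Sampler and Image drop their constexpr Get*/Is* accessors and become plain structs with public, default-initialised members, and MetaTexture now stores its Sampler by value instead of by reference. A tiny illustration of the pattern with simplified members:

    #include <cstdint>
    #include <iostream>

    // Public members with in-class defaults replace private members plus
    // getters; call sites read entry.offset instead of entry.GetOffset().
    struct Sampler {
        std::uint32_t index = 0;
        std::uint32_t offset = 0;
        std::uint32_t buffer = 0;
        bool is_bindless = false;
    };

    struct MetaTexture {
        Sampler sampler; // held by value, so the metadata stays valid even if
                         // the container that produced it later reallocates
    };

    int main() {
        Sampler s;
        s.offset = 8;
        const MetaTexture meta{s};
        std::cout << meta.sampler.offset << ' ' << meta.sampler.is_bindless << '\n';
        return 0;
    }

Holding the sampler by value in MetaTexture pairs naturally with GetSampler returning std::optional<Sampler> by value in texture.cpp above.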
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index c6e7bdf50..15ae152f2 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -18,6 +18,7 @@
18#include "video_core/engines/shader_header.h" 18#include "video_core/engines/shader_header.h"
19#include "video_core/shader/ast.h" 19#include "video_core/shader/ast.h"
20#include "video_core/shader/compiler_settings.h" 20#include "video_core/shader/compiler_settings.h"
21#include "video_core/shader/memory_util.h"
21#include "video_core/shader/node.h" 22#include "video_core/shader/node.h"
22#include "video_core/shader/registry.h" 23#include "video_core/shader/registry.h"
23 24
@@ -25,16 +26,13 @@ namespace VideoCommon::Shader {
25 26
26struct ShaderBlock; 27struct ShaderBlock;
27 28
28using ProgramCode = std::vector<u64>;
29
30constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; 29constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
31 30
32class ConstBuffer { 31struct ConstBuffer {
33public: 32 constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
34 explicit ConstBuffer(u32 max_offset, bool is_indirect)
35 : max_offset{max_offset}, is_indirect{is_indirect} {} 33 : max_offset{max_offset}, is_indirect{is_indirect} {}
36 34
37 ConstBuffer() = default; 35 constexpr ConstBuffer() = default;
38 36
39 void MarkAsUsed(u64 offset) { 37 void MarkAsUsed(u64 offset) {
40 max_offset = std::max(max_offset, static_cast<u32>(offset)); 38 max_offset = std::max(max_offset, static_cast<u32>(offset));
@@ -57,8 +55,8 @@ public:
57 } 55 }
58 56
59private: 57private:
60 u32 max_offset{}; 58 u32 max_offset = 0;
61 bool is_indirect{}; 59 bool is_indirect = false;
62}; 60};
63 61
64struct GlobalMemoryUsage { 62struct GlobalMemoryUsage {
@@ -192,10 +190,14 @@ private:
192 friend class ASTDecoder; 190 friend class ASTDecoder;
193 191
194 struct SamplerInfo { 192 struct SamplerInfo {
195 Tegra::Shader::TextureType type; 193 std::optional<Tegra::Shader::TextureType> type;
196 bool is_array; 194 std::optional<bool> is_array;
197 bool is_shadow; 195 std::optional<bool> is_shadow;
198 bool is_buffer; 196 std::optional<bool> is_buffer;
197
198 constexpr bool IsComplete() const noexcept {
199 return type && is_array && is_shadow && is_buffer;
200 }
199 }; 201 };
200 202
201 void Decode(); 203 void Decode();
@@ -328,16 +330,15 @@ private:
328 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); 330 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
329 331
330 /// Queries the missing sampler info from the execution context. 332 /// Queries the missing sampler info from the execution context.
331 SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset, 333 SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset,
332 std::optional<u32> buffer = std::nullopt); 334 std::optional<u32> buffer = std::nullopt);
333 335
334 /// Accesses a texture sampler 336 /// Accesses a texture sampler.
335 const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler, 337 std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
336 std::optional<SamplerInfo> sampler_info = std::nullopt);
337 338
338 /// Accesses a texture sampler for a bindless texture. 339 /// Accesses a texture sampler for a bindless texture.
339 const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var, 340 std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
340 std::optional<SamplerInfo> sampler_info = std::nullopt); 341 Node& index_var);
341 342
342 /// Accesses an image. 343 /// Accesses an image.
343 Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type); 344 Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
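SamplerInfo's fields are now std::optional, so a decoder fills in only what the instruction actually encodes and GetSamplerInfo later completes the rest from the registry, falling back to 2D defaults when the sampler is unknown. A self-contained sketch of that merge with stand-in types (RegistryEntry is a placeholder, not the real registry interface):

    #include <iostream>
    #include <optional>

    enum class TextureType { Texture1D, Texture2D, Texture3D };

    // Stand-in for the new SamplerInfo: unset fields mean "ask the registry".
    struct SamplerInfo {
        std::optional<TextureType> type;
        std::optional<bool> is_array;
        std::optional<bool> is_shadow;

        constexpr bool IsComplete() const noexcept {
            return type && is_array && is_shadow;
        }
    };

    struct RegistryEntry {
        TextureType type;
        bool is_array;
        bool is_shadow;
    };

    SamplerInfo GetSamplerInfo(SamplerInfo info, const std::optional<RegistryEntry>& sampler) {
        if (info.IsComplete()) {
            return info; // nothing left to query
        }
        if (!sampler) {
            // Unknown sampler: keep whatever the instruction provided and fall
            // back to 2D defaults for the rest, as the diff does.
            info.type = info.type.value_or(TextureType::Texture2D);
            info.is_array = info.is_array.value_or(false);
            info.is_shadow = info.is_shadow.value_or(false);
            return info;
        }
        info.type = info.type.value_or(sampler->type);
        info.is_array = info.is_array.value_or(sampler->is_array);
        info.is_shadow = info.is_shadow.value_or(sampler->is_shadow);
        return info;
    }

    int main() {
        SamplerInfo info;
        info.is_shadow = true; // known from the instruction (e.g. a depth compare)
        const SamplerInfo merged =
            GetSamplerInfo(info, RegistryEntry{TextureType::Texture3D, true, false});
        std::cout << static_cast<int>(*merged.type) << ' ' << *merged.is_array << ' '
                  << *merged.is_shadow << '\n';
        return 0;
    }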
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 513e9bf49..eb97bfd41 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -153,21 +153,13 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
153 if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) { 153 if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
154 return {}; 154 return {};
155 } 155 }
156 s64 current_cursor = cursor; 156 // Reduce the cursor in one to avoid infinite loops when the instruction sets the same
157 while (current_cursor > 0) { 157 // register that it uses as operand
158 // Reduce the cursor in one to avoid infinite loops when the instruction sets the same 158 const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
159 // register that it uses as operand 159 if (!source) {
160 const auto [source, new_cursor] = TrackRegister(gpr, code, current_cursor - 1); 160 return {};
161 current_cursor = new_cursor;
162 if (!source) {
163 continue;
164 }
165 const auto [base_address, index, offset] = TrackCbuf(source, code, current_cursor);
166 if (base_address != nullptr) {
167 return {base_address, index, offset};
168 }
169 } 161 }
170 return {}; 162 return TrackCbuf(source, code, new_cursor);
171 } 163 }
172 if (const auto operation = std::get_if<OperationNode>(&*tracked)) { 164 if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
173 for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) { 165 for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
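The cursor-walking loop in TrackCbuf is collapsed into a single backwards lookup that starts at cursor - 1, so an instruction writing the same register it reads cannot re-match itself, followed by a recursive call on the found source. A simplified stand-in of that control flow (the Instr type below is illustrative, not the real operation nodes):

    #include <cstddef>
    #include <cstdint>
    #include <optional>
    #include <vector>

    // Stand-in instruction: writes 'dest' either from a constant buffer offset
    // or by copying another register. Only the search structure mirrors the diff.
    struct Instr {
        int dest = -1;                     // register written, -1 = none
        int src_reg = -1;                  // register copied from, -1 = none
        std::optional<std::uint32_t> cbuf; // constant buffer offset, if any
    };

    // Find the last write to 'reg' strictly before 'cursor'; either report the
    // cbuf offset or keep following the copy chain with a recursive call.
    std::optional<std::uint32_t> TrackCbuf(const std::vector<Instr>& code, int reg,
                                           std::size_t cursor) {
        for (std::size_t i = cursor; i-- > 0;) {
            if (code[i].dest != reg) {
                continue;
            }
            if (code[i].cbuf) {
                return code[i].cbuf;
            }
            if (code[i].src_reg < 0) {
                return std::nullopt; // source is not trackable
            }
            return TrackCbuf(code, code[i].src_reg, i);
        }
        return std::nullopt;
    }

    int main() {
        const std::vector<Instr> code{
            {2, -1, 0x30}, // r2 = cbuf[0x30]
            {5, 2, {}},    // r5 = r2
            {5, 5, {}},    // r5 = f(r5): cannot self-loop thanks to cursor - 1
        };
        return TrackCbuf(code, 5, code.size()).value_or(0) == 0x30 ? 0 : 1;
    }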
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 0de499946..884fabffe 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -81,7 +81,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
81 params.pixel_format = lookup_table.GetPixelFormat( 81 params.pixel_format = lookup_table.GetPixelFormat(
82 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); 82 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
83 params.type = GetFormatType(params.pixel_format); 83 params.type = GetFormatType(params.pixel_format);
84 if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) { 84 if (entry.is_shadow && params.type == SurfaceType::ColorTexture) {
85 switch (params.pixel_format) { 85 switch (params.pixel_format) {
86 case PixelFormat::R16U: 86 case PixelFormat::R16U:
87 case PixelFormat::R16F: 87 case PixelFormat::R16F:
@@ -108,7 +108,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
108 params.emulated_levels = 1; 108 params.emulated_levels = 1;
109 params.is_layered = false; 109 params.is_layered = false;
110 } else { 110 } else {
111 params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); 111 params.target = TextureTypeToSurfaceTarget(entry.type, entry.is_array);
112 params.width = tic.Width(); 112 params.width = tic.Width();
113 params.height = tic.Height(); 113 params.height = tic.Height();
114 params.depth = tic.Depth(); 114 params.depth = tic.Depth();
@@ -138,7 +138,7 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
138 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); 138 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
139 params.type = GetFormatType(params.pixel_format); 139 params.type = GetFormatType(params.pixel_format);
140 params.type = GetFormatType(params.pixel_format); 140 params.type = GetFormatType(params.pixel_format);
141 params.target = ImageTypeToSurfaceTarget(entry.GetType()); 141 params.target = ImageTypeToSurfaceTarget(entry.type);
142 // TODO: on 1DBuffer we should use the tic info. 142 // TODO: on 1DBuffer we should use the tic info.
143 if (tic.IsBuffer()) { 143 if (tic.IsBuffer()) {
144 params.target = SurfaceTarget::TextureBuffer; 144 params.target = SurfaceTarget::TextureBuffer;
@@ -248,12 +248,12 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
248 248
249VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( 249VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
250 const VideoCommon::Shader::Sampler& entry) { 250 const VideoCommon::Shader::Sampler& entry) {
251 return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); 251 return TextureTypeToSurfaceTarget(entry.type, entry.is_array);
252} 252}
253 253
254VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( 254VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
255 const VideoCommon::Shader::Image& entry) { 255 const VideoCommon::Shader::Image& entry) {
256 return ImageTypeToSurfaceTarget(entry.GetType()); 256 return ImageTypeToSurfaceTarget(entry.type);
257} 257}
258 258
259bool SurfaceParams::IsLayered() const { 259bool SurfaceParams::IsLayered() const {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index cf6bd005a..d6efc34b2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -143,7 +143,7 @@ public:
143 } 143 }
144 144
145 const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; 145 const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
146 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); 146 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
147 if (guard_samplers) { 147 if (guard_samplers) {
148 sampled_textures.push_back(surface); 148 sampled_textures.push_back(surface);
149 } 149 }
@@ -163,7 +163,7 @@ public:
163 return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); 163 return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
164 } 164 }
165 const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; 165 const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
166 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); 166 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
167 if (guard_samplers) { 167 if (guard_samplers) {
168 sampled_textures.push_back(surface); 168 sampled_textures.push_back(surface);
169 } 169 }
@@ -178,7 +178,7 @@ public:
178 return any_rt; 178 return any_rt;
179 } 179 }
180 180
181 TView GetDepthBufferSurface() { 181 TView GetDepthBufferSurface(bool preserve_contents) {
182 std::lock_guard lock{mutex}; 182 std::lock_guard lock{mutex};
183 auto& maxwell3d = system.GPU().Maxwell3D(); 183 auto& maxwell3d = system.GPU().Maxwell3D();
184 if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) { 184 if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -199,7 +199,7 @@ public:
199 return {}; 199 return {};
200 } 200 }
201 const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)}; 201 const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
202 auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true); 202 auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
203 if (depth_buffer.target) 203 if (depth_buffer.target)
204 depth_buffer.target->MarkAsRenderTarget(false, NO_RT); 204 depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
205 depth_buffer.target = surface_view.first; 205 depth_buffer.target = surface_view.first;
@@ -209,7 +209,7 @@ public:
209 return surface_view.second; 209 return surface_view.second;
210 } 210 }
211 211
212 TView GetColorBufferSurface(std::size_t index) { 212 TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
213 std::lock_guard lock{mutex}; 213 std::lock_guard lock{mutex};
214 ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); 214 ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
215 auto& maxwell3d = system.GPU().Maxwell3D(); 215 auto& maxwell3d = system.GPU().Maxwell3D();
@@ -239,8 +239,9 @@ public:
239 return {}; 239 return {};
240 } 240 }
241 241
242 auto surface_view = GetSurface(gpu_addr, *cpu_addr, 242 auto surface_view =
243 SurfaceParams::CreateForFramebuffer(system, index), true); 243 GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
244 preserve_contents, true);
244 if (render_targets[index].target) { 245 if (render_targets[index].target) {
245 auto& surface = render_targets[index].target; 246 auto& surface = render_targets[index].target;
246 surface->MarkAsRenderTarget(false, NO_RT); 247 surface->MarkAsRenderTarget(false, NO_RT);
@@ -300,9 +301,9 @@ public:
300 const std::optional<VAddr> src_cpu_addr = 301 const std::optional<VAddr> src_cpu_addr =
301 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); 302 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
302 std::pair<TSurface, TView> dst_surface = 303 std::pair<TSurface, TView> dst_surface =
303 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false); 304 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
304 std::pair<TSurface, TView> src_surface = 305 std::pair<TSurface, TView> src_surface =
305 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false); 306 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
306 ImageBlit(src_surface.second, dst_surface.second, copy_config); 307 ImageBlit(src_surface.second, dst_surface.second, copy_config);
307 dst_surface.first->MarkAsModified(true, Tick()); 308 dst_surface.first->MarkAsModified(true, Tick());
308 } 309 }
@@ -532,18 +533,22 @@ private:
532 * @param overlaps The overlapping surfaces registered in the cache. 533 * @param overlaps The overlapping surfaces registered in the cache.
533 * @param params The parameters for the new surface. 534 * @param params The parameters for the new surface.
534 * @param gpu_addr The starting address of the new surface. 535 * @param gpu_addr The starting address of the new surface.
536 * @param preserve_contents Indicates whether the new surface should be loaded from memory or
537 * left blank.
535 * @param untopological Indicates to the recycler that the texture has no way to match the 538 * @param untopological Indicates to the recycler that the texture has no way to match the
536 * overlaps due to topological reasons. 539 * overlaps due to topological reasons.
537 **/ 540 **/
538 std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, 541 std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
539 const SurfaceParams& params, const GPUVAddr gpu_addr, 542 const SurfaceParams& params, const GPUVAddr gpu_addr,
543 const bool preserve_contents,
540 const MatchTopologyResult untopological) { 544 const MatchTopologyResult untopological) {
545 const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
541 for (auto& surface : overlaps) { 546 for (auto& surface : overlaps) {
542 Unregister(surface); 547 Unregister(surface);
543 } 548 }
544 switch (PickStrategy(overlaps, params, gpu_addr, untopological)) { 549 switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
545 case RecycleStrategy::Ignore: { 550 case RecycleStrategy::Ignore: {
546 return InitializeSurface(gpu_addr, params, Settings::IsGPULevelExtreme()); 551 return InitializeSurface(gpu_addr, params, do_load);
547 } 552 }
548 case RecycleStrategy::Flush: { 553 case RecycleStrategy::Flush: {
549 std::sort(overlaps.begin(), overlaps.end(), 554 std::sort(overlaps.begin(), overlaps.end(),
@@ -553,7 +558,7 @@ private:
553 for (auto& surface : overlaps) { 558 for (auto& surface : overlaps) {
554 FlushSurface(surface); 559 FlushSurface(surface);
555 } 560 }
556 return InitializeSurface(gpu_addr, params); 561 return InitializeSurface(gpu_addr, params, preserve_contents);
557 } 562 }
558 case RecycleStrategy::BufferCopy: { 563 case RecycleStrategy::BufferCopy: {
559 auto new_surface = GetUncachedSurface(gpu_addr, params); 564 auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -562,7 +567,7 @@ private:
562 } 567 }
563 default: { 568 default: {
564 UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!"); 569 UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
565 return InitializeSurface(gpu_addr, params); 570 return InitializeSurface(gpu_addr, params, do_load);
566 } 571 }
567 } 572 }
568 } 573 }
@@ -700,11 +705,14 @@ private:
700 * @param params The parameters on the new surface. 705 * @param params The parameters on the new surface.
701 * @param gpu_addr The starting address of the new surface. 706 * @param gpu_addr The starting address of the new surface.
702 * @param cpu_addr The starting address of the new surface on physical memory. 707 * @param cpu_addr The starting address of the new surface on physical memory.
708 * @param preserve_contents Indicates whether the new surface should be loaded from memory or
709 * left blank.
703 */ 710 */
704 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, 711 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
705 const SurfaceParams& params, 712 const SurfaceParams& params,
706 const GPUVAddr gpu_addr, 713 const GPUVAddr gpu_addr,
707 const VAddr cpu_addr) { 714 const VAddr cpu_addr,
715 bool preserve_contents) {
708 if (params.target == SurfaceTarget::Texture3D) { 716 if (params.target == SurfaceTarget::Texture3D) {
709 bool failed = false; 717 bool failed = false;
710 if (params.num_levels > 1) { 718 if (params.num_levels > 1) {
@@ -754,7 +762,7 @@ private:
754 return std::nullopt; 762 return std::nullopt;
755 } 763 }
756 Unregister(surface); 764 Unregister(surface);
757 return InitializeSurface(gpu_addr, params); 765 return InitializeSurface(gpu_addr, params, preserve_contents);
758 } 766 }
759 return std::nullopt; 767 return std::nullopt;
760 } 768 }
@@ -765,7 +773,7 @@ private:
765 return {{surface, surface->GetMainView()}}; 773 return {{surface, surface->GetMainView()}};
766 } 774 }
767 } 775 }
768 return InitializeSurface(gpu_addr, params); 776 return InitializeSurface(gpu_addr, params, preserve_contents);
769 } 777 }
770 } 778 }
771 779
@@ -788,10 +796,13 @@ private:
788 * 796 *
789 * @param gpu_addr The starting address of the candidate surface. 797 * @param gpu_addr The starting address of the candidate surface.
790 * @param params The parameters on the candidate surface. 798 * @param params The parameters on the candidate surface.
799 * @param preserve_contents Indicates whether the new surface should be loaded from memory or
800 * left blank.
791 * @param is_render Whether or not the surface is a render target. 801 * @param is_render Whether or not the surface is a render target.
792 **/ 802 **/
793 std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr, 803 std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
794 const SurfaceParams& params, bool is_render) { 804 const SurfaceParams& params, bool preserve_contents,
805 bool is_render) {
795 // Step 1 806 // Step 1
796 // Check Level 1 Cache for a fast structural match. If candidate surface 807 // Check Level 1 Cache for a fast structural match. If candidate surface
797 // matches at certain level we are pretty much done. 808 // matches at certain level we are pretty much done.
@@ -800,7 +811,8 @@ private:
800 const auto topological_result = current_surface->MatchesTopology(params); 811 const auto topological_result = current_surface->MatchesTopology(params);
801 if (topological_result != MatchTopologyResult::FullMatch) { 812 if (topological_result != MatchTopologyResult::FullMatch) {
802 std::vector<TSurface> overlaps{current_surface}; 813 std::vector<TSurface> overlaps{current_surface};
803 return RecycleSurface(overlaps, params, gpu_addr, topological_result); 814 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
815 topological_result);
804 } 816 }
805 817
806 const auto struct_result = current_surface->MatchesStructure(params); 818 const auto struct_result = current_surface->MatchesStructure(params);
@@ -825,7 +837,7 @@ private:
825 837
826 // If none are found, we are done. we just load the surface and create it. 838 // If none are found, we are done. we just load the surface and create it.
827 if (overlaps.empty()) { 839 if (overlaps.empty()) {
828 return InitializeSurface(gpu_addr, params); 840 return InitializeSurface(gpu_addr, params, preserve_contents);
829 } 841 }
830 842
831 // Step 3 843 // Step 3
@@ -835,13 +847,15 @@ private:
835 for (const auto& surface : overlaps) { 847 for (const auto& surface : overlaps) {
836 const auto topological_result = surface->MatchesTopology(params); 848 const auto topological_result = surface->MatchesTopology(params);
837 if (topological_result != MatchTopologyResult::FullMatch) { 849 if (topological_result != MatchTopologyResult::FullMatch) {
838 return RecycleSurface(overlaps, params, gpu_addr, topological_result); 850 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
851 topological_result);
839 } 852 }
840 } 853 }
841 854
842 // Check if it's a 3D texture 855 // Check if it's a 3D texture
843 if (params.block_depth > 0) { 856 if (params.block_depth > 0) {
844 auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr); 857 auto surface =
858 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
845 if (surface) { 859 if (surface) {
846 return *surface; 860 return *surface;
847 } 861 }
@@ -861,7 +875,8 @@ private:
861 return *view; 875 return *view;
862 } 876 }
863 } 877 }
864 return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); 878 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
879 MatchTopologyResult::FullMatch);
865 } 880 }
866 // Now we check if the candidate is a mipmap/layer of the overlap 881 // Now we check if the candidate is a mipmap/layer of the overlap
867 std::optional<TView> view = 882 std::optional<TView> view =
@@ -885,7 +900,7 @@ private:
885 pair.first->EmplaceView(params, gpu_addr, candidate_size); 900 pair.first->EmplaceView(params, gpu_addr, candidate_size);
886 if (mirage_view) 901 if (mirage_view)
887 return {pair.first, *mirage_view}; 902 return {pair.first, *mirage_view};
888 return RecycleSurface(overlaps, params, gpu_addr, 903 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
889 MatchTopologyResult::FullMatch); 904 MatchTopologyResult::FullMatch);
890 } 905 }
891 return {current_surface, *view}; 906 return {current_surface, *view};
@@ -901,7 +916,8 @@ private:
901 } 916 }
902 } 917 }
903 // We failed all the tests, recycle the overlaps into a new texture. 918 // We failed all the tests, recycle the overlaps into a new texture.
904 return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); 919 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
920 MatchTopologyResult::FullMatch);
905 } 921 }
906 922
907 /** 923 /**
@@ -1059,10 +1075,10 @@ private:
1059 } 1075 }
1060 1076
1061 std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params, 1077 std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
1062 bool do_load = true) { 1078 bool preserve_contents) {
1063 auto new_surface{GetUncachedSurface(gpu_addr, params)}; 1079 auto new_surface{GetUncachedSurface(gpu_addr, params)};
1064 Register(new_surface); 1080 Register(new_surface);
1065 if (do_load) { 1081 if (preserve_contents) {
1066 LoadSurface(new_surface); 1082 LoadSurface(new_surface);
1067 } 1083 }
1068 return {new_surface, new_surface->GetMainView()}; 1084 return {new_surface, new_surface->GetMainView()};
@@ -1156,7 +1172,7 @@ private:
1156 /// Returns true if the shader sampler entry is compatible with the TIC texture type. 1172 /// Returns true if the shader sampler entry is compatible with the TIC texture type.
1157 static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type, 1173 static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type,
1158 const VideoCommon::Shader::Sampler& entry) { 1174 const VideoCommon::Shader::Sampler& entry) {
1159 const auto shader_type = entry.GetType(); 1175 const auto shader_type = entry.type;
1160 switch (tic_type) { 1176 switch (tic_type) {
1161 case Tegra::Texture::TextureType::Texture1D: 1177 case Tegra::Texture::TextureType::Texture1D:
1162 case Tegra::Texture::TextureType::Texture1DArray: 1178 case Tegra::Texture::TextureType::Texture1DArray:
@@ -1177,7 +1193,7 @@ private:
1177 if (shader_type == Tegra::Shader::TextureType::TextureCube) { 1193 if (shader_type == Tegra::Shader::TextureType::TextureCube) {
1178 return true; 1194 return true;
1179 } 1195 }
1180 return shader_type == Tegra::Shader::TextureType::Texture2D && entry.IsArray(); 1196 return shader_type == Tegra::Shader::TextureType::Texture2D && entry.is_array;
1181 } 1197 }
1182 UNREACHABLE(); 1198 UNREACHABLE();
1183 return true; 1199 return true;
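GetDepthBufferSurface and GetColorBufferSurface now take a preserve_contents flag that is threaded through GetSurface, Manage3DSurfaces, RecycleSurface and InitializeSurface, so a caller that is about to overwrite a target completely can skip the load from guest memory. A small sketch of the resulting load decision; IsGPULevelExtreme below stands in for the Settings query used in the diff:

    #include <iostream>

    enum class RecycleStrategy { Ignore, Flush };

    // Placeholder for the accuracy-level setting queried in RecycleSurface.
    bool IsGPULevelExtreme() {
        return false;
    }

    // A new or recycled surface is loaded back from guest memory only when the
    // caller asked for its contents to be kept; the Ignore strategy additionally
    // requires the extreme GPU accuracy level, as in the diff above.
    bool ShouldLoad(RecycleStrategy strategy, bool preserve_contents) {
        switch (strategy) {
        case RecycleStrategy::Flush:
            return preserve_contents;
        case RecycleStrategy::Ignore:
        default:
            return preserve_contents && IsGPULevelExtreme();
        }
    }

    int main() {
        std::cout << ShouldLoad(RecycleStrategy::Flush, true) << ' '
                  << ShouldLoad(RecycleStrategy::Ignore, true) << '\n';
        return 0;
    }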
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 05baec7e1..b44b4276c 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1304,7 +1304,9 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
1304 FileSys::VirtualFile romfs; 1304 FileSys::VirtualFile romfs;
1305 1305
1306 if (*romfs_title_id == program_id) { 1306 if (*romfs_title_id == program_id) {
1307 romfs = file; 1307 const u64 ivfc_offset = loader->ReadRomFSIVFCOffset();
1308 FileSys::PatchManager pm{program_id};
1309 romfs = pm.PatchRomFS(file, ivfc_offset, FileSys::ContentRecordType::Program);
1308 } else { 1310 } else {
1309 romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS(); 1311 romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS();
1310 } 1312 }
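Finally, dumping the RomFS of the currently running title now routes the base file through FileSys::PatchManager::PatchRomFS at the loader-reported IVFC offset, presumably so the dump reflects applied updates and LayeredFS mods rather than the raw base image. A rough stand-in of the branch with simplified types (the real FileSys and Loader interfaces are not reproduced here):

    #include <cstdint>
    #include <memory>
    #include <string>

    // Simplified stand-ins; the real code uses FileSys::VirtualFile and
    // FileSys::PatchManager.
    using VirtualFile = std::shared_ptr<std::string>;

    struct PatchManager {
        std::uint64_t title_id;
        VirtualFile PatchRomFS(VirtualFile base, std::uint64_t ivfc_offset) const {
            return base; // placeholder: would return the patched RomFS view
        }
    };

    // Patch the base game's RomFS when the requested title is the loaded
    // program, otherwise fall back to the Data content's RomFS.
    VirtualFile SelectRomFSForDump(std::uint64_t romfs_title_id, std::uint64_t program_id,
                                   VirtualFile base_file, std::uint64_t ivfc_offset,
                                   VirtualFile data_romfs) {
        if (romfs_title_id == program_id) {
            const PatchManager pm{program_id};
            return pm.PatchRomFS(base_file, ivfc_offset);
        }
        return data_romfs;
    }

    int main() {
        const auto base = std::make_shared<std::string>("base");
        const auto data = std::make_shared<std::string>("data");
        const auto romfs = SelectRomFSForDump(1, 1, base, 0, data);
        return romfs == base ? 0 : 1;
    }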