Diffstat (limited to 'src')
-rw-r--r--  src/core/CMakeLists.txt | 2
-rw-r--r--  src/core/hle/kernel/global_scheduler_context.cpp | 6
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.cpp | 34
-rw-r--r--  src/core/hle/kernel/k_interrupt_manager.h | 17
-rw-r--r--  src/core/hle/kernel/k_process.cpp | 12
-rw-r--r--  src/core/hle/kernel/k_process.h | 4
-rw-r--r--  src/core/hle/kernel/k_scheduler.cpp | 8
-rw-r--r--  src/core/hle/kernel/k_thread.cpp | 44
-rw-r--r--  src/core/hle/kernel/k_thread.h | 6
-rw-r--r--  src/core/hle/kernel/svc.cpp | 21
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp | 16
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_instructions.h | 1
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp | 4
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp | 51
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp | 4
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_instructions.h | 2
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp | 4
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl_special.cpp | 4
-rw-r--r--  src/shader_recompiler/backend/glsl/glsl_emit_context.cpp | 7
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp | 25
-rw-r--r--  src/shader_recompiler/backend/spirv/emit_spirv_instructions.h | 1
-rw-r--r--  src/shader_recompiler/frontend/ir/opcodes.inc | 1
-rw-r--r--  src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp | 1
-rw-r--r--  src/shader_recompiler/ir_opt/constant_propagation_pass.cpp | 23
-rw-r--r--  src/shader_recompiler/profile.h | 4
-rw-r--r--  src/video_core/renderer_opengl/gl_device.cpp | 10
-rw-r--r--  src/video_core/renderer_opengl/gl_device.h | 10
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp | 2
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp | 39
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h | 2
-rw-r--r--  src/video_core/texture_cache/util.cpp | 10
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.cpp | 9
-rw-r--r--  src/video_core/vulkan_common/vulkan_device.h | 5
-rw-r--r--  src/yuzu/applets/qt_controller.cpp | 68
-rw-r--r--  src/yuzu/configuration/configure_input_player.cpp | 53
-rw-r--r--  src/yuzu/main.cpp | 4
36 files changed, 390 insertions, 124 deletions
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 698c4f912..b1a746727 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -187,6 +187,8 @@ add_library(core STATIC
     hle/kernel/k_event.h
     hle/kernel/k_handle_table.cpp
     hle/kernel/k_handle_table.h
+    hle/kernel/k_interrupt_manager.cpp
+    hle/kernel/k_interrupt_manager.h
     hle/kernel/k_light_condition_variable.cpp
     hle/kernel/k_light_condition_variable.h
     hle/kernel/k_light_lock.cpp
diff --git a/src/core/hle/kernel/global_scheduler_context.cpp b/src/core/hle/kernel/global_scheduler_context.cpp
index 4f4e338e3..baad2c5d6 100644
--- a/src/core/hle/kernel/global_scheduler_context.cpp
+++ b/src/core/hle/kernel/global_scheduler_context.cpp
@@ -9,6 +9,7 @@
 #include "core/hle/kernel/global_scheduler_context.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/kernel.h"
+#include "core/hle/kernel/physical_core.h"
 
 namespace Kernel {
 
@@ -42,6 +43,11 @@ void GlobalSchedulerContext::PreemptThreads() {
     for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
         const u32 priority = preemption_priorities[core_id];
         kernel.Scheduler(core_id).RotateScheduledQueue(core_id, priority);
+
+        // Signal an interrupt occurred. For core 3, this is a certainty, as preemption will result
+        // in the rotator thread being scheduled. For cores 0-2, this is to simulate or system
+        // interrupts that may have occurred.
+        kernel.PhysicalCore(core_id).Interrupt();
     }
 }
 
diff --git a/src/core/hle/kernel/k_interrupt_manager.cpp b/src/core/hle/kernel/k_interrupt_manager.cpp
new file mode 100644
index 000000000..e5dd39751
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.cpp
@@ -0,0 +1,34 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "core/hle/kernel/k_interrupt_manager.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/kernel/k_scheduler.h"
+#include "core/hle/kernel/k_thread.h"
+#include "core/hle/kernel/kernel.h"
+
+namespace Kernel::KInterruptManager {
+
+void HandleInterrupt(KernelCore& kernel, s32 core_id) {
+    auto* process = kernel.CurrentProcess();
+    if (!process) {
+        return;
+    }
+
+    auto& scheduler = kernel.Scheduler(core_id);
+    auto& current_thread = *scheduler.GetCurrentThread();
+
+    // If the user disable count is set, we may need to pin the current thread.
+    if (current_thread.GetUserDisableCount() && !process->GetPinnedThread(core_id)) {
+        KScopedSchedulerLock sl{kernel};
+
+        // Pin the current thread.
+        process->PinCurrentThread(core_id);
+
+        // Set the interrupt flag for the thread.
+        scheduler.GetCurrentThread()->SetInterruptFlag();
+    }
+}
+
+} // namespace Kernel::KInterruptManager
diff --git a/src/core/hle/kernel/k_interrupt_manager.h b/src/core/hle/kernel/k_interrupt_manager.h
new file mode 100644
index 000000000..05924801e
--- /dev/null
+++ b/src/core/hle/kernel/k_interrupt_manager.h
@@ -0,0 +1,17 @@
+// Copyright 2021 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include "common/common_types.h"
+
+namespace Kernel {
+
+class KernelCore;
+
+namespace KInterruptManager {
+void HandleInterrupt(KernelCore& kernel, s32 core_id);
+}
+
+} // namespace Kernel
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 73f8bc4fe..bf98a51e2 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -220,30 +220,28 @@ bool KProcess::ReleaseUserException(KThread* thread) {
     }
 }
 
-void KProcess::PinCurrentThread() {
+void KProcess::PinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    const s32 core_id = GetCurrentCoreId(kernel);
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
 
     // If the thread isn't terminated, pin it.
     if (!cur_thread->IsTerminationRequested()) {
         // Pin it.
         PinThread(core_id, cur_thread);
-        cur_thread->Pin();
+        cur_thread->Pin(core_id);
 
         // An update is needed.
         KScheduler::SetSchedulerUpdateNeeded(kernel);
     }
 }
 
-void KProcess::UnpinCurrentThread() {
+void KProcess::UnpinCurrentThread(s32 core_id) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Get the current thread.
-    const s32 core_id = GetCurrentCoreId(kernel);
-    KThread* cur_thread = GetCurrentThreadPointer(kernel);
+    KThread* cur_thread = kernel.Scheduler(static_cast<std::size_t>(core_id)).GetCurrentThread();
 
     // Unpin it.
     cur_thread->Unpin();
diff --git a/src/core/hle/kernel/k_process.h b/src/core/hle/kernel/k_process.h
index cb93c7e24..e7c8b5838 100644
--- a/src/core/hle/kernel/k_process.h
+++ b/src/core/hle/kernel/k_process.h
@@ -345,8 +345,8 @@ public:
 
     bool IsSignaled() const override;
 
-    void PinCurrentThread();
-    void UnpinCurrentThread();
+    void PinCurrentThread(s32 core_id);
+    void UnpinCurrentThread(s32 core_id);
     void UnpinThread(KThread* thread);
 
     KLightLock& GetStateLock() {
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 277201de4..31cec990e 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -15,6 +15,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/cpu_manager.h"
+#include "core/hle/kernel/k_interrupt_manager.h"
 #include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_scheduler.h"
 #include "core/hle/kernel/k_scoped_scheduler_lock_and_sleep.h"
@@ -53,6 +54,13 @@ void KScheduler::RescheduleCores(KernelCore& kernel, u64 cores_pending_reschedul
         }
         cores_pending_reschedule &= ~(1ULL << core);
     }
+
+    for (std::size_t core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; ++core_id) {
+        if (kernel.PhysicalCore(core_id).IsInterrupted()) {
+            KInterruptManager::HandleInterrupt(kernel, static_cast<s32>(core_id));
+        }
+    }
+
     if (must_context_switch) {
         auto core_scheduler = kernel.CurrentScheduler();
         kernel.ExitSVCProfile();
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index b8c993748..71e029a3f 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -3,6 +3,7 @@
 // Refer to the license.txt file included.
 
 #include <algorithm>
+#include <atomic>
 #include <cinttypes>
 #include <optional>
 #include <vector>
@@ -33,6 +34,7 @@
 #include "core/hle/kernel/svc_results.h"
 #include "core/hle/kernel/time_manager.h"
 #include "core/hle/result.h"
+#include "core/memory.h"
 
 #ifdef ARCHITECTURE_x86_64
 #include "core/arm/dynarmic/arm_dynarmic_32.h"
@@ -63,6 +65,13 @@ namespace Kernel {
 
 namespace {
 
+struct ThreadLocalRegion {
+    static constexpr std::size_t MessageBufferSize = 0x100;
+    std::array<u32, MessageBufferSize / sizeof(u32)> message_buffer;
+    std::atomic_uint16_t disable_count;
+    std::atomic_uint16_t interrupt_flag;
+};
+
 class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait {
 public:
     explicit ThreadQueueImplForKThreadSleep(KernelCore& kernel_)
@@ -346,7 +355,7 @@ void KThread::StartTermination() {
     if (parent != nullptr) {
         parent->ReleaseUserException(this);
         if (parent->GetPinnedThread(GetCurrentCoreId(kernel)) == this) {
-            parent->UnpinCurrentThread();
+            parent->UnpinCurrentThread(core_id);
         }
     }
 
@@ -372,7 +381,7 @@ void KThread::StartTermination() {
     this->Close();
 }
 
-void KThread::Pin() {
+void KThread::Pin(s32 current_core) {
     ASSERT(kernel.GlobalSchedulerContext().IsLocked());
 
     // Set ourselves as pinned.
@@ -389,7 +398,6 @@ void KThread::Pin() {
 
     // Bind ourselves to this core.
     const s32 active_core = GetActiveCore();
-    const s32 current_core = GetCurrentCoreId(kernel);
 
     SetActiveCore(current_core);
     physical_ideal_core_id = current_core;
@@ -482,6 +490,36 @@ void KThread::Unpin() {
     }
 }
 
+u16 KThread::GetUserDisableCount() const {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return {};
+    }
+
+    auto& memory = kernel.System().Memory();
+    return memory.Read16(tls_address + offsetof(ThreadLocalRegion, disable_count));
+}
+
+void KThread::SetInterruptFlag() {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return;
+    }
+
+    auto& memory = kernel.System().Memory();
+    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 1);
+}
+
+void KThread::ClearInterruptFlag() {
+    if (!IsUserThread()) {
+        // We only emulate TLS for user threads
+        return;
+    }
+
+    auto& memory = kernel.System().Memory();
+    memory.Write16(tls_address + offsetof(ThreadLocalRegion, interrupt_flag), 0);
+}
+
 ResultCode KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     KScopedSchedulerLock sl{kernel};
 
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index c8a08bd71..83dfde69b 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -307,6 +307,10 @@ public:
         return parent != nullptr;
     }
 
+    u16 GetUserDisableCount() const;
+    void SetInterruptFlag();
+    void ClearInterruptFlag();
+
     [[nodiscard]] KThread* GetLockOwner() const {
         return lock_owner;
     }
@@ -490,7 +494,7 @@ public:
         this->GetStackParameters().disable_count--;
     }
 
-    void Pin();
+    void Pin(s32 current_core);
 
     void Unpin();
 
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 63e2dff19..250ef9042 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -2027,6 +2027,25 @@ static ResultCode SignalToAddress(Core::System& system, VAddr address, Svc::Sign
                                       count);
 }
 
+static void SynchronizePreemptionState(Core::System& system) {
+    auto& kernel = system.Kernel();
+
+    // Lock the scheduler.
+    KScopedSchedulerLock sl{kernel};
+
+    // If the current thread is pinned, unpin it.
+    KProcess* cur_process = system.Kernel().CurrentProcess();
+    const auto core_id = GetCurrentCoreId(kernel);
+
+    if (cur_process->GetPinnedThread(core_id) == GetCurrentThreadPointer(kernel)) {
+        // Clear the current thread's interrupt flag.
+        GetCurrentThread(kernel).ClearInterruptFlag();
+
+        // Unpin the current thread.
+        cur_process->UnpinCurrentThread(core_id);
+    }
+}
+
 static ResultCode SignalToAddress32(Core::System& system, u32 address, Svc::SignalType signal_type,
                                     s32 value, s32 count) {
     return SignalToAddress(system, address, signal_type, value, count);
@@ -2797,7 +2816,7 @@ static const FunctionDef SVC_Table_64[] = {
     {0x33, SvcWrap64<GetThreadContext>, "GetThreadContext"},
     {0x34, SvcWrap64<WaitForAddress>, "WaitForAddress"},
     {0x35, SvcWrap64<SignalToAddress>, "SignalToAddress"},
-    {0x36, nullptr, "SynchronizePreemptionState"},
+    {0x36, SvcWrap64<SynchronizePreemptionState>, "SynchronizePreemptionState"},
     {0x37, nullptr, "Unknown"},
     {0x38, nullptr, "Unknown"},
     {0x39, nullptr, "Unknown"},
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index 6f98d0998..7434a1f92 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -126,6 +126,22 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, Scal
     }
 }
 
+void EmitGetAttributeU32(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, ScalarU32) {
+    switch (attr) {
+    case IR::Attribute::PrimitiveId:
+        ctx.Add("MOV.S {}.x,primitive.id;", inst);
+        break;
+    case IR::Attribute::InstanceId:
+        ctx.Add("MOV.S {}.x,{}.instance;", inst, ctx.attrib_name);
+        break;
+    case IR::Attribute::VertexId:
+        ctx.Add("MOV.S {}.x,{}.id;", inst, ctx.attrib_name);
+        break;
+    default:
+        throw NotImplementedException("Get U32 attribute {}", attr);
+    }
+}
+
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value,
                       [[maybe_unused]] ScalarU32 vertex) {
     const u32 element{static_cast<u32>(attr) % 4};
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
index 1f343bff5..b48007856 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_instructions.h
@@ -50,6 +50,7 @@ void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
 void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
 void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
 void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, ScalarU32 vertex);
+void EmitGetAttributeU32(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, ScalarU32 vertex);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value, ScalarU32 vertex);
 void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset, ScalarU32 vertex);
 void EmitSetAttributeIndexed(EmitContext& ctx, ScalarU32 offset, ScalarF32 value, ScalarU32 vertex);
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
index 0f2668d9e..e0ead7a53 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_bitwise_conversion.cpp
@@ -7,6 +7,7 @@
 #include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
 #include "shader_recompiler/backend/glsl/glsl_emit_context.h"
 #include "shader_recompiler/frontend/ir/value.h"
+#include "shader_recompiler/profile.h"
 
 namespace Shader::Backend::GLSL {
 namespace {
@@ -30,8 +31,9 @@ void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value)
     inst.DestructiveAddUsage(1);
     const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U1)};
     const auto input{ctx.var_alloc.Consume(value)};
+    const auto suffix{ctx.profile.has_gl_bool_ref_bug ? "?true:false" : ""};
     if (ret != input) {
-        ctx.Add("{}={};", ret, input);
+        ctx.Add("{}={}{};", ret, input, suffix);
     }
 }
 
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
index 6477bd192..0c1fbc7b1 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_context_get_set.cpp
@@ -102,39 +102,46 @@ void GetCbuf16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, const
 
 void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                    const IR::Value& offset) {
-    GetCbuf8(ctx, inst, binding, offset, "ftou");
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "" : "ftou"};
+    GetCbuf8(ctx, inst, binding, offset, cast);
 }
 
 void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                    const IR::Value& offset) {
-    GetCbuf8(ctx, inst, binding, offset, "ftoi");
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "int" : "ftoi"};
+    GetCbuf8(ctx, inst, binding, offset, cast);
 }
 
 void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                     const IR::Value& offset) {
-    GetCbuf16(ctx, inst, binding, offset, "ftou");
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "" : "ftou"};
+    GetCbuf16(ctx, inst, binding, offset, cast);
 }
 
 void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                     const IR::Value& offset) {
-    GetCbuf16(ctx, inst, binding, offset, "ftoi");
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "int" : "ftoi"};
+    GetCbuf16(ctx, inst, binding, offset, cast);
 }
 
 void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                     const IR::Value& offset) {
     const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
-    GetCbuf(ctx, ret, binding, offset, 32, "ftou");
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "" : "ftou"};
+    GetCbuf(ctx, ret, binding, offset, 32, cast);
 }
 
 void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                     const IR::Value& offset) {
     const auto ret{ctx.var_alloc.Define(inst, GlslVarType::F32)};
-    GetCbuf(ctx, ret, binding, offset, 32);
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "utof" : ""};
+    GetCbuf(ctx, ret, binding, offset, 32, cast);
 }
 
 void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
                       const IR::Value& offset) {
     const auto cbuf{fmt::format("{}_cbuf{}", ctx.stage_name, binding.U32())};
+    const auto cast{ctx.profile.has_gl_cbuf_ftou_bug ? "" : "ftou"};
     if (offset.IsImmediate()) {
         static constexpr u32 cbuf_size{0x10000};
         const u32 u32_offset{offset.U32()};
@@ -145,26 +152,26 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
             return;
         }
         if (u32_offset % 2 == 0) {
-            ctx.AddU32x2("{}=ftou({}[{}].{}{});", inst, cbuf, u32_offset / 16,
+            ctx.AddU32x2("{}={}({}[{}].{}{});", inst, cast, cbuf, u32_offset / 16,
                          OffsetSwizzle(u32_offset), OffsetSwizzle(u32_offset + 4));
         } else {
-            ctx.AddU32x2("{}=uvec2(ftou({}[{}].{}),ftou({}[{}].{}));", inst, cbuf, u32_offset / 16,
-                         OffsetSwizzle(u32_offset), cbuf, (u32_offset + 4) / 16,
-                         OffsetSwizzle(u32_offset + 4));
+            ctx.AddU32x2("{}=uvec2({}({}[{}].{}),{}({}[{}].{}));", inst, cast, cbuf,
+                         u32_offset / 16, OffsetSwizzle(u32_offset), cast, cbuf,
+                         (u32_offset + 4) / 16, OffsetSwizzle(u32_offset + 4));
         }
         return;
     }
     const auto offset_var{ctx.var_alloc.Consume(offset)};
     if (!ctx.profile.has_gl_component_indexing_bug) {
-        ctx.AddU32x2("{}=uvec2(ftou({}[{}>>4][({}>>2)%4]),ftou({}[({}+4)>>4][(({}+4)>>2)%4]));",
-                     inst, cbuf, offset_var, offset_var, cbuf, offset_var, offset_var);
+        ctx.AddU32x2("{}=uvec2({}({}[{}>>4][({}>>2)%4]),{}({}[({}+4)>>4][(({}+4)>>2)%4]));", inst,
+                     cast, cbuf, offset_var, offset_var, cast, cbuf, offset_var, offset_var);
         return;
     }
     const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32x2)};
     const auto cbuf_offset{fmt::format("{}>>2", offset_var)};
     for (u32 swizzle = 0; swizzle < 4; ++swizzle) {
-        ctx.Add("if(({}&3)=={}){}=uvec2(ftou({}[{}>>4].{}),ftou({}[({}+4)>>4].{}));", cbuf_offset,
-                swizzle, ret, cbuf, offset_var, "xyzw"[swizzle], cbuf, offset_var,
+        ctx.Add("if(({}&3)=={}){}=uvec2({}({}[{}>>4].{}),{}({}[({}+4)>>4].{}));", cbuf_offset,
+                swizzle, ret, cast, cbuf, offset_var, "xyzw"[swizzle], cast, cbuf, offset_var,
                 "xyzw"[(swizzle + 1) % 4]);
     }
 }
@@ -221,6 +228,22 @@ void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
     }
 }
 
+void EmitGetAttributeU32(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, std::string_view) {
+    switch (attr) {
+    case IR::Attribute::PrimitiveId:
+        ctx.AddU32("{}=uint(gl_PrimitiveID);", inst);
+        break;
+    case IR::Attribute::InstanceId:
+        ctx.AddU32("{}=uint(gl_InstanceID);", inst);
+        break;
+    case IR::Attribute::VertexId:
+        ctx.AddU32("{}=uint(gl_VertexID);", inst);
+        break;
+    default:
+        throw NotImplementedException("Get U32 attribute {}", attr);
+    }
+}
+
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view value,
                       [[maybe_unused]] std::string_view vertex) {
     if (IR::IsGeneric(attr)) {
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
index b765a251b..474189d87 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_floating_point.cpp
@@ -125,11 +125,11 @@ void EmitFPNeg16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& i
 }
 
 void EmitFPNeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
-    ctx.AddF32("{}=-({});", inst, value);
+    ctx.AddF32("{}=0.f-({});", inst, value);
 }
 
 void EmitFPNeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
-    ctx.AddF64("{}=-({});", inst, value);
+    ctx.AddF64("{}=double(0.)-({});", inst, value);
 }
 
 void EmitFPSin(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
index f86502e4c..6cabbc717 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_instructions.h
@@ -60,6 +60,8 @@ void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding
                       const IR::Value& offset);
 void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
                       std::string_view vertex);
+void EmitGetAttributeU32(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
+                         std::string_view vertex);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view value,
                       std::string_view vertex);
 void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
index 44060df33..b0d85be99 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_integer.cpp
@@ -87,11 +87,11 @@ void EmitUDiv32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::strin
 }
 
 void EmitINeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
-    ctx.AddU32("{}=uint(-({}));", inst, value);
+    ctx.AddU32("{}=uint(int(0)-int({}));", inst, value);
 }
 
 void EmitINeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
-    ctx.AddU64("{}=-({});", inst, value);
+    ctx.AddU64("{}=uint64_t(int64_t(0)-int64_t({}));", inst, value);
 }
 
 void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp b/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
index b8ddafe48..fcf620b79 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl_special.cpp
@@ -90,7 +90,9 @@ void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value&
     if (phi_reg == val_reg) {
         return;
     }
-    ctx.Add("{}={};", phi_reg, val_reg);
+    const bool needs_workaround{ctx.profile.has_gl_bool_ref_bug && phi_type == IR::Type::U1};
+    const auto suffix{needs_workaround ? "?true:false" : ""};
+    ctx.Add("{}={}{};", phi_reg, val_reg, suffix);
 }
 
 void EmitPrologue(EmitContext& ctx) {
diff --git a/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp b/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp
index bc9d2a904..bb7f1a0fd 100644
--- a/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp
+++ b/src/shader_recompiler/backend/glsl/glsl_emit_context.cpp
@@ -428,9 +428,10 @@ void EmitContext::DefineConstantBuffers(Bindings& bindings) {
         return;
     }
     for (const auto& desc : info.constant_buffer_descriptors) {
-        header += fmt::format(
-            "layout(std140,binding={}) uniform {}_cbuf_{}{{vec4 {}_cbuf{}[{}];}};",
-            bindings.uniform_buffer, stage_name, desc.index, stage_name, desc.index, 4 * 1024);
+        const auto cbuf_type{profile.has_gl_cbuf_ftou_bug ? "uvec4" : "vec4"};
+        header += fmt::format("layout(std140,binding={}) uniform {}_cbuf_{}{{{} {}_cbuf{}[{}];}};",
+                              bindings.uniform_buffer, stage_name, desc.index, cbuf_type,
+                              stage_name, desc.index, 4 * 1024);
         bindings.uniform_buffer += desc.count;
     }
 }
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 14f470812..8ea730c80 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -355,6 +355,31 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
     }
 }
 
+Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, Id) {
+    switch (attr) {
+    case IR::Attribute::PrimitiveId:
+        return ctx.OpLoad(ctx.U32[1], ctx.primitive_id);
+    case IR::Attribute::InstanceId:
+        if (ctx.profile.support_vertex_instance_id) {
+            return ctx.OpLoad(ctx.U32[1], ctx.instance_id);
+        } else {
+            const Id index{ctx.OpLoad(ctx.U32[1], ctx.instance_index)};
+            const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_instance)};
+            return ctx.OpISub(ctx.U32[1], index, base);
+        }
+    case IR::Attribute::VertexId:
+        if (ctx.profile.support_vertex_instance_id) {
+            return ctx.OpLoad(ctx.U32[1], ctx.vertex_id);
+        } else {
+            const Id index{ctx.OpLoad(ctx.U32[1], ctx.vertex_index)};
+            const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_vertex)};
+            return ctx.OpISub(ctx.U32[1], index, base);
+        }
+    default:
+        throw NotImplementedException("Read U32 attribute {}", attr);
+    }
+}
+
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, [[maybe_unused]] Id vertex) {
     const std::optional<OutAttr> output{OutputAttrPointer(ctx, attr)};
     if (!output) {
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
index 6cd22dd3e..887112deb 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_instructions.h
@@ -53,6 +53,7 @@ Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
 Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset);
 Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex);
+Id EmitGetAttributeU32(EmitContext& ctx, IR::Attribute attr, Id vertex);
 void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, Id vertex);
 Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex);
 void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, Id vertex);
diff --git a/src/shader_recompiler/frontend/ir/opcodes.inc b/src/shader_recompiler/frontend/ir/opcodes.inc
index 6929919df..b94ce7406 100644
--- a/src/shader_recompiler/frontend/ir/opcodes.inc
+++ b/src/shader_recompiler/frontend/ir/opcodes.inc
@@ -40,6 +40,7 @@ OPCODE(GetCbufU32, U32, U32,
 OPCODE(GetCbufF32,          F32,    U32,        U32,         )
 OPCODE(GetCbufU32x2,        U32x2,  U32,        U32,         )
 OPCODE(GetAttribute,        F32,    Attribute,  U32,         )
+OPCODE(GetAttributeU32,     U32,    Attribute,  U32,         )
 OPCODE(SetAttribute,        Void,   Attribute,  F32,    U32, )
 OPCODE(GetAttributeIndexed, F32,    U32,        U32,         )
 OPCODE(SetAttributeIndexed, Void,   U32,        F32,    U32, )
diff --git a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
index 1e476d83d..a78c469be 100644
--- a/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
+++ b/src/shader_recompiler/ir_opt/collect_shader_info_pass.cpp
@@ -389,6 +389,7 @@ void VisitUsages(Info& info, IR::Inst& inst) {
         info.uses_demote_to_helper_invocation = true;
         break;
     case IR::Opcode::GetAttribute:
+    case IR::Opcode::GetAttributeU32:
         info.loads.mask[static_cast<size_t>(inst.Arg(0).Attribute())] = true;
         break;
     case IR::Opcode::SetAttribute:
diff --git a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
index d089fdd12..c134a12bc 100644
--- a/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
+++ b/src/shader_recompiler/ir_opt/constant_propagation_pass.cpp
@@ -505,6 +505,29 @@ void FoldBitCast(IR::Inst& inst, IR::Opcode reverse) {
             return;
         }
     }
+    if constexpr (op == IR::Opcode::BitCastU32F32) {
+        // Workaround for new NVIDIA driver bug, where:
+        // uint attr = ftou(itof(gl_InstanceID));
+        // always returned 0.
+        // We can instead manually optimize this and work around the driver bug:
+        // uint attr = uint(gl_InstanceID);
+        if (arg_inst->GetOpcode() == IR::Opcode::GetAttribute) {
+            const IR::Attribute attr{arg_inst->Arg(0).Attribute()};
+            switch (attr) {
+            case IR::Attribute::PrimitiveId:
+            case IR::Attribute::InstanceId:
+            case IR::Attribute::VertexId:
+                break;
+            default:
+                return;
+            }
+            // Replace the bitcasts with an integer attribute get
+            inst.ReplaceOpcode(IR::Opcode::GetAttributeU32);
+            inst.SetArg(0, arg_inst->Arg(0));
+            inst.SetArg(1, arg_inst->Arg(1));
+            return;
+        }
+    }
 }
 
 void FoldInverseFunc(IR::Inst& inst, IR::Opcode reverse) {
diff --git a/src/shader_recompiler/profile.h b/src/shader_recompiler/profile.h
index f0c3b3b17..dc4c806ff 100644
--- a/src/shader_recompiler/profile.h
+++ b/src/shader_recompiler/profile.h
@@ -65,6 +65,10 @@ struct Profile {
     bool has_gl_component_indexing_bug{};
     /// The precise type qualifier is broken in the fragment stage of some drivers
     bool has_gl_precise_bug{};
+    /// Some drivers do not properly support floatBitsToUint when used on cbufs
+    bool has_gl_cbuf_ftou_bug{};
+    /// Some drivers poorly optimize boolean variable references
+    bool has_gl_bool_ref_bug{};
     /// Ignores SPIR-V ordered vs unordered using GLSL semantics
     bool ignore_nan_fp_comparisons{};
 
diff --git a/src/video_core/renderer_opengl/gl_device.cpp b/src/video_core/renderer_opengl/gl_device.cpp
index 0764ea6e0..e62912a22 100644
--- a/src/video_core/renderer_opengl/gl_device.cpp
+++ b/src/video_core/renderer_opengl/gl_device.cpp
@@ -182,17 +182,13 @@ Device::Device() {
         shader_backend = Settings::ShaderBackend::GLSL;
     }
 
-    if (shader_backend == Settings::ShaderBackend::GLSL && is_nvidia &&
-        !Settings::values.renderer_debug) {
+    if (shader_backend == Settings::ShaderBackend::GLSL && is_nvidia) {
         const std::string_view driver_version = version.substr(13);
         const int version_major =
             std::atoi(driver_version.substr(0, driver_version.find(".")).data());
-
         if (version_major >= 495) {
-            LOG_WARNING(Render_OpenGL, "NVIDIA drivers 495 and later causes significant problems "
-                                       "with yuzu. Forcing GLASM as a mitigation.");
-            shader_backend = Settings::ShaderBackend::GLASM;
-            use_assembly_shaders = true;
+            has_cbuf_ftou_bug = true;
+            has_bool_ref_bug = true;
         }
     }
 
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index de9e41659..95c2e8d38 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -152,6 +152,14 @@ public:
         return need_fastmath_off;
     }
 
+    bool HasCbufFtouBug() const {
+        return has_cbuf_ftou_bug;
+    }
+
+    bool HasBoolRefBug() const {
+        return has_bool_ref_bug;
+    }
+
     Settings::ShaderBackend GetShaderBackend() const {
         return shader_backend;
     }
@@ -200,6 +208,8 @@ private:
     bool has_sparse_texture_2{};
     bool warp_size_potentially_larger_than_guest{};
     bool need_fastmath_off{};
+    bool has_cbuf_ftou_bug{};
+    bool has_bool_ref_bug{};
 
     std::string vendor_name;
 };
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 29c6e1a5f..ec558a9af 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -214,6 +214,8 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
             .has_broken_fp16_float_controls = false,
             .has_gl_component_indexing_bug = device.HasComponentIndexingBug(),
             .has_gl_precise_bug = device.HasPreciseBug(),
+            .has_gl_cbuf_ftou_bug = device.HasCbufFtouBug(),
+            .has_gl_bool_ref_bug = device.HasBoolRefBug(),
             .ignore_nan_fp_comparisons = true,
             .gl_max_compute_smem_size = device.GetMaxComputeSharedMemorySize(),
         },
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index c3050887c..0ba56ff1e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -1344,7 +1344,6 @@ bool Image::ScaleUp(bool ignore) {
         return false;
     }
     has_scaled = true;
-    const auto& device = runtime->device;
     if (!scaled_image) {
         const bool is_2d = info.type == ImageType::e2D;
         const u32 scaled_width = resolution.ScaleUp(info.size.width);
@@ -1352,7 +1351,7 @@ bool Image::ScaleUp(bool ignore) {
         auto scaled_info = info;
         scaled_info.size.width = scaled_width;
         scaled_info.size.height = scaled_height;
-        scaled_image = MakeImage(device, scaled_info);
+        scaled_image = MakeImage(runtime->device, scaled_info);
         auto& allocator = runtime->memory_allocator;
         scaled_commit = MemoryCommit(allocator.Commit(scaled_image, MemoryUsage::DeviceLocal));
         ignore = false;
@@ -1361,18 +1360,13 @@ bool Image::ScaleUp(bool ignore) {
     if (ignore) {
         return true;
     }
-
     if (aspect_mask == 0) {
         aspect_mask = ImageAspectMask(info.format);
     }
-    static constexpr auto OPTIMAL_FORMAT = FormatType::Optimal;
-    const PixelFormat format = StorageFormat(info.format);
-    const auto vk_format = MaxwellToVK::SurfaceFormat(device, OPTIMAL_FORMAT, false, format).format;
-    const auto blit_usage = VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
-    if (device.IsFormatSupported(vk_format, blit_usage, OPTIMAL_FORMAT)) {
-        BlitScale(*scheduler, *original_image, *scaled_image, info, aspect_mask, resolution);
-    } else {
+    if (NeedsScaleHelper()) {
         return BlitScaleHelper(true);
+    } else {
+        BlitScale(*scheduler, *original_image, *scaled_image, info, aspect_mask, resolution);
     }
     return true;
 }
@@ -1394,15 +1388,10 @@ bool Image::ScaleDown(bool ignore) {
     if (aspect_mask == 0) {
         aspect_mask = ImageAspectMask(info.format);
     }
-    static constexpr auto OPTIMAL_FORMAT = FormatType::Optimal;
-    const PixelFormat format = StorageFormat(info.format);
-    const auto& device = runtime->device;
-    const auto vk_format = MaxwellToVK::SurfaceFormat(device, OPTIMAL_FORMAT, false, format).format;
-    const auto blit_usage = VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
-    if (device.IsFormatSupported(vk_format, blit_usage, OPTIMAL_FORMAT)) {
-        BlitScale(*scheduler, *scaled_image, *original_image, info, aspect_mask, resolution, false);
-    } else {
+    if (NeedsScaleHelper()) {
         return BlitScaleHelper(false);
+    } else {
+        BlitScale(*scheduler, *scaled_image, *original_image, info, aspect_mask, resolution, false);
     }
     return true;
 }
@@ -1470,6 +1459,20 @@ bool Image::BlitScaleHelper(bool scale_up) {
     return true;
 }
 
+bool Image::NeedsScaleHelper() const {
+    const auto& device = runtime->device;
+    const bool needs_msaa_helper = info.num_samples > 1 && device.CantBlitMSAA();
+    if (needs_msaa_helper) {
+        return true;
+    }
+    static constexpr auto OPTIMAL_FORMAT = FormatType::Optimal;
+    const PixelFormat format = StorageFormat(info.format);
+    const auto vk_format = MaxwellToVK::SurfaceFormat(device, OPTIMAL_FORMAT, false, format).format;
+    const auto blit_usage = VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT;
+    const bool needs_blit_helper = !device.IsFormatSupported(vk_format, blit_usage, OPTIMAL_FORMAT);
+    return needs_blit_helper;
+}
+
 ImageView::ImageView(TextureCacheRuntime& runtime, const VideoCommon::ImageViewInfo& info,
                      ImageId image_id_, Image& image)
     : VideoCommon::ImageViewBase{info, image.info, image_id_}, device{&runtime.device},
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 2f12be78b..c81130dd2 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -149,6 +149,8 @@ public:
 private:
     bool BlitScaleHelper(bool scale_up);
 
+    bool NeedsScaleHelper() const;
+
     VKScheduler* scheduler{};
     TextureCacheRuntime* runtime{};
 
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 7bd31b211..d8e19cb2f 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -364,14 +364,14 @@ template <u32 GOB_EXTENT>
 
 [[nodiscard]] std::optional<SubresourceExtent> ResolveOverlapRightAddress2D(
     const ImageInfo& new_info, GPUVAddr gpu_addr, const ImageBase& overlap, bool strict_size) {
-    const u32 layer_stride = new_info.layer_stride;
-    const s32 new_size = layer_stride * new_info.resources.layers;
-    const s32 diff = static_cast<s32>(overlap.gpu_addr - gpu_addr);
+    const u64 layer_stride = new_info.layer_stride;
+    const u64 new_size = layer_stride * new_info.resources.layers;
+    const u64 diff = overlap.gpu_addr - gpu_addr;
     if (diff > new_size) {
         return std::nullopt;
     }
-    const s32 base_layer = diff / layer_stride;
-    const s32 mip_offset = diff % layer_stride;
+    const s32 base_layer = static_cast<s32>(diff / layer_stride);
+    const s32 mip_offset = static_cast<s32>(diff % layer_stride);
     const std::array offsets = CalculateMipLevelOffsets(new_info);
     const auto end = offsets.begin() + new_info.resources.levels;
     const auto it = std::find(offsets.begin(), end, static_cast<u32>(mip_offset));
diff --git a/src/video_core/vulkan_common/vulkan_device.cpp b/src/video_core/vulkan_common/vulkan_device.cpp
index 9862b815b..3d78efddc 100644
--- a/src/video_core/vulkan_common/vulkan_device.cpp
+++ b/src/video_core/vulkan_common/vulkan_device.cpp
@@ -638,15 +638,20 @@ Device::Device(VkInstance instance_, vk::PhysicalDevice physical_, VkSurfaceKHR
         }
     }
 
-    if (ext_vertex_input_dynamic_state && driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) {
+    const bool is_intel_windows = driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS;
+    if (ext_vertex_input_dynamic_state && is_intel_windows) {
         LOG_WARNING(Render_Vulkan, "Blacklisting Intel for VK_EXT_vertex_input_dynamic_state");
         ext_vertex_input_dynamic_state = false;
     }
-    if (is_float16_supported && driver_id == VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS) {
+    if (is_float16_supported && is_intel_windows) {
         // Intel's compiler crashes when using fp16 on Astral Chain, disable it for the time being.
         LOG_WARNING(Render_Vulkan, "Blacklisting Intel proprietary from float16 math");
         is_float16_supported = false;
     }
+    if (is_intel_windows) {
+        LOG_WARNING(Render_Vulkan, "Intel proprietary drivers do not support MSAA image blits");
+        cant_blit_msaa = true;
+    }
 
     supports_d24_depth =
         IsFormatSupported(VK_FORMAT_D24_UNORM_S8_UINT,
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 4c9d86aad..37d140ebd 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -350,6 +350,10 @@ public:
         return supports_d24_depth;
     }
 
+    bool CantBlitMSAA() const {
+        return cant_blit_msaa;
+    }
+
 private:
     /// Checks if the physical device is suitable.
     void CheckSuitability(bool requires_swapchain) const;
@@ -443,6 +447,7 @@ private:
     bool has_renderdoc{};         ///< Has RenderDoc attached
     bool has_nsight_graphics{};   ///< Has Nsight Graphics attached
     bool supports_d24_depth{};    ///< Supports D24 depth buffers.
+    bool cant_blit_msaa{};        ///< Does not support MSAA<->MSAA blitting.
 
     // Telemetry parameters
     std::string vendor_name;      ///< Device's driver name.
diff --git a/src/yuzu/applets/qt_controller.cpp b/src/yuzu/applets/qt_controller.cpp
index d63193131..4239c17f5 100644
--- a/src/yuzu/applets/qt_controller.cpp
+++ b/src/yuzu/applets/qt_controller.cpp
@@ -400,36 +400,66 @@ void QtControllerSelectorDialog::SetSupportedControllers() {
400} 400}
401 401
402void QtControllerSelectorDialog::SetEmulatedControllers(std::size_t player_index) { 402void QtControllerSelectorDialog::SetEmulatedControllers(std::size_t player_index) {
403 const auto npad_style_set = system.HIDCore().GetSupportedStyleTag();
403 auto& pairs = index_controller_type_pairs[player_index]; 404 auto& pairs = index_controller_type_pairs[player_index];
404 405
405 pairs.clear(); 406 pairs.clear();
406 emulated_controllers[player_index]->clear(); 407 emulated_controllers[player_index]->clear();
407 408
408 pairs.emplace_back(emulated_controllers[player_index]->count(), 409 const auto add_item = [&](Core::HID::NpadStyleIndex controller_type,
409 Core::HID::NpadStyleIndex::ProController); 410 const QString& controller_name) {
410 emulated_controllers[player_index]->addItem(tr("Pro Controller")); 411 pairs.emplace_back(emulated_controllers[player_index]->count(), controller_type);
412 emulated_controllers[player_index]->addItem(controller_name);
413 };
411 414
412 pairs.emplace_back(emulated_controllers[player_index]->count(), 415 if (npad_style_set.fullkey == 1) {
413 Core::HID::NpadStyleIndex::JoyconDual); 416 add_item(Core::HID::NpadStyleIndex::ProController, tr("Pro Controller"));
414 emulated_controllers[player_index]->addItem(tr("Dual Joycons")); 417 }
415 418
416 pairs.emplace_back(emulated_controllers[player_index]->count(), 419 if (npad_style_set.joycon_dual == 1) {
417 Core::HID::NpadStyleIndex::JoyconLeft); 420 add_item(Core::HID::NpadStyleIndex::JoyconDual, tr("Dual Joycons"));
418 emulated_controllers[player_index]->addItem(tr("Left Joycon")); 421 }
419 422
420 pairs.emplace_back(emulated_controllers[player_index]->count(), 423 if (npad_style_set.joycon_left == 1) {
421 Core::HID::NpadStyleIndex::JoyconRight); 424 add_item(Core::HID::NpadStyleIndex::JoyconLeft, tr("Left Joycon"));
422 emulated_controllers[player_index]->addItem(tr("Right Joycon")); 425 }
423 426
424 if (player_index == 0) { 427 if (npad_style_set.joycon_right == 1) {
425 pairs.emplace_back(emulated_controllers[player_index]->count(), 428 add_item(Core::HID::NpadStyleIndex::JoyconRight, tr("Right Joycon"));
426 Core::HID::NpadStyleIndex::Handheld);
427 emulated_controllers[player_index]->addItem(tr("Handheld"));
428 } 429 }
429 430
430 pairs.emplace_back(emulated_controllers[player_index]->count(), 431 if (player_index == 0 && npad_style_set.handheld == 1) {
431 Core::HID::NpadStyleIndex::GameCube); 432 add_item(Core::HID::NpadStyleIndex::Handheld, tr("Handheld"));
432 emulated_controllers[player_index]->addItem(tr("GameCube Controller")); 433 }
434
435 if (npad_style_set.gamecube == 1) {
436 add_item(Core::HID::NpadStyleIndex::GameCube, tr("GameCube Controller"));
437 }
438
439 // Disable all unsupported controllers
440 if (!Settings::values.enable_all_controllers) {
441 return;
442 }
443
444 if (npad_style_set.palma == 1) {
445 add_item(Core::HID::NpadStyleIndex::Pokeball, tr("Poke Ball Plus"));
446 }
447
448 if (npad_style_set.lark == 1) {
449 add_item(Core::HID::NpadStyleIndex::NES, tr("NES Controller"));
450 }
451
452 if (npad_style_set.lucia == 1) {
453 add_item(Core::HID::NpadStyleIndex::SNES, tr("SNES Controller"));
454 }
455
456 if (npad_style_set.lagoon == 1) {
457 add_item(Core::HID::NpadStyleIndex::N64, tr("N64 Controller"));
458 }
459
460 if (npad_style_set.lager == 1) {
461 add_item(Core::HID::NpadStyleIndex::SegaGenesis, tr("Sega Genesis"));
462 }
433} 463}
434 464
435Core::HID::NpadStyleIndex QtControllerSelectorDialog::GetControllerTypeFromIndex( 465Core::HID::NpadStyleIndex QtControllerSelectorDialog::GetControllerTypeFromIndex(
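
Note: this hunk (and the matching one in configure_input_player.cpp below) gates every entry on the supported-style bitfield and funnels additions through a small add_item lambda, which keeps index_controller_type_pairs and the combo box in lockstep even when entries are skipped. A self-contained sketch of the pattern using plain std types instead of Qt; ControllerType and the StyleSet flags are illustrative:

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    enum class ControllerType { ProController, JoyconDual, Handheld };

    struct StyleSet {            // stands in for the HID supported-style bitfield
        bool fullkey = true;
        bool joycon_dual = true;
        bool handheld = false;
    };

    int main() {
        std::vector<std::string> combo_items;                    // the "combo box"
        std::vector<std::pair<int, ControllerType>> pairs;       // combo index -> type

        const auto add_item = [&](ControllerType type, const char* name) {
            // Record the combo index *before* appending, exactly like
            // emulated_controllers[player_index]->count() in the diff above.
            pairs.emplace_back(static_cast<int>(combo_items.size()), type);
            combo_items.emplace_back(name);
        };

        const StyleSet styles{};
        if (styles.fullkey)     add_item(ControllerType::ProController, "Pro Controller");
        if (styles.joycon_dual) add_item(ControllerType::JoyconDual, "Dual Joycons");
        if (styles.handheld)    add_item(ControllerType::Handheld, "Handheld");

        for (const auto& [index, type] : pairs) {
            std::printf("%d -> %s (type %d)\n", index, combo_items[index].c_str(),
                        static_cast<int>(type));
        }
    }
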
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index cb6163702..8c6249fc2 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -907,78 +907,63 @@ void ConfigureInputPlayer::UpdateUI() {
907} 907}
908 908
909void ConfigureInputPlayer::SetConnectableControllers() { 909void ConfigureInputPlayer::SetConnectableControllers() {
910 Core::HID::NpadStyleTag npad_style_set = hid_core.GetSupportedStyleTag(); 910 const auto npad_style_set = hid_core.GetSupportedStyleTag();
911 index_controller_type_pairs.clear(); 911 index_controller_type_pairs.clear();
912 ui->comboControllerType->clear(); 912 ui->comboControllerType->clear();
913 913
914 const auto add_item = [&](Core::HID::NpadStyleIndex controller_type,
915 const QString& controller_name) {
916 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), controller_type);
917 ui->comboControllerType->addItem(controller_name);
918 };
919
914 if (npad_style_set.fullkey == 1) { 920 if (npad_style_set.fullkey == 1) {
915 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 921 add_item(Core::HID::NpadStyleIndex::ProController, tr("Pro Controller"));
916 Core::HID::NpadStyleIndex::ProController);
917 ui->comboControllerType->addItem(tr("Pro Controller"));
918 } 922 }
919 923
920 if (npad_style_set.joycon_dual == 1) { 924 if (npad_style_set.joycon_dual == 1) {
921 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 925 add_item(Core::HID::NpadStyleIndex::JoyconDual, tr("Dual Joycons"));
922 Core::HID::NpadStyleIndex::JoyconDual);
923 ui->comboControllerType->addItem(tr("Dual Joycons"));
924 } 926 }
925 927
926 if (npad_style_set.joycon_left == 1) { 928 if (npad_style_set.joycon_left == 1) {
927 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 929 add_item(Core::HID::NpadStyleIndex::JoyconLeft, tr("Left Joycon"));
928 Core::HID::NpadStyleIndex::JoyconLeft);
929 ui->comboControllerType->addItem(tr("Left Joycon"));
930 } 930 }
931 931
932 if (npad_style_set.joycon_right == 1) { 932 if (npad_style_set.joycon_right == 1) {
933 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 933 add_item(Core::HID::NpadStyleIndex::JoyconRight, tr("Right Joycon"));
934 Core::HID::NpadStyleIndex::JoyconRight);
935 ui->comboControllerType->addItem(tr("Right Joycon"));
936 } 934 }
937 935
938 if (player_index == 0 && npad_style_set.handheld == 1) { 936 if (player_index == 0 && npad_style_set.handheld == 1) {
939 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 937 add_item(Core::HID::NpadStyleIndex::Handheld, tr("Handheld"));
940 Core::HID::NpadStyleIndex::Handheld);
941 ui->comboControllerType->addItem(tr("Handheld"));
942 } 938 }
943 939
944 if (npad_style_set.gamecube == 1) { 940 if (npad_style_set.gamecube == 1) {
945 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 941 add_item(Core::HID::NpadStyleIndex::GameCube, tr("GameCube Controller"));
946 Core::HID::NpadStyleIndex::GameCube);
947 ui->comboControllerType->addItem(tr("GameCube Controller"));
948 } 942 }
949 943
950 // Disable all unsupported controllers 944 // Disable all unsupported controllers
951 if (!Settings::values.enable_all_controllers) { 945 if (!Settings::values.enable_all_controllers) {
952 return; 946 return;
953 } 947 }
948
954 if (npad_style_set.palma == 1) { 949 if (npad_style_set.palma == 1) {
955 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 950 add_item(Core::HID::NpadStyleIndex::Pokeball, tr("Poke Ball Plus"));
956 Core::HID::NpadStyleIndex::Pokeball);
957 ui->comboControllerType->addItem(tr("Poke Ball Plus"));
958 } 951 }
959 952
960 if (npad_style_set.lark == 1) { 953 if (npad_style_set.lark == 1) {
961 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 954 add_item(Core::HID::NpadStyleIndex::NES, tr("NES Controller"));
962 Core::HID::NpadStyleIndex::NES);
963 ui->comboControllerType->addItem(tr("NES Controller"));
964 } 955 }
965 956
966 if (npad_style_set.lucia == 1) { 957 if (npad_style_set.lucia == 1) {
967 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 958 add_item(Core::HID::NpadStyleIndex::SNES, tr("SNES Controller"));
968 Core::HID::NpadStyleIndex::SNES);
969 ui->comboControllerType->addItem(tr("SNES Controller"));
970 } 959 }
971 960
972 if (npad_style_set.lagoon == 1) { 961 if (npad_style_set.lagoon == 1) {
973 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 962 add_item(Core::HID::NpadStyleIndex::N64, tr("N64 Controller"));
974 Core::HID::NpadStyleIndex::N64);
975 ui->comboControllerType->addItem(tr("N64 Controller"));
976 } 963 }
977 964
978 if (npad_style_set.lager == 1) { 965 if (npad_style_set.lager == 1) {
979 index_controller_type_pairs.emplace_back(ui->comboControllerType->count(), 966 add_item(Core::HID::NpadStyleIndex::SegaGenesis, tr("Sega Genesis"));
980 Core::HID::NpadStyleIndex::SegaGenesis);
981 ui->comboControllerType->addItem(tr("Sega Genesis"));
982 } 967 }
983} 968}
984 969
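
Note: because entries can now be skipped per style set, a combo-box row no longer maps one-to-one onto an NpadStyleIndex value, which is why both functions keep building the (combo index, type) pairs vector. Resolving a selected row back to a type presumably walks that vector; the real GetControllerTypeFromIndex is not part of these hunks, so the following lookup is only a hedged sketch:

    #include <algorithm>
    #include <optional>
    #include <utility>
    #include <vector>

    enum class ControllerType { ProController, JoyconDual, Handheld };

    std::optional<ControllerType> TypeFromComboIndex(
        const std::vector<std::pair<int, ControllerType>>& pairs, int combo_index) {
        const auto it = std::find_if(pairs.begin(), pairs.end(), [combo_index](const auto& p) {
            return p.first == combo_index;
        });
        if (it == pairs.end()) {
            return std::nullopt;  // row not mapped; should not happen if both stay in sync
        }
        return it->second;
    }

    int main() {
        const std::vector<std::pair<int, ControllerType>> pairs{
            {0, ControllerType::ProController}, {1, ControllerType::Handheld}};
        return TypeFromComboIndex(pairs, 1) == ControllerType::Handheld ? 0 : 1;
    }
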
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1e02d715b..53f11a9ac 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1547,6 +1547,8 @@ void GMainWindow::ShutdownGame() {
1547 emu_thread->wait(); 1547 emu_thread->wait();
1548 emu_thread = nullptr; 1548 emu_thread = nullptr;
1549 1549
1550 emulation_running = false;
1551
1550 discord_rpc->Update(); 1552 discord_rpc->Update();
1551 1553
1552 // The emulation is stopped, so closing the window or not does not matter anymore 1554 // The emulation is stopped, so closing the window or not does not matter anymore
@@ -1585,8 +1587,6 @@ void GMainWindow::ShutdownGame() {
1585 emu_frametime_label->setVisible(false); 1587 emu_frametime_label->setVisible(false);
1586 renderer_status_button->setEnabled(true); 1588 renderer_status_button->setEnabled(true);
1587 1589
1588 emulation_running = false;
1589
1590 game_path.clear(); 1590 game_path.clear();
1591 1591
1592 // When closing the game, destroy the GLWindow to clear the context after the game is closed 1592 // When closing the game, destroy the GLWindow to clear the context after the game is closed
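
Note: the final hunk only moves emulation_running = false earlier in ShutdownGame(), so the flag is already cleared before discord_rpc->Update() and the rest of the UI teardown run. A minimal sketch of why that ordering can matter, assuming the presence update reads the flag; whether yuzu's DiscordRPC actually does is not shown in this diff:

    #include <cstdio>

    // Hypothetical consumer of the flag, not yuzu's DiscordRPC implementation.
    bool emulation_running = true;

    void UpdatePresence() {
        std::puts(emulation_running ? "presence: in game" : "presence: idle");
    }

    void ShutdownGame() {
        emulation_running = false;  // cleared first, as in the hunk above
        UpdatePresence();           // now reports "idle" instead of "in game"
    }

    int main() {
        ShutdownGame();
    }
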