summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/common/logging/backend.cpp1
-rw-r--r--src/common/logging/filter.cpp1
-rw-r--r--src/common/logging/types.h1
-rw-r--r--src/common/lru_cache.h140
-rw-r--r--src/common/settings.cpp2
-rw-r--r--src/common/settings.h1
-rw-r--r--src/core/CMakeLists.txt2
-rw-r--r--src/core/core.cpp6
-rw-r--r--src/core/core.h3
-rw-r--r--src/core/cpu_manager.cpp23
-rw-r--r--src/core/hle/kernel/k_address_arbiter.cpp4
-rw-r--r--src/core/hle/kernel/k_auto_object.h4
-rw-r--r--src/core/hle/kernel/k_condition_variable.cpp2
-rw-r--r--src/core/hle/kernel/k_handle_table.cpp6
-rw-r--r--src/core/hle/kernel/k_handle_table.h2
-rw-r--r--src/core/hle/kernel/k_process.cpp1
-rw-r--r--src/core/hle/kernel/k_scheduler.cpp85
-rw-r--r--src/core/hle/kernel/k_scheduler.h2
-rw-r--r--src/core/hle/kernel/k_thread.cpp21
-rw-r--r--src/core/hle/kernel/k_thread.h36
-rw-r--r--src/core/hle/kernel/kernel.cpp57
-rw-r--r--src/core/hle/kernel/kernel.h3
-rw-r--r--src/core/hle/kernel/svc.cpp2
-rw-r--r--src/core/hle/service/ngct/ngct.cpp46
-rw-r--r--src/core/hle/service/ngct/ngct.h20
-rw-r--r--src/core/hle/service/nifm/nifm.cpp113
-rw-r--r--src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp9
-rw-r--r--src/core/hle/service/nvflinger/buffer_queue.h4
-rw-r--r--src/core/hle/service/nvflinger/nvflinger.cpp2
-rw-r--r--src/core/hle/service/service.cpp2
-rw-r--r--src/core/network/network_interface.cpp171
-rw-r--r--src/shader_recompiler/backend/spirv/emit_context.cpp56
-rw-r--r--src/shader_recompiler/backend/spirv/emit_context.h4
-rw-r--r--src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp52
-rw-r--r--src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp127
-rw-r--r--src/shader_recompiler/frontend/maxwell/structured_control_flow.h9
-rw-r--r--src/shader_recompiler/frontend/maxwell/translate_program.cpp2
-rw-r--r--src/shader_recompiler/host_translate_info.h5
-rw-r--r--src/video_core/buffer_cache/buffer_base.h20
-rw-r--r--src/video_core/buffer_cache/buffer_cache.h61
-rw-r--r--src/video_core/command_classes/codecs/vp9.cpp1
-rw-r--r--src/video_core/command_classes/codecs/vp9_types.h85
-rw-r--r--src/video_core/engines/maxwell_3d.h8
-rw-r--r--src/video_core/renderer_opengl/gl_device.h4
-rw-r--r--src/video_core/renderer_opengl/gl_shader_cache.cpp1
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.cpp3
-rw-r--r--src/video_core/renderer_vulkan/vk_blit_screen.cpp4
-rw-r--r--src/video_core/renderer_vulkan/vk_pipeline_cache.cpp2
-rw-r--r--src/video_core/renderer_vulkan/vk_scheduler.cpp40
-rw-r--r--src/video_core/renderer_vulkan/vk_scheduler.h6
-rw-r--r--src/video_core/renderer_vulkan/vk_swapchain.cpp6
-rw-r--r--src/video_core/renderer_vulkan/vk_swapchain.h4
-rw-r--r--src/video_core/texture_cache/image_base.h2
-rw-r--r--src/video_core/texture_cache/texture_cache.h92
-rw-r--r--src/video_core/texture_cache/texture_cache_base.h8
-rw-r--r--src/video_core/textures/decoders.cpp8
-rw-r--r--src/yuzu/configuration/config.cpp2
-rw-r--r--src/yuzu/configuration/configure_graphics.ui2
-rw-r--r--src/yuzu/configuration/configure_graphics_advanced.cpp6
-rw-r--r--src/yuzu/configuration/configure_graphics_advanced.h1
-rw-r--r--src/yuzu/configuration/configure_graphics_advanced.ui14
-rw-r--r--src/yuzu/game_list.cpp8
-rw-r--r--src/yuzu_cmd/config.cpp1
63 files changed, 871 insertions, 545 deletions
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 949384fd3..e40d117d6 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -18,6 +18,7 @@
18#include "common/fs/fs_paths.h" 18#include "common/fs/fs_paths.h"
19#include "common/fs/path_util.h" 19#include "common/fs/path_util.h"
20#include "common/literals.h" 20#include "common/literals.h"
21#include "common/thread.h"
21 22
22#include "common/logging/backend.h" 23#include "common/logging/backend.h"
23#include "common/logging/log.h" 24#include "common/logging/log.h"
diff --git a/src/common/logging/filter.cpp b/src/common/logging/filter.cpp
index f055f0e11..42744c994 100644
--- a/src/common/logging/filter.cpp
+++ b/src/common/logging/filter.cpp
@@ -111,6 +111,7 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
111 SUB(Service, NCM) \ 111 SUB(Service, NCM) \
112 SUB(Service, NFC) \ 112 SUB(Service, NFC) \
113 SUB(Service, NFP) \ 113 SUB(Service, NFP) \
114 SUB(Service, NGCT) \
114 SUB(Service, NIFM) \ 115 SUB(Service, NIFM) \
115 SUB(Service, NIM) \ 116 SUB(Service, NIM) \
116 SUB(Service, NPNS) \ 117 SUB(Service, NPNS) \
diff --git a/src/common/logging/types.h b/src/common/logging/types.h
index 7ad0334fc..ddf9d27ca 100644
--- a/src/common/logging/types.h
+++ b/src/common/logging/types.h
@@ -81,6 +81,7 @@ enum class Class : u8 {
81 Service_NCM, ///< The NCM service 81 Service_NCM, ///< The NCM service
82 Service_NFC, ///< The NFC (Near-field communication) service 82 Service_NFC, ///< The NFC (Near-field communication) service
83 Service_NFP, ///< The NFP service 83 Service_NFP, ///< The NFP service
84 Service_NGCT, ///< The NGCT (No Good Content for Terra) service
84 Service_NIFM, ///< The NIFM (Network interface) service 85 Service_NIFM, ///< The NIFM (Network interface) service
85 Service_NIM, ///< The NIM service 86 Service_NIM, ///< The NIM service
86 Service_NPNS, ///< The NPNS service 87 Service_NPNS, ///< The NPNS service
diff --git a/src/common/lru_cache.h b/src/common/lru_cache.h
new file mode 100644
index 000000000..365488ba5
--- /dev/null
+++ b/src/common/lru_cache.h
@@ -0,0 +1,140 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2+ or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <deque>
8#include <memory>
9#include <type_traits>
10
11#include "common/common_types.h"
12
namespace Common {

/// Least-recently-used tracking list.
///
/// Items live in a pool and are chained into an intrusive doubly-linked list
/// ordered from least recently used (front) to most recently used (back).
/// Traits must provide:
///   - ObjectType: the payload stored per item
///   - TickType:   an integral timestamp type used for recency ordering
template <class Traits>
class LeastRecentlyUsedCache {
    using ObjectType = typename Traits::ObjectType;
    using TickType = typename Traits::TickType;

    // Pool node. prev/next are non-owning pointers into item_pool; this is
    // safe because std::deque never relocates existing elements on
    // emplace_back, so node addresses stay stable for the pool's lifetime.
    struct Item {
        ObjectType obj;
        TickType tick;
        Item* next{};
        Item* prev{};
    };

public:
    LeastRecentlyUsedCache() : first_item{}, last_item{} {}
    ~LeastRecentlyUsedCache() = default;

    /// Inserts an object stamped with `tick` at the most-recently-used end.
    /// @returns an id that identifies the item for Touch()/Free().
    size_t Insert(ObjectType obj, TickType tick) {
        const auto new_id = Build();
        auto& item = item_pool[new_id];
        item.obj = obj;
        item.tick = tick;
        Attach(item);
        return new_id;
    }

    /// Marks item `id` as used at time `tick`, moving it to the
    /// most-recently-used end. Stale ticks (<= the stored tick) are ignored,
    /// as is touching an item that is already at the back of the list.
    void Touch(size_t id, TickType tick) {
        auto& item = item_pool[id];
        if (item.tick >= tick) {
            return;
        }
        item.tick = tick;
        if (&item == last_item) {
            return;
        }
        Detach(item);
        Attach(item);
    }

    /// Removes item `id` from the list and recycles its slot for future
    /// Insert() calls. The id must not be used again until re-issued.
    void Free(size_t id) {
        auto& item = item_pool[id];
        Detach(item);
        item.prev = nullptr;
        item.next = nullptr;
        free_items.push_back(id);
    }

    /// Invokes `func(obj)` on every item whose tick is <= `tick`, walking
    /// from least to most recently used. If `func` returns bool, a `true`
    /// return stops the iteration early.
    template <typename Func>
    void ForEachItemBelow(TickType tick, Func&& func) {
        // BUGFIX: must inspect std::invoke_result_t (the callback's result
        // *type*); the previous comparison of the std::invoke_result wrapper
        // itself against bool was always false, making the early-exit branch
        // unreachable for bool-returning callbacks.
        static constexpr bool RETURNS_BOOL =
            std::is_same_v<std::invoke_result_t<Func, ObjectType>, bool>;
        Item* iterator = first_item;
        while (iterator) {
            // Signed 64-bit difference so the walk still terminates sanely if
            // the (typically unsigned) tick counter ever wraps.
            if (static_cast<std::int64_t>(tick) - static_cast<std::int64_t>(iterator->tick) < 0) {
                return;
            }
            // Grab the successor first: func may Free()/Touch() the item.
            Item* next = iterator->next;
            if constexpr (RETURNS_BOOL) {
                if (func(iterator->obj)) {
                    return;
                }
            } else {
                func(iterator->obj);
            }
            iterator = next;
        }
    }

private:
    // Returns a pool slot for a new item, reusing a freed slot when one is
    // available, otherwise growing the pool.
    size_t Build() {
        if (free_items.empty()) {
            const size_t item_id = item_pool.size();
            auto& item = item_pool.emplace_back();
            item.next = nullptr;
            item.prev = nullptr;
            return item_id;
        }
        const size_t item_id = free_items.front();
        free_items.pop_front();
        auto& item = item_pool[item_id];
        item.next = nullptr;
        item.prev = nullptr;
        return item_id;
    }

    // Links the item at the most-recently-used end of the list.
    // Precondition: the item is not currently linked.
    void Attach(Item& item) {
        if (!first_item) {
            first_item = &item;
        }
        if (!last_item) {
            last_item = &item;
        } else {
            item.prev = last_item;
            last_item->next = &item;
            item.next = nullptr;
            last_item = &item;
        }
    }

    // Unlinks the item from the list, fixing up neighbors and the
    // first/last sentinels. The item's own prev/next are left stale; every
    // caller either relinks (Attach) or clears them (Free).
    void Detach(Item& item) {
        if (item.prev) {
            item.prev->next = item.next;
        }
        if (item.next) {
            item.next->prev = item.prev;
        }
        if (&item == first_item) {
            first_item = item.next;
            if (first_item) {
                first_item->prev = nullptr;
            }
        }
        if (&item == last_item) {
            last_item = item.prev;
            if (last_item) {
                last_item->next = nullptr;
            }
        }
    }

    std::deque<Item> item_pool;   // stable-address storage for all nodes
    std::deque<size_t> free_items; // recycled pool slots
    Item* first_item{};            // least recently used
    Item* last_item{};             // most recently used
};

} // namespace Common
diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 996315999..fd3b639cd 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -59,7 +59,6 @@ void LogSettings() {
59 log_setting("Renderer_UseVsync", values.use_vsync.GetValue()); 59 log_setting("Renderer_UseVsync", values.use_vsync.GetValue());
60 log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue()); 60 log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue());
61 log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue()); 61 log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue());
62 log_setting("Renderer_UseGarbageCollection", values.use_caches_gc.GetValue());
63 log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue()); 62 log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue());
64 log_setting("Audio_OutputEngine", values.sink_id.GetValue()); 63 log_setting("Audio_OutputEngine", values.sink_id.GetValue());
65 log_setting("Audio_EnableAudioStretching", values.enable_audio_stretching.GetValue()); 64 log_setting("Audio_EnableAudioStretching", values.enable_audio_stretching.GetValue());
@@ -143,7 +142,6 @@ void RestoreGlobalState(bool is_powered_on) {
143 values.shader_backend.SetGlobal(true); 142 values.shader_backend.SetGlobal(true);
144 values.use_asynchronous_shaders.SetGlobal(true); 143 values.use_asynchronous_shaders.SetGlobal(true);
145 values.use_fast_gpu_time.SetGlobal(true); 144 values.use_fast_gpu_time.SetGlobal(true);
146 values.use_caches_gc.SetGlobal(true);
147 values.bg_red.SetGlobal(true); 145 values.bg_red.SetGlobal(true);
148 values.bg_green.SetGlobal(true); 146 values.bg_green.SetGlobal(true);
149 values.bg_blue.SetGlobal(true); 147 values.bg_blue.SetGlobal(true);
diff --git a/src/common/settings.h b/src/common/settings.h
index 20769d310..ec4d381e8 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -475,7 +475,6 @@ struct Values {
475 ShaderBackend::SPIRV, "shader_backend"}; 475 ShaderBackend::SPIRV, "shader_backend"};
476 Setting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"}; 476 Setting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"};
477 Setting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"}; 477 Setting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"};
478 Setting<bool> use_caches_gc{false, "use_caches_gc"};
479 478
480 Setting<u8> bg_red{0, "bg_red"}; 479 Setting<u8> bg_red{0, "bg_red"};
481 Setting<u8> bg_green{0, "bg_green"}; 480 Setting<u8> bg_green{0, "bg_green"};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index f5cf5c16a..87d47e2e5 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -452,6 +452,8 @@ add_library(core STATIC
452 hle/service/nfp/nfp.h 452 hle/service/nfp/nfp.h
453 hle/service/nfp/nfp_user.cpp 453 hle/service/nfp/nfp_user.cpp
454 hle/service/nfp/nfp_user.h 454 hle/service/nfp/nfp_user.h
455 hle/service/ngct/ngct.cpp
456 hle/service/ngct/ngct.h
455 hle/service/nifm/nifm.cpp 457 hle/service/nifm/nifm.cpp
456 hle/service/nifm/nifm.h 458 hle/service/nifm/nifm.h
457 hle/service/nim/nim.cpp 459 hle/service/nim/nim.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 5893a86bf..ba4629993 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -507,6 +507,12 @@ const ARM_Interface& System::CurrentArmInterface() const {
507 return impl->kernel.CurrentPhysicalCore().ArmInterface(); 507 return impl->kernel.CurrentPhysicalCore().ArmInterface();
508} 508}
509 509
510std::size_t System::CurrentCoreIndex() const {
511 std::size_t core = impl->kernel.GetCurrentHostThreadID();
512 ASSERT(core < Core::Hardware::NUM_CPU_CORES);
513 return core;
514}
515
510Kernel::PhysicalCore& System::CurrentPhysicalCore() { 516Kernel::PhysicalCore& System::CurrentPhysicalCore() {
511 return impl->kernel.CurrentPhysicalCore(); 517 return impl->kernel.CurrentPhysicalCore();
512} 518}
diff --git a/src/core/core.h b/src/core/core.h
index f9116ebb6..715ab88e7 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -205,6 +205,9 @@ public:
205 /// Gets an ARM interface to the CPU core that is currently running 205 /// Gets an ARM interface to the CPU core that is currently running
206 [[nodiscard]] const ARM_Interface& CurrentArmInterface() const; 206 [[nodiscard]] const ARM_Interface& CurrentArmInterface() const;
207 207
208 /// Gets the index of the currently running CPU core
209 [[nodiscard]] std::size_t CurrentCoreIndex() const;
210
208 /// Gets the physical core for the CPU core that is currently running 211 /// Gets the physical core for the CPU core that is currently running
209 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore(); 212 [[nodiscard]] Kernel::PhysicalCore& CurrentPhysicalCore();
210 213
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index de2e5563e..77efcabf0 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -118,18 +118,17 @@ void CpuManager::MultiCoreRunGuestLoop() {
118 physical_core = &kernel.CurrentPhysicalCore(); 118 physical_core = &kernel.CurrentPhysicalCore();
119 } 119 }
120 system.ExitDynarmicProfile(); 120 system.ExitDynarmicProfile();
121 { 121 physical_core->ArmInterface().ClearExclusiveState();
122 Kernel::KScopedDisableDispatch dd(kernel); 122 kernel.CurrentScheduler()->RescheduleCurrentCore();
123 physical_core->ArmInterface().ClearExclusiveState();
124 }
125 } 123 }
126} 124}
127 125
128void CpuManager::MultiCoreRunIdleThread() { 126void CpuManager::MultiCoreRunIdleThread() {
129 auto& kernel = system.Kernel(); 127 auto& kernel = system.Kernel();
130 while (true) { 128 while (true) {
131 Kernel::KScopedDisableDispatch dd(kernel); 129 auto& physical_core = kernel.CurrentPhysicalCore();
132 kernel.CurrentPhysicalCore().Idle(); 130 physical_core.Idle();
131 kernel.CurrentScheduler()->RescheduleCurrentCore();
133 } 132 }
134} 133}
135 134
@@ -137,12 +136,12 @@ void CpuManager::MultiCoreRunSuspendThread() {
137 auto& kernel = system.Kernel(); 136 auto& kernel = system.Kernel();
138 kernel.CurrentScheduler()->OnThreadStart(); 137 kernel.CurrentScheduler()->OnThreadStart();
139 while (true) { 138 while (true) {
140 auto core = kernel.CurrentPhysicalCoreIndex(); 139 auto core = kernel.GetCurrentHostThreadID();
141 auto& scheduler = *kernel.CurrentScheduler(); 140 auto& scheduler = *kernel.CurrentScheduler();
142 Kernel::KThread* current_thread = scheduler.GetCurrentThread(); 141 Kernel::KThread* current_thread = scheduler.GetCurrentThread();
143 Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context); 142 Common::Fiber::YieldTo(current_thread->GetHostContext(), *core_data[core].host_context);
144 ASSERT(scheduler.ContextSwitchPending()); 143 ASSERT(scheduler.ContextSwitchPending());
145 ASSERT(core == kernel.CurrentPhysicalCoreIndex()); 144 ASSERT(core == kernel.GetCurrentHostThreadID());
146 scheduler.RescheduleCurrentCore(); 145 scheduler.RescheduleCurrentCore();
147 } 146 }
148} 147}
@@ -348,11 +347,15 @@ void CpuManager::RunThread(std::stop_token stop_token, std::size_t core) {
348 sc_sync_first_use = false; 347 sc_sync_first_use = false;
349 } 348 }
350 349
351 // Emulation was stopped 350 // Abort if emulation was killed before the session really starts
352 if (stop_token.stop_requested()) { 351 if (!system.IsPoweredOn()) {
353 return; 352 return;
354 } 353 }
355 354
355 if (stop_token.stop_requested()) {
356 break;
357 }
358
356 auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread(); 359 auto current_thread = system.Kernel().CurrentScheduler()->GetCurrentThread();
357 data.is_running = true; 360 data.is_running = true;
358 Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext()); 361 Common::Fiber::YieldTo(data.host_context, *current_thread->GetHostContext());
diff --git a/src/core/hle/kernel/k_address_arbiter.cpp b/src/core/hle/kernel/k_address_arbiter.cpp
index 6771ef621..1b429bc1e 100644
--- a/src/core/hle/kernel/k_address_arbiter.cpp
+++ b/src/core/hle/kernel/k_address_arbiter.cpp
@@ -28,7 +28,7 @@ bool ReadFromUser(Core::System& system, s32* out, VAddr address) {
28 28
29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) { 29bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 value) {
30 auto& monitor = system.Monitor(); 30 auto& monitor = system.Monitor();
31 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); 31 const auto current_core = system.CurrentCoreIndex();
32 32
33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 33 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
34 // TODO(bunnei): We should call CanAccessAtomic(..) here. 34 // TODO(bunnei): We should call CanAccessAtomic(..) here.
@@ -58,7 +58,7 @@ bool DecrementIfLessThan(Core::System& system, s32* out, VAddr address, s32 valu
58 58
59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) { 59bool UpdateIfEqual(Core::System& system, s32* out, VAddr address, s32 value, s32 new_value) {
60 auto& monitor = system.Monitor(); 60 auto& monitor = system.Monitor();
61 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); 61 const auto current_core = system.CurrentCoreIndex();
62 62
63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable. 63 // TODO(bunnei): We should disable interrupts here via KScopedInterruptDisable.
64 // TODO(bunnei): We should call CanAccessAtomic(..) here. 64 // TODO(bunnei): We should call CanAccessAtomic(..) here.
diff --git a/src/core/hle/kernel/k_auto_object.h b/src/core/hle/kernel/k_auto_object.h
index 165b76747..e4fcdbc67 100644
--- a/src/core/hle/kernel/k_auto_object.h
+++ b/src/core/hle/kernel/k_auto_object.h
@@ -170,10 +170,6 @@ public:
170 } 170 }
171 } 171 }
172 172
173 const std::string& GetName() const {
174 return name;
175 }
176
177private: 173private:
178 void RegisterWithKernel(); 174 void RegisterWithKernel();
179 void UnregisterWithKernel(); 175 void UnregisterWithKernel();
diff --git a/src/core/hle/kernel/k_condition_variable.cpp b/src/core/hle/kernel/k_condition_variable.cpp
index 4174f35fd..ef14ad1d2 100644
--- a/src/core/hle/kernel/k_condition_variable.cpp
+++ b/src/core/hle/kernel/k_condition_variable.cpp
@@ -35,7 +35,7 @@ bool WriteToUser(Core::System& system, VAddr address, const u32* p) {
35bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero, 35bool UpdateLockAtomic(Core::System& system, u32* out, VAddr address, u32 if_zero,
36 u32 new_orr_mask) { 36 u32 new_orr_mask) {
37 auto& monitor = system.Monitor(); 37 auto& monitor = system.Monitor();
38 const auto current_core = system.Kernel().CurrentPhysicalCoreIndex(); 38 const auto current_core = system.CurrentCoreIndex();
39 39
40 // Load the value from the address. 40 // Load the value from the address.
41 const auto expected = monitor.ExclusiveRead32(current_core, address); 41 const auto expected = monitor.ExclusiveRead32(current_core, address);
diff --git a/src/core/hle/kernel/k_handle_table.cpp b/src/core/hle/kernel/k_handle_table.cpp
index d720c2dda..6a420d5b0 100644
--- a/src/core/hle/kernel/k_handle_table.cpp
+++ b/src/core/hle/kernel/k_handle_table.cpp
@@ -13,7 +13,6 @@ ResultCode KHandleTable::Finalize() {
13 // Get the table and clear our record of it. 13 // Get the table and clear our record of it.
14 u16 saved_table_size = 0; 14 u16 saved_table_size = 0;
15 { 15 {
16 KScopedDisableDispatch dd(kernel);
17 KScopedSpinLock lk(m_lock); 16 KScopedSpinLock lk(m_lock);
18 17
19 std::swap(m_table_size, saved_table_size); 18 std::swap(m_table_size, saved_table_size);
@@ -44,7 +43,6 @@ bool KHandleTable::Remove(Handle handle) {
44 // Find the object and free the entry. 43 // Find the object and free the entry.
45 KAutoObject* obj = nullptr; 44 KAutoObject* obj = nullptr;
46 { 45 {
47 KScopedDisableDispatch dd(kernel);
48 KScopedSpinLock lk(m_lock); 46 KScopedSpinLock lk(m_lock);
49 47
50 if (this->IsValidHandle(handle)) { 48 if (this->IsValidHandle(handle)) {
@@ -63,7 +61,6 @@ bool KHandleTable::Remove(Handle handle) {
63} 61}
64 62
65ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) { 63ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
66 KScopedDisableDispatch dd(kernel);
67 KScopedSpinLock lk(m_lock); 64 KScopedSpinLock lk(m_lock);
68 65
69 // Never exceed our capacity. 66 // Never exceed our capacity.
@@ -86,7 +83,6 @@ ResultCode KHandleTable::Add(Handle* out_handle, KAutoObject* obj, u16 type) {
86} 83}
87 84
88ResultCode KHandleTable::Reserve(Handle* out_handle) { 85ResultCode KHandleTable::Reserve(Handle* out_handle) {
89 KScopedDisableDispatch dd(kernel);
90 KScopedSpinLock lk(m_lock); 86 KScopedSpinLock lk(m_lock);
91 87
92 // Never exceed our capacity. 88 // Never exceed our capacity.
@@ -97,7 +93,6 @@ ResultCode KHandleTable::Reserve(Handle* out_handle) {
97} 93}
98 94
99void KHandleTable::Unreserve(Handle handle) { 95void KHandleTable::Unreserve(Handle handle) {
100 KScopedDisableDispatch dd(kernel);
101 KScopedSpinLock lk(m_lock); 96 KScopedSpinLock lk(m_lock);
102 97
103 // Unpack the handle. 98 // Unpack the handle.
@@ -116,7 +111,6 @@ void KHandleTable::Unreserve(Handle handle) {
116} 111}
117 112
118void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) { 113void KHandleTable::Register(Handle handle, KAutoObject* obj, u16 type) {
119 KScopedDisableDispatch dd(kernel);
120 KScopedSpinLock lk(m_lock); 114 KScopedSpinLock lk(m_lock);
121 115
122 // Unpack the handle. 116 // Unpack the handle.
diff --git a/src/core/hle/kernel/k_handle_table.h b/src/core/hle/kernel/k_handle_table.h
index 75dcec7df..2ff6aa160 100644
--- a/src/core/hle/kernel/k_handle_table.h
+++ b/src/core/hle/kernel/k_handle_table.h
@@ -69,7 +69,6 @@ public:
69 template <typename T = KAutoObject> 69 template <typename T = KAutoObject>
70 KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const { 70 KScopedAutoObject<T> GetObjectWithoutPseudoHandle(Handle handle) const {
71 // Lock and look up in table. 71 // Lock and look up in table.
72 KScopedDisableDispatch dd(kernel);
73 KScopedSpinLock lk(m_lock); 72 KScopedSpinLock lk(m_lock);
74 73
75 if constexpr (std::is_same_v<T, KAutoObject>) { 74 if constexpr (std::is_same_v<T, KAutoObject>) {
@@ -124,7 +123,6 @@ public:
124 size_t num_opened; 123 size_t num_opened;
125 { 124 {
126 // Lock the table. 125 // Lock the table.
127 KScopedDisableDispatch dd(kernel);
128 KScopedSpinLock lk(m_lock); 126 KScopedSpinLock lk(m_lock);
129 for (num_opened = 0; num_opened < num_handles; num_opened++) { 127 for (num_opened = 0; num_opened < num_handles; num_opened++) {
130 // Get the current handle. 128 // Get the current handle.
diff --git a/src/core/hle/kernel/k_process.cpp b/src/core/hle/kernel/k_process.cpp
index 3d7e6707e..8ead1a769 100644
--- a/src/core/hle/kernel/k_process.cpp
+++ b/src/core/hle/kernel/k_process.cpp
@@ -59,7 +59,6 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
59 thread->GetContext64().cpu_registers[0] = 0; 59 thread->GetContext64().cpu_registers[0] = 0;
60 thread->GetContext32().cpu_registers[1] = thread_handle; 60 thread->GetContext32().cpu_registers[1] = thread_handle;
61 thread->GetContext64().cpu_registers[1] = thread_handle; 61 thread->GetContext64().cpu_registers[1] = thread_handle;
62 thread->DisableDispatch();
63 62
64 auto& kernel = system.Kernel(); 63 auto& kernel = system.Kernel();
65 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires 64 // Threads by default are dormant, wake up the main thread so it runs when the scheduler fires
diff --git a/src/core/hle/kernel/k_scheduler.cpp b/src/core/hle/kernel/k_scheduler.cpp
index 6ddbae52c..6a7d80d03 100644
--- a/src/core/hle/kernel/k_scheduler.cpp
+++ b/src/core/hle/kernel/k_scheduler.cpp
@@ -376,18 +376,20 @@ void KScheduler::ClearSchedulerUpdateNeeded(KernelCore& kernel) {
376} 376}
377 377
378void KScheduler::DisableScheduling(KernelCore& kernel) { 378void KScheduler::DisableScheduling(KernelCore& kernel) {
379 ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 0); 379 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
380 GetCurrentThreadPointer(kernel)->DisableDispatch(); 380 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 0);
381 scheduler->GetCurrentThread()->DisableDispatch();
382 }
381} 383}
382 384
383void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) { 385void KScheduler::EnableScheduling(KernelCore& kernel, u64 cores_needing_scheduling) {
384 ASSERT(GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() >= 1); 386 if (auto* scheduler = kernel.CurrentScheduler(); scheduler) {
385 387 ASSERT(scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1);
386 if (GetCurrentThreadPointer(kernel)->GetDisableDispatchCount() > 1) { 388 if (scheduler->GetCurrentThread()->GetDisableDispatchCount() >= 1) {
387 GetCurrentThreadPointer(kernel)->EnableDispatch(); 389 scheduler->GetCurrentThread()->EnableDispatch();
388 } else { 390 }
389 RescheduleCores(kernel, cores_needing_scheduling);
390 } 391 }
392 RescheduleCores(kernel, cores_needing_scheduling);
391} 393}
392 394
393u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) { 395u64 KScheduler::UpdateHighestPriorityThreads(KernelCore& kernel) {
@@ -615,17 +617,13 @@ KScheduler::KScheduler(Core::System& system_, s32 core_id_) : system{system_}, c
615 state.highest_priority_thread = nullptr; 617 state.highest_priority_thread = nullptr;
616} 618}
617 619
618void KScheduler::Finalize() { 620KScheduler::~KScheduler() {
619 if (idle_thread) { 621 if (idle_thread) {
620 idle_thread->Close(); 622 idle_thread->Close();
621 idle_thread = nullptr; 623 idle_thread = nullptr;
622 } 624 }
623} 625}
624 626
625KScheduler::~KScheduler() {
626 ASSERT(!idle_thread);
627}
628
629KThread* KScheduler::GetCurrentThread() const { 627KThread* KScheduler::GetCurrentThread() const {
630 if (auto result = current_thread.load(); result) { 628 if (auto result = current_thread.load(); result) {
631 return result; 629 return result;
@@ -644,12 +642,10 @@ void KScheduler::RescheduleCurrentCore() {
644 if (phys_core.IsInterrupted()) { 642 if (phys_core.IsInterrupted()) {
645 phys_core.ClearInterrupt(); 643 phys_core.ClearInterrupt();
646 } 644 }
647
648 guard.Lock(); 645 guard.Lock();
649 if (state.needs_scheduling.load()) { 646 if (state.needs_scheduling.load()) {
650 Schedule(); 647 Schedule();
651 } else { 648 } else {
652 GetCurrentThread()->EnableDispatch();
653 guard.Unlock(); 649 guard.Unlock();
654 } 650 }
655} 651}
@@ -659,33 +655,26 @@ void KScheduler::OnThreadStart() {
659} 655}
660 656
661void KScheduler::Unload(KThread* thread) { 657void KScheduler::Unload(KThread* thread) {
662 ASSERT(thread);
663
664 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr"); 658 LOG_TRACE(Kernel, "core {}, unload thread {}", core_id, thread ? thread->GetName() : "nullptr");
665 659
666 if (thread->IsCallingSvc()) { 660 if (thread) {
667 thread->ClearIsCallingSvc(); 661 if (thread->IsCallingSvc()) {
668 } 662 thread->ClearIsCallingSvc();
669 663 }
670 auto& physical_core = system.Kernel().PhysicalCore(core_id); 664 if (!thread->IsTerminationRequested()) {
671 if (!physical_core.IsInitialized()) { 665 prev_thread = thread;
672 return; 666
673 } 667 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
674 668 cpu_core.SaveContext(thread->GetContext32());
675 Core::ARM_Interface& cpu_core = physical_core.ArmInterface(); 669 cpu_core.SaveContext(thread->GetContext64());
676 cpu_core.SaveContext(thread->GetContext32()); 670 // Save the TPIDR_EL0 system register in case it was modified.
677 cpu_core.SaveContext(thread->GetContext64()); 671 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0());
678 // Save the TPIDR_EL0 system register in case it was modified. 672 cpu_core.ClearExclusiveState();
679 thread->SetTPIDR_EL0(cpu_core.GetTPIDR_EL0()); 673 } else {
680 cpu_core.ClearExclusiveState(); 674 prev_thread = nullptr;
681 675 }
682 if (!thread->IsTerminationRequested() && thread->GetActiveCore() == core_id) { 676 thread->context_guard.Unlock();
683 prev_thread = thread;
684 } else {
685 prev_thread = nullptr;
686 } 677 }
687
688 thread->context_guard.Unlock();
689} 678}
690 679
691void KScheduler::Reload(KThread* thread) { 680void KScheduler::Reload(KThread* thread) {
@@ -694,6 +683,11 @@ void KScheduler::Reload(KThread* thread) {
694 if (thread) { 683 if (thread) {
695 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable."); 684 ASSERT_MSG(thread->GetState() == ThreadState::Runnable, "Thread must be runnable.");
696 685
686 auto* const thread_owner_process = thread->GetOwnerProcess();
687 if (thread_owner_process != nullptr) {
688 system.Kernel().MakeCurrentProcess(thread_owner_process);
689 }
690
697 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id); 691 Core::ARM_Interface& cpu_core = system.ArmInterface(core_id);
698 cpu_core.LoadContext(thread->GetContext32()); 692 cpu_core.LoadContext(thread->GetContext32());
699 cpu_core.LoadContext(thread->GetContext64()); 693 cpu_core.LoadContext(thread->GetContext64());
@@ -711,7 +705,7 @@ void KScheduler::SwitchContextStep2() {
711} 705}
712 706
713void KScheduler::ScheduleImpl() { 707void KScheduler::ScheduleImpl() {
714 KThread* previous_thread = GetCurrentThread(); 708 KThread* previous_thread = current_thread.load();
715 KThread* next_thread = state.highest_priority_thread; 709 KThread* next_thread = state.highest_priority_thread;
716 710
717 state.needs_scheduling = false; 711 state.needs_scheduling = false;
@@ -723,15 +717,10 @@ void KScheduler::ScheduleImpl() {
723 717
724 // If we're not actually switching thread, there's nothing to do. 718 // If we're not actually switching thread, there's nothing to do.
725 if (next_thread == current_thread.load()) { 719 if (next_thread == current_thread.load()) {
726 previous_thread->EnableDispatch();
727 guard.Unlock(); 720 guard.Unlock();
728 return; 721 return;
729 } 722 }
730 723
731 if (next_thread->GetCurrentCore() != core_id) {
732 next_thread->SetCurrentCore(core_id);
733 }
734
735 current_thread.store(next_thread); 724 current_thread.store(next_thread);
736 725
737 KProcess* const previous_process = system.Kernel().CurrentProcess(); 726 KProcess* const previous_process = system.Kernel().CurrentProcess();
@@ -742,7 +731,11 @@ void KScheduler::ScheduleImpl() {
742 Unload(previous_thread); 731 Unload(previous_thread);
743 732
744 std::shared_ptr<Common::Fiber>* old_context; 733 std::shared_ptr<Common::Fiber>* old_context;
745 old_context = &previous_thread->GetHostContext(); 734 if (previous_thread != nullptr) {
735 old_context = &previous_thread->GetHostContext();
736 } else {
737 old_context = &idle_thread->GetHostContext();
738 }
746 guard.Unlock(); 739 guard.Unlock();
747 740
748 Common::Fiber::YieldTo(*old_context, *switch_fiber); 741 Common::Fiber::YieldTo(*old_context, *switch_fiber);
diff --git a/src/core/hle/kernel/k_scheduler.h b/src/core/hle/kernel/k_scheduler.h
index 516e0cdba..12cfae919 100644
--- a/src/core/hle/kernel/k_scheduler.h
+++ b/src/core/hle/kernel/k_scheduler.h
@@ -33,8 +33,6 @@ public:
33 explicit KScheduler(Core::System& system_, s32 core_id_); 33 explicit KScheduler(Core::System& system_, s32 core_id_);
34 ~KScheduler(); 34 ~KScheduler();
35 35
36 void Finalize();
37
38 /// Reschedules to the next available thread (call after current thread is suspended) 36 /// Reschedules to the next available thread (call after current thread is suspended)
39 void RescheduleCurrentCore(); 37 void RescheduleCurrentCore();
40 38
diff --git a/src/core/hle/kernel/k_thread.cpp b/src/core/hle/kernel/k_thread.cpp
index 0f6808ade..9f1d3156b 100644
--- a/src/core/hle/kernel/k_thread.cpp
+++ b/src/core/hle/kernel/k_thread.cpp
@@ -14,7 +14,6 @@
14#include "common/fiber.h" 14#include "common/fiber.h"
15#include "common/logging/log.h" 15#include "common/logging/log.h"
16#include "common/scope_exit.h" 16#include "common/scope_exit.h"
17#include "common/settings.h"
18#include "common/thread_queue_list.h" 17#include "common/thread_queue_list.h"
19#include "core/core.h" 18#include "core/core.h"
20#include "core/cpu_manager.h" 19#include "core/cpu_manager.h"
@@ -189,7 +188,7 @@ ResultCode KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_s
189 // Setup the stack parameters. 188 // Setup the stack parameters.
190 StackParameters& sp = GetStackParameters(); 189 StackParameters& sp = GetStackParameters();
191 sp.cur_thread = this; 190 sp.cur_thread = this;
192 sp.disable_count = 0; 191 sp.disable_count = 1;
193 SetInExceptionHandler(); 192 SetInExceptionHandler();
194 193
195 // Set thread ID. 194 // Set thread ID.
@@ -216,10 +215,9 @@ ResultCode KThread::InitializeThread(KThread* thread, KThreadFunction func, uint
216 // Initialize the thread. 215 // Initialize the thread.
217 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type)); 216 R_TRY(thread->Initialize(func, arg, user_stack_top, prio, core, owner, type));
218 217
219 // Initialize emulation parameters. 218 // Initialize host context.
220 thread->host_context = 219 thread->host_context =
221 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter); 220 std::make_shared<Common::Fiber>(std::move(init_func), init_func_parameter);
222 thread->is_single_core = !Settings::values.use_multi_core.GetValue();
223 221
224 return ResultSuccess; 222 return ResultSuccess;
225} 223}
@@ -972,9 +970,6 @@ ResultCode KThread::Run() {
972 970
973 // Set our state and finish. 971 // Set our state and finish.
974 SetState(ThreadState::Runnable); 972 SetState(ThreadState::Runnable);
975
976 DisableDispatch();
977
978 return ResultSuccess; 973 return ResultSuccess;
979 } 974 }
980} 975}
@@ -1059,16 +1054,4 @@ s32 GetCurrentCoreId(KernelCore& kernel) {
1059 return GetCurrentThread(kernel).GetCurrentCore(); 1054 return GetCurrentThread(kernel).GetCurrentCore();
1060} 1055}
1061 1056
1062KScopedDisableDispatch::~KScopedDisableDispatch() {
1063 if (GetCurrentThread(kernel).GetDisableDispatchCount() <= 1) {
1064 auto scheduler = kernel.CurrentScheduler();
1065
1066 if (scheduler) {
1067 scheduler->RescheduleCurrentCore();
1068 }
1069 } else {
1070 GetCurrentThread(kernel).EnableDispatch();
1071 }
1072}
1073
1074} // namespace Kernel 1057} // namespace Kernel
diff --git a/src/core/hle/kernel/k_thread.h b/src/core/hle/kernel/k_thread.h
index e4c4c877d..c77f44ad4 100644
--- a/src/core/hle/kernel/k_thread.h
+++ b/src/core/hle/kernel/k_thread.h
@@ -450,39 +450,16 @@ public:
450 sleeping_queue = q; 450 sleeping_queue = q;
451 } 451 }
452 452
453 [[nodiscard]] bool IsKernelThread() const {
454 return GetActiveCore() == 3;
455 }
456
457 [[nodiscard]] bool IsDispatchTrackingDisabled() const {
458 return is_single_core || IsKernelThread();
459 }
460
461 [[nodiscard]] s32 GetDisableDispatchCount() const { 453 [[nodiscard]] s32 GetDisableDispatchCount() const {
462 if (IsDispatchTrackingDisabled()) {
463 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
464 return 1;
465 }
466
467 return this->GetStackParameters().disable_count; 454 return this->GetStackParameters().disable_count;
468 } 455 }
469 456
470 void DisableDispatch() { 457 void DisableDispatch() {
471 if (IsDispatchTrackingDisabled()) {
472 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
473 return;
474 }
475
476 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0); 458 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() >= 0);
477 this->GetStackParameters().disable_count++; 459 this->GetStackParameters().disable_count++;
478 } 460 }
479 461
480 void EnableDispatch() { 462 void EnableDispatch() {
481 if (IsDispatchTrackingDisabled()) {
482 // TODO(bunnei): Until kernel threads are emulated, we cannot enable/disable dispatch.
483 return;
484 }
485
486 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0); 463 ASSERT(GetCurrentThread(kernel).GetDisableDispatchCount() > 0);
487 this->GetStackParameters().disable_count--; 464 this->GetStackParameters().disable_count--;
488 } 465 }
@@ -731,7 +708,6 @@ private:
731 708
732 // For emulation 709 // For emulation
733 std::shared_ptr<Common::Fiber> host_context{}; 710 std::shared_ptr<Common::Fiber> host_context{};
734 bool is_single_core{};
735 711
736 // For debugging 712 // For debugging
737 std::vector<KSynchronizationObject*> wait_objects_for_debugging; 713 std::vector<KSynchronizationObject*> wait_objects_for_debugging;
@@ -776,16 +752,4 @@ public:
776 } 752 }
777}; 753};
778 754
779class KScopedDisableDispatch {
780public:
781 [[nodiscard]] explicit KScopedDisableDispatch(KernelCore& kernel_) : kernel{kernel_} {
782 GetCurrentThread(kernel).DisableDispatch();
783 }
784
785 ~KScopedDisableDispatch();
786
787private:
788 KernelCore& kernel;
789};
790
791} // namespace Kernel 755} // namespace Kernel
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 8fdab44e4..bea945301 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -85,9 +85,8 @@ struct KernelCore::Impl {
85 } 85 }
86 86
87 void InitializeCores() { 87 void InitializeCores() {
88 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) { 88 for (auto& core : cores) {
89 cores[core_id].Initialize(current_process->Is64BitProcess()); 89 core.Initialize(current_process->Is64BitProcess());
90 system.Memory().SetCurrentPageTable(*current_process, core_id);
91 } 90 }
92 } 91 }
93 92
@@ -132,6 +131,15 @@ struct KernelCore::Impl {
132 next_user_process_id = KProcess::ProcessIDMin; 131 next_user_process_id = KProcess::ProcessIDMin;
133 next_thread_id = 1; 132 next_thread_id = 1;
134 133
134 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
135 if (suspend_threads[core_id]) {
136 suspend_threads[core_id]->Close();
137 suspend_threads[core_id] = nullptr;
138 }
139
140 schedulers[core_id].reset();
141 }
142
135 cores.clear(); 143 cores.clear();
136 144
137 global_handle_table->Finalize(); 145 global_handle_table->Finalize();
@@ -159,16 +167,6 @@ struct KernelCore::Impl {
159 CleanupObject(time_shared_mem); 167 CleanupObject(time_shared_mem);
160 CleanupObject(system_resource_limit); 168 CleanupObject(system_resource_limit);
161 169
162 for (u32 core_id = 0; core_id < Core::Hardware::NUM_CPU_CORES; core_id++) {
163 if (suspend_threads[core_id]) {
164 suspend_threads[core_id]->Close();
165 suspend_threads[core_id] = nullptr;
166 }
167
168 schedulers[core_id]->Finalize();
169 schedulers[core_id].reset();
170 }
171
172 // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others 170 // Next host thead ID to use, 0-3 IDs represent core threads, >3 represent others
173 next_host_thread_id = Core::Hardware::NUM_CPU_CORES; 171 next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
174 172
@@ -259,6 +257,14 @@ struct KernelCore::Impl {
259 257
260 void MakeCurrentProcess(KProcess* process) { 258 void MakeCurrentProcess(KProcess* process) {
261 current_process = process; 259 current_process = process;
260 if (process == nullptr) {
261 return;
262 }
263
264 const u32 core_id = GetCurrentHostThreadID();
265 if (core_id < Core::Hardware::NUM_CPU_CORES) {
266 system.Memory().SetCurrentPageTable(*process, core_id);
267 }
262 } 268 }
263 269
264 static inline thread_local u32 host_thread_id = UINT32_MAX; 270 static inline thread_local u32 host_thread_id = UINT32_MAX;
@@ -821,20 +827,16 @@ const Kernel::PhysicalCore& KernelCore::PhysicalCore(std::size_t id) const {
821 return impl->cores[id]; 827 return impl->cores[id];
822} 828}
823 829
824size_t KernelCore::CurrentPhysicalCoreIndex() const {
825 const u32 core_id = impl->GetCurrentHostThreadID();
826 if (core_id >= Core::Hardware::NUM_CPU_CORES) {
827 return Core::Hardware::NUM_CPU_CORES - 1;
828 }
829 return core_id;
830}
831
832Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() { 830Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() {
833 return impl->cores[CurrentPhysicalCoreIndex()]; 831 u32 core_id = impl->GetCurrentHostThreadID();
832 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
833 return impl->cores[core_id];
834} 834}
835 835
836const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const { 836const Kernel::PhysicalCore& KernelCore::CurrentPhysicalCore() const {
837 return impl->cores[CurrentPhysicalCoreIndex()]; 837 u32 core_id = impl->GetCurrentHostThreadID();
838 ASSERT(core_id < Core::Hardware::NUM_CPU_CORES);
839 return impl->cores[core_id];
838} 840}
839 841
840Kernel::KScheduler* KernelCore::CurrentScheduler() { 842Kernel::KScheduler* KernelCore::CurrentScheduler() {
@@ -1027,9 +1029,6 @@ void KernelCore::Suspend(bool in_suspention) {
1027 impl->suspend_threads[core_id]->SetState(state); 1029 impl->suspend_threads[core_id]->SetState(state);
1028 impl->suspend_threads[core_id]->SetWaitReasonForDebugging( 1030 impl->suspend_threads[core_id]->SetWaitReasonForDebugging(
1029 ThreadWaitReasonForDebugging::Suspended); 1031 ThreadWaitReasonForDebugging::Suspended);
1030 if (!should_suspend) {
1031 impl->suspend_threads[core_id]->DisableDispatch();
1032 }
1033 } 1032 }
1034 } 1033 }
1035} 1034}
@@ -1044,11 +1043,13 @@ void KernelCore::ExceptionalExit() {
1044} 1043}
1045 1044
1046void KernelCore::EnterSVCProfile() { 1045void KernelCore::EnterSVCProfile() {
1047 impl->svc_ticks[CurrentPhysicalCoreIndex()] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC)); 1046 std::size_t core = impl->GetCurrentHostThreadID();
1047 impl->svc_ticks[core] = MicroProfileEnter(MICROPROFILE_TOKEN(Kernel_SVC));
1048} 1048}
1049 1049
1050void KernelCore::ExitSVCProfile() { 1050void KernelCore::ExitSVCProfile() {
1051 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[CurrentPhysicalCoreIndex()]); 1051 std::size_t core = impl->GetCurrentHostThreadID();
1052 MicroProfileLeave(MICROPROFILE_TOKEN(Kernel_SVC), impl->svc_ticks[core]);
1052} 1053}
1053 1054
1054std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) { 1055std::weak_ptr<Kernel::ServiceThread> KernelCore::CreateServiceThread(const std::string& name) {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index 57535433b..3a6db0b1c 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -146,9 +146,6 @@ public:
146 /// Gets the an instance of the respective physical CPU core. 146 /// Gets the an instance of the respective physical CPU core.
147 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const; 147 const Kernel::PhysicalCore& PhysicalCore(std::size_t id) const;
148 148
149 /// Gets the current physical core index for the running host thread.
150 std::size_t CurrentPhysicalCoreIndex() const;
151
152 /// Gets the sole instance of the Scheduler at the current running core. 149 /// Gets the sole instance of the Scheduler at the current running core.
153 Kernel::KScheduler* CurrentScheduler(); 150 Kernel::KScheduler* CurrentScheduler();
154 151
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 890c52198..62fb06c45 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -877,7 +877,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, Handle
877 const u64 thread_ticks = current_thread->GetCpuTime(); 877 const u64 thread_ticks = current_thread->GetCpuTime();
878 878
879 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks); 879 out_ticks = thread_ticks + (core_timing.GetCPUTicks() - prev_ctx_ticks);
880 } else if (same_thread && info_sub_id == system.Kernel().CurrentPhysicalCoreIndex()) { 880 } else if (same_thread && info_sub_id == system.CurrentCoreIndex()) {
881 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks; 881 out_ticks = core_timing.GetCPUTicks() - prev_ctx_ticks;
882 } 882 }
883 883
diff --git a/src/core/hle/service/ngct/ngct.cpp b/src/core/hle/service/ngct/ngct.cpp
new file mode 100644
index 000000000..deb3abb28
--- /dev/null
+++ b/src/core/hle/service/ngct/ngct.cpp
@@ -0,0 +1,46 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included
4
5#include "common/string_util.h"
6#include "core/core.h"
7#include "core/hle/ipc_helpers.h"
8#include "core/hle/service/ngct/ngct.h"
9#include "core/hle/service/service.h"
10
11namespace Service::NGCT {
12
13class IService final : public ServiceFramework<IService> {
14public:
15 explicit IService(Core::System& system_) : ServiceFramework{system_, "ngct:u"} {
16 // clang-format off
17 static const FunctionInfo functions[] = {
18 {0, nullptr, "Match"},
19 {1, &IService::Filter, "Filter"},
20 };
21 // clang-format on
22
23 RegisterHandlers(functions);
24 }
25
26private:
27 void Filter(Kernel::HLERequestContext& ctx) {
28 const auto buffer = ctx.ReadBuffer();
29 const auto text = Common::StringFromFixedZeroTerminatedBuffer(
30 reinterpret_cast<const char*>(buffer.data()), buffer.size());
31
32 LOG_WARNING(Service_NGCT, "(STUBBED) called, text={}", text);
33
34 // Return the same string since we don't censor anything
35 ctx.WriteBuffer(buffer);
36
37 IPC::ResponseBuilder rb{ctx, 2};
38 rb.Push(ResultSuccess);
39 }
40};
41
42void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) {
43 std::make_shared<IService>(system)->InstallAsService(system.ServiceManager());
44}
45
46} // namespace Service::NGCT
diff --git a/src/core/hle/service/ngct/ngct.h b/src/core/hle/service/ngct/ngct.h
new file mode 100644
index 000000000..1f2a47b78
--- /dev/null
+++ b/src/core/hle/service/ngct/ngct.h
@@ -0,0 +1,20 @@
1// Copyright 2021 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included
4
5#pragma once
6
7namespace Core {
8class System;
9}
10
11namespace Service::SM {
12class ServiceManager;
13}
14
15namespace Service::NGCT {
16
17/// Registers all NGCT services with the specified service manager.
18void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system);
19
20} // namespace Service::NGCT
diff --git a/src/core/hle/service/nifm/nifm.cpp b/src/core/hle/service/nifm/nifm.cpp
index 0a53c0c81..9decb9290 100644
--- a/src/core/hle/service/nifm/nifm.cpp
+++ b/src/core/hle/service/nifm/nifm.cpp
@@ -277,37 +277,45 @@ private:
277 void GetCurrentNetworkProfile(Kernel::HLERequestContext& ctx) { 277 void GetCurrentNetworkProfile(Kernel::HLERequestContext& ctx) {
278 LOG_WARNING(Service_NIFM, "(STUBBED) called"); 278 LOG_WARNING(Service_NIFM, "(STUBBED) called");
279 279
280 const SfNetworkProfileData network_profile_data{ 280 const auto net_iface = Network::GetSelectedNetworkInterface();
281 .ip_setting_data{ 281
282 .ip_address_setting{ 282 const SfNetworkProfileData network_profile_data = [&net_iface] {
283 .is_automatic{true}, 283 if (!net_iface) {
284 .current_address{192, 168, 1, 100}, 284 return SfNetworkProfileData{};
285 .subnet_mask{255, 255, 255, 0}, 285 }
286 .gateway{192, 168, 1, 1}, 286
287 }, 287 return SfNetworkProfileData{
288 .dns_setting{ 288 .ip_setting_data{
289 .is_automatic{true}, 289 .ip_address_setting{
290 .primary_dns{1, 1, 1, 1}, 290 .is_automatic{true},
291 .secondary_dns{1, 0, 0, 1}, 291 .current_address{Network::TranslateIPv4(net_iface->ip_address)},
292 .subnet_mask{Network::TranslateIPv4(net_iface->subnet_mask)},
293 .gateway{Network::TranslateIPv4(net_iface->gateway)},
294 },
295 .dns_setting{
296 .is_automatic{true},
297 .primary_dns{1, 1, 1, 1},
298 .secondary_dns{1, 0, 0, 1},
299 },
300 .proxy_setting{
301 .enabled{false},
302 .port{},
303 .proxy_server{},
304 .automatic_auth_enabled{},
305 .user{},
306 .password{},
307 },
308 .mtu{1500},
292 }, 309 },
293 .proxy_setting{ 310 .uuid{0xdeadbeef, 0xdeadbeef},
294 .enabled{false}, 311 .network_name{"yuzu Network"},
295 .port{}, 312 .wireless_setting_data{
296 .proxy_server{}, 313 .ssid_length{12},
297 .automatic_auth_enabled{}, 314 .ssid{"yuzu Network"},
298 .user{}, 315 .passphrase{"yuzupassword"},
299 .password{},
300 }, 316 },
301 .mtu{1500}, 317 };
302 }, 318 }();
303 .uuid{0xdeadbeef, 0xdeadbeef},
304 .network_name{"yuzu Network"},
305 .wireless_setting_data{
306 .ssid_length{12},
307 .ssid{"yuzu Network"},
308 .passphrase{"yuzupassword"},
309 },
310 };
311 319
312 ctx.WriteBuffer(network_profile_data); 320 ctx.WriteBuffer(network_profile_data);
313 321
@@ -352,38 +360,33 @@ private:
352 LOG_WARNING(Service_NIFM, "(STUBBED) called"); 360 LOG_WARNING(Service_NIFM, "(STUBBED) called");
353 361
354 struct IpConfigInfo { 362 struct IpConfigInfo {
355 IpAddressSetting ip_address_setting; 363 IpAddressSetting ip_address_setting{};
356 DnsSetting dns_setting; 364 DnsSetting dns_setting{};
357 }; 365 };
358 static_assert(sizeof(IpConfigInfo) == sizeof(IpAddressSetting) + sizeof(DnsSetting), 366 static_assert(sizeof(IpConfigInfo) == sizeof(IpAddressSetting) + sizeof(DnsSetting),
359 "IpConfigInfo has incorrect size."); 367 "IpConfigInfo has incorrect size.");
360 368
361 IpConfigInfo ip_config_info{ 369 const auto net_iface = Network::GetSelectedNetworkInterface();
362 .ip_address_setting{
363 .is_automatic{true},
364 .current_address{0, 0, 0, 0},
365 .subnet_mask{255, 255, 255, 0},
366 .gateway{192, 168, 1, 1},
367 },
368 .dns_setting{
369 .is_automatic{true},
370 .primary_dns{1, 1, 1, 1},
371 .secondary_dns{1, 0, 0, 1},
372 },
373 };
374 370
375 const auto iface = Network::GetSelectedNetworkInterface(); 371 const IpConfigInfo ip_config_info = [&net_iface] {
376 if (iface) { 372 if (!net_iface) {
377 ip_config_info.ip_address_setting = 373 return IpConfigInfo{};
378 IpAddressSetting{.is_automatic{true}, 374 }
379 .current_address{Network::TranslateIPv4(iface->ip_address)},
380 .subnet_mask{Network::TranslateIPv4(iface->subnet_mask)},
381 .gateway{Network::TranslateIPv4(iface->gateway)}};
382 375
383 } else { 376 return IpConfigInfo{
384 LOG_ERROR(Service_NIFM, 377 .ip_address_setting{
385 "Couldn't get host network configuration info, using default values"); 378 .is_automatic{true},
386 } 379 .current_address{Network::TranslateIPv4(net_iface->ip_address)},
380 .subnet_mask{Network::TranslateIPv4(net_iface->subnet_mask)},
381 .gateway{Network::TranslateIPv4(net_iface->gateway)},
382 },
383 .dns_setting{
384 .is_automatic{true},
385 .primary_dns{1, 1, 1, 1},
386 .secondary_dns{1, 0, 0, 1},
387 },
388 };
389 }();
387 390
388 IPC::ResponseBuilder rb{ctx, 2 + (sizeof(IpConfigInfo) + 3) / sizeof(u32)}; 391 IPC::ResponseBuilder rb{ctx, 2 + (sizeof(IpConfigInfo) + 3) / sizeof(u32)};
389 rb.Push(ResultSuccess); 392 rb.Push(ResultSuccess);
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index ce6065db2..a33e47d0b 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -42,15 +42,14 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}
42void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height, 42void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, u32 format, u32 width, u32 height,
43 u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform, 43 u32 stride, NVFlinger::BufferQueue::BufferTransformFlags transform,
44 const Common::Rectangle<int>& crop_rect) { 44 const Common::Rectangle<int>& crop_rect) {
45 VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); 45 const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle);
46 LOG_TRACE(Service, 46 LOG_TRACE(Service,
47 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", 47 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
48 addr, offset, width, height, stride, format); 48 addr, offset, width, height, stride, format);
49 49
50 using PixelFormat = Tegra::FramebufferConfig::PixelFormat; 50 const auto pixel_format = static_cast<Tegra::FramebufferConfig::PixelFormat>(format);
51 const Tegra::FramebufferConfig framebuffer{ 51 const Tegra::FramebufferConfig framebuffer{addr, offset, width, height,
52 addr, offset, width, height, stride, static_cast<PixelFormat>(format), 52 stride, pixel_format, transform, crop_rect};
53 transform, crop_rect};
54 53
55 system.GetPerfStats().EndSystemFrame(); 54 system.GetPerfStats().EndSystemFrame();
56 system.GPU().SwapBuffers(&framebuffer); 55 system.GPU().SwapBuffers(&framebuffer);
diff --git a/src/core/hle/service/nvflinger/buffer_queue.h b/src/core/hle/service/nvflinger/buffer_queue.h
index 759247eb0..78de3f354 100644
--- a/src/core/hle/service/nvflinger/buffer_queue.h
+++ b/src/core/hle/service/nvflinger/buffer_queue.h
@@ -42,7 +42,9 @@ struct IGBPBuffer {
42 u32_le index; 42 u32_le index;
43 INSERT_PADDING_WORDS(3); 43 INSERT_PADDING_WORDS(3);
44 u32_le gpu_buffer_id; 44 u32_le gpu_buffer_id;
45 INSERT_PADDING_WORDS(17); 45 INSERT_PADDING_WORDS(6);
46 u32_le external_format;
47 INSERT_PADDING_WORDS(10);
46 u32_le nvmap_handle; 48 u32_le nvmap_handle;
47 u32_le offset; 49 u32_le offset;
48 INSERT_PADDING_WORDS(60); 50 INSERT_PADDING_WORDS(60);
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 00bff8caf..3ead813b0 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -298,7 +298,7 @@ void NVFlinger::Compose() {
298 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0"); 298 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0");
299 ASSERT(nvdisp); 299 ASSERT(nvdisp);
300 300
301 nvdisp->flip(igbp_buffer.gpu_buffer_id, igbp_buffer.offset, igbp_buffer.format, 301 nvdisp->flip(igbp_buffer.gpu_buffer_id, igbp_buffer.offset, igbp_buffer.external_format,
302 igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride, 302 igbp_buffer.width, igbp_buffer.height, igbp_buffer.stride,
303 buffer->get().transform, buffer->get().crop_rect); 303 buffer->get().transform, buffer->get().crop_rect);
304 304
diff --git a/src/core/hle/service/service.cpp b/src/core/hle/service/service.cpp
index b3e50433b..065133166 100644
--- a/src/core/hle/service/service.cpp
+++ b/src/core/hle/service/service.cpp
@@ -46,6 +46,7 @@
46#include "core/hle/service/ncm/ncm.h" 46#include "core/hle/service/ncm/ncm.h"
47#include "core/hle/service/nfc/nfc.h" 47#include "core/hle/service/nfc/nfc.h"
48#include "core/hle/service/nfp/nfp.h" 48#include "core/hle/service/nfp/nfp.h"
49#include "core/hle/service/ngct/ngct.h"
49#include "core/hle/service/nifm/nifm.h" 50#include "core/hle/service/nifm/nifm.h"
50#include "core/hle/service/nim/nim.h" 51#include "core/hle/service/nim/nim.h"
51#include "core/hle/service/npns/npns.h" 52#include "core/hle/service/npns/npns.h"
@@ -271,6 +272,7 @@ Services::Services(std::shared_ptr<SM::ServiceManager>& sm, Core::System& system
271 NCM::InstallInterfaces(*sm, system); 272 NCM::InstallInterfaces(*sm, system);
272 NFC::InstallInterfaces(*sm, system); 273 NFC::InstallInterfaces(*sm, system);
273 NFP::InstallInterfaces(*sm, system); 274 NFP::InstallInterfaces(*sm, system);
275 NGCT::InstallInterfaces(*sm, system);
274 NIFM::InstallInterfaces(*sm, system); 276 NIFM::InstallInterfaces(*sm, system);
275 NIM::InstallInterfaces(*sm, system); 277 NIM::InstallInterfaces(*sm, system);
276 NPNS::InstallInterfaces(*sm, system); 278 NPNS::InstallInterfaces(*sm, system);
diff --git a/src/core/network/network_interface.cpp b/src/core/network/network_interface.cpp
index cecc9aa11..6811f21b1 100644
--- a/src/core/network/network_interface.cpp
+++ b/src/core/network/network_interface.cpp
@@ -37,73 +37,73 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
37 AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS, 37 AF_INET, GAA_FLAG_SKIP_MULTICAST | GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_GATEWAYS,
38 nullptr, adapter_addresses.data(), &buf_size); 38 nullptr, adapter_addresses.data(), &buf_size);
39 39
40 if (ret == ERROR_BUFFER_OVERFLOW) { 40 if (ret != ERROR_BUFFER_OVERFLOW) {
41 adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1);
42 } else {
43 break; 41 break;
44 } 42 }
43
44 adapter_addresses.resize((buf_size / sizeof(IP_ADAPTER_ADDRESSES)) + 1);
45 } 45 }
46 46
47 if (ret == NO_ERROR) { 47 if (ret != NO_ERROR) {
48 std::vector<NetworkInterface> result; 48 LOG_ERROR(Network, "Failed to get network interfaces with GetAdaptersAddresses");
49 return {};
50 }
49 51
50 for (auto current_address = adapter_addresses.data(); current_address != nullptr; 52 std::vector<NetworkInterface> result;
51 current_address = current_address->Next) {
52 if (current_address->FirstUnicastAddress == nullptr ||
53 current_address->FirstUnicastAddress->Address.lpSockaddr == nullptr) {
54 continue;
55 }
56 53
57 if (current_address->OperStatus != IfOperStatusUp) { 54 for (auto current_address = adapter_addresses.data(); current_address != nullptr;
58 continue; 55 current_address = current_address->Next) {
59 } 56 if (current_address->FirstUnicastAddress == nullptr ||
57 current_address->FirstUnicastAddress->Address.lpSockaddr == nullptr) {
58 continue;
59 }
60 60
61 const auto ip_addr = Common::BitCast<struct sockaddr_in>( 61 if (current_address->OperStatus != IfOperStatusUp) {
62 *current_address->FirstUnicastAddress->Address.lpSockaddr) 62 continue;
63 .sin_addr; 63 }
64 64
65 ULONG mask = 0; 65 const auto ip_addr = Common::BitCast<struct sockaddr_in>(
66 if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength, 66 *current_address->FirstUnicastAddress->Address.lpSockaddr)
67 &mask) != NO_ERROR) { 67 .sin_addr;
68 LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask");
69 continue;
70 }
71 68
72 struct in_addr gateway = {.S_un{.S_addr{0}}}; 69 ULONG mask = 0;
73 if (current_address->FirstGatewayAddress != nullptr && 70 if (ConvertLengthToIpv4Mask(current_address->FirstUnicastAddress->OnLinkPrefixLength,
74 current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) { 71 &mask) != NO_ERROR) {
75 gateway = Common::BitCast<struct sockaddr_in>( 72 LOG_ERROR(Network, "Failed to convert IPv4 prefix length to subnet mask");
76 *current_address->FirstGatewayAddress->Address.lpSockaddr) 73 continue;
77 .sin_addr; 74 }
78 }
79 75
80 result.push_back(NetworkInterface{ 76 struct in_addr gateway = {.S_un{.S_addr{0}}};
81 .name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})}, 77 if (current_address->FirstGatewayAddress != nullptr &&
82 .ip_address{ip_addr}, 78 current_address->FirstGatewayAddress->Address.lpSockaddr != nullptr) {
83 .subnet_mask = in_addr{.S_un{.S_addr{mask}}}, 79 gateway = Common::BitCast<struct sockaddr_in>(
84 .gateway = gateway}); 80 *current_address->FirstGatewayAddress->Address.lpSockaddr)
81 .sin_addr;
85 } 82 }
86 83
87 return result; 84 result.emplace_back(NetworkInterface{
88 } else { 85 .name{Common::UTF16ToUTF8(std::wstring{current_address->FriendlyName})},
89 LOG_ERROR(Network, "Failed to get network interfaces with GetAdaptersAddresses"); 86 .ip_address{ip_addr},
90 return {}; 87 .subnet_mask = in_addr{.S_un{.S_addr{mask}}},
88 .gateway = gateway});
91 } 89 }
90
91 return result;
92} 92}
93 93
94#else 94#else
95 95
96std::vector<NetworkInterface> GetAvailableNetworkInterfaces() { 96std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
97 std::vector<NetworkInterface> result;
98
99 struct ifaddrs* ifaddr = nullptr; 97 struct ifaddrs* ifaddr = nullptr;
100 98
101 if (getifaddrs(&ifaddr) != 0) { 99 if (getifaddrs(&ifaddr) != 0) {
102 LOG_ERROR(Network, "Failed to get network interfaces with getifaddrs: {}", 100 LOG_ERROR(Network, "Failed to get network interfaces with getifaddrs: {}",
103 std::strerror(errno)); 101 std::strerror(errno));
104 return result; 102 return {};
105 } 103 }
106 104
105 std::vector<NetworkInterface> result;
106
107 for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) { 107 for (auto ifa = ifaddr; ifa != nullptr; ifa = ifa->ifa_next) {
108 if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) { 108 if (ifa->ifa_addr == nullptr || ifa->ifa_netmask == nullptr) {
109 continue; 109 continue;
@@ -117,55 +117,62 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
117 continue; 117 continue;
118 } 118 }
119 119
120 std::uint32_t gateway{0}; 120 u32 gateway{};
121
121 std::ifstream file{"/proc/net/route"}; 122 std::ifstream file{"/proc/net/route"};
122 if (file.is_open()) { 123 if (!file.is_open()) {
124 LOG_ERROR(Network, "Failed to open \"/proc/net/route\"");
123 125
124 // ignore header 126 result.emplace_back(NetworkInterface{
125 file.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); 127 .name{ifa->ifa_name},
128 .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr},
129 .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr},
130 .gateway{in_addr{.s_addr = gateway}}});
131 continue;
132 }
126 133
127 bool gateway_found = false; 134 // ignore header
135 file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
128 136
129 for (std::string line; std::getline(file, line);) { 137 bool gateway_found = false;
130 std::istringstream iss{line};
131 138
132 std::string iface_name{}; 139 for (std::string line; std::getline(file, line);) {
133 iss >> iface_name; 140 std::istringstream iss{line};
134 if (iface_name != ifa->ifa_name) {
135 continue;
136 }
137 141
138 iss >> std::hex; 142 std::string iface_name;
143 iss >> iface_name;
144 if (iface_name != ifa->ifa_name) {
145 continue;
146 }
139 147
140 std::uint32_t dest{0}; 148 iss >> std::hex;
141 iss >> dest;
142 if (dest != 0) {
143 // not the default route
144 continue;
145 }
146 149
147 iss >> gateway; 150 u32 dest{};
151 iss >> dest;
152 if (dest != 0) {
153 // not the default route
154 continue;
155 }
148 156
149 std::uint16_t flags{0}; 157 iss >> gateway;
150 iss >> flags;
151 158
152 // flag RTF_GATEWAY (defined in <linux/route.h>) 159 u16 flags{};
153 if ((flags & 0x2) == 0) { 160 iss >> flags;
154 continue;
155 }
156 161
157 gateway_found = true; 162 // flag RTF_GATEWAY (defined in <linux/route.h>)
158 break; 163 if ((flags & 0x2) == 0) {
164 continue;
159 } 165 }
160 166
161 if (!gateway_found) { 167 gateway_found = true;
162 gateway = 0; 168 break;
163 }
164 } else {
165 LOG_ERROR(Network, "Failed to open \"/proc/net/route\"");
166 } 169 }
167 170
168 result.push_back(NetworkInterface{ 171 if (!gateway_found) {
172 gateway = 0;
173 }
174
175 result.emplace_back(NetworkInterface{
169 .name{ifa->ifa_name}, 176 .name{ifa->ifa_name},
170 .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr}, 177 .ip_address{Common::BitCast<struct sockaddr_in>(*ifa->ifa_addr).sin_addr},
171 .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr}, 178 .subnet_mask{Common::BitCast<struct sockaddr_in>(*ifa->ifa_netmask).sin_addr},
@@ -180,11 +187,11 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
180#endif 187#endif
181 188
182std::optional<NetworkInterface> GetSelectedNetworkInterface() { 189std::optional<NetworkInterface> GetSelectedNetworkInterface() {
183 const std::string& selected_network_interface = Settings::values.network_interface.GetValue(); 190 const auto& selected_network_interface = Settings::values.network_interface.GetValue();
184 const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); 191 const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
185 if (network_interfaces.size() == 0) { 192 if (network_interfaces.size() == 0) {
186 LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces"); 193 LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces");
187 return {}; 194 return std::nullopt;
188 } 195 }
189 196
190 const auto res = 197 const auto res =
@@ -192,12 +199,12 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() {
192 return iface.name == selected_network_interface; 199 return iface.name == selected_network_interface;
193 }); 200 });
194 201
195 if (res != network_interfaces.end()) { 202 if (res == network_interfaces.end()) {
196 return *res;
197 } else {
198 LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface); 203 LOG_ERROR(Network, "Couldn't find selected interface \"{}\"", selected_network_interface);
199 return {}; 204 return std::nullopt;
200 } 205 }
206
207 return *res;
201} 208}
202 209
203} // namespace Network 210} // namespace Network
diff --git a/src/shader_recompiler/backend/spirv/emit_context.cpp b/src/shader_recompiler/backend/spirv/emit_context.cpp
index 2d29d8c14..2885e6799 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_context.cpp
@@ -15,6 +15,8 @@
15 15
16namespace Shader::Backend::SPIRV { 16namespace Shader::Backend::SPIRV {
17namespace { 17namespace {
18constexpr size_t NUM_FIXEDFNCTEXTURE = 10;
19
18enum class Operation { 20enum class Operation {
19 Increment, 21 Increment,
20 Decrement, 22 Decrement,
@@ -427,6 +429,16 @@ Id DescType(EmitContext& ctx, Id sampled_type, Id pointer_type, u32 count) {
427 return pointer_type; 429 return pointer_type;
428 } 430 }
429} 431}
432
433size_t FindNextUnusedLocation(const std::bitset<IR::NUM_GENERICS>& used_locations,
434 size_t start_offset) {
435 for (size_t location = start_offset; location < used_locations.size(); ++location) {
436 if (!used_locations.test(location)) {
437 return location;
438 }
439 }
440 throw RuntimeError("Unable to get an unused location for legacy attribute");
441}
430} // Anonymous namespace 442} // Anonymous namespace
431 443
432void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) { 444void VectorTypes::Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name) {
@@ -1227,6 +1239,7 @@ void EmitContext::DefineInputs(const IR::Program& program) {
1227 loads[IR::Attribute::TessellationEvaluationPointV]) { 1239 loads[IR::Attribute::TessellationEvaluationPointV]) {
1228 tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord); 1240 tess_coord = DefineInput(*this, F32[3], false, spv::BuiltIn::TessCoord);
1229 } 1241 }
1242 std::bitset<IR::NUM_GENERICS> used_locations{};
1230 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) { 1243 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
1231 const AttributeType input_type{runtime_info.generic_input_types[index]}; 1244 const AttributeType input_type{runtime_info.generic_input_types[index]};
1232 if (!runtime_info.previous_stage_stores.Generic(index)) { 1245 if (!runtime_info.previous_stage_stores.Generic(index)) {
@@ -1238,6 +1251,7 @@ void EmitContext::DefineInputs(const IR::Program& program) {
1238 if (input_type == AttributeType::Disabled) { 1251 if (input_type == AttributeType::Disabled) {
1239 continue; 1252 continue;
1240 } 1253 }
1254 used_locations.set(index);
1241 const Id type{GetAttributeType(*this, input_type)}; 1255 const Id type{GetAttributeType(*this, input_type)};
1242 const Id id{DefineInput(*this, type, true)}; 1256 const Id id{DefineInput(*this, type, true)};
1243 Decorate(id, spv::Decoration::Location, static_cast<u32>(index)); 1257 Decorate(id, spv::Decoration::Location, static_cast<u32>(index));
@@ -1263,6 +1277,26 @@ void EmitContext::DefineInputs(const IR::Program& program) {
1263 break; 1277 break;
1264 } 1278 }
1265 } 1279 }
1280 size_t previous_unused_location = 0;
1281 if (loads.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
1282 const size_t location = FindNextUnusedLocation(used_locations, previous_unused_location);
1283 previous_unused_location = location;
1284 used_locations.set(location);
1285 const Id id{DefineInput(*this, F32[4], true)};
1286 Decorate(id, spv::Decoration::Location, location);
1287 input_front_color = id;
1288 }
1289 for (size_t index = 0; index < NUM_FIXEDFNCTEXTURE; ++index) {
1290 if (loads.AnyComponent(IR::Attribute::FixedFncTexture0S + index * 4)) {
1291 const size_t location =
1292 FindNextUnusedLocation(used_locations, previous_unused_location);
1293 previous_unused_location = location;
1294 used_locations.set(location);
1295 const Id id{DefineInput(*this, F32[4], true)};
1296 Decorate(id, spv::Decoration::Location, location);
1297 input_fixed_fnc_textures[index] = id;
1298 }
1299 }
1266 if (stage == Stage::TessellationEval) { 1300 if (stage == Stage::TessellationEval) {
1267 for (size_t index = 0; index < info.uses_patches.size(); ++index) { 1301 for (size_t index = 0; index < info.uses_patches.size(); ++index) {
1268 if (!info.uses_patches[index]) { 1302 if (!info.uses_patches[index]) {
@@ -1313,9 +1347,31 @@ void EmitContext::DefineOutputs(const IR::Program& program) {
1313 viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt, 1347 viewport_mask = DefineOutput(*this, TypeArray(U32[1], Const(1u)), std::nullopt,
1314 spv::BuiltIn::ViewportMaskNV); 1348 spv::BuiltIn::ViewportMaskNV);
1315 } 1349 }
1350 std::bitset<IR::NUM_GENERICS> used_locations{};
1316 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) { 1351 for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
1317 if (info.stores.Generic(index)) { 1352 if (info.stores.Generic(index)) {
1318 DefineGenericOutput(*this, index, invocations); 1353 DefineGenericOutput(*this, index, invocations);
1354 used_locations.set(index);
1355 }
1356 }
1357 size_t previous_unused_location = 0;
1358 if (info.stores.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
1359 const size_t location = FindNextUnusedLocation(used_locations, previous_unused_location);
1360 previous_unused_location = location;
1361 used_locations.set(location);
1362 const Id id{DefineOutput(*this, F32[4], invocations)};
1363 Decorate(id, spv::Decoration::Location, static_cast<u32>(location));
1364 output_front_color = id;
1365 }
1366 for (size_t index = 0; index < NUM_FIXEDFNCTEXTURE; ++index) {
1367 if (info.stores.AnyComponent(IR::Attribute::FixedFncTexture0S + index * 4)) {
1368 const size_t location =
1369 FindNextUnusedLocation(used_locations, previous_unused_location);
1370 previous_unused_location = location;
1371 used_locations.set(location);
1372 const Id id{DefineOutput(*this, F32[4], invocations)};
1373 Decorate(id, spv::Decoration::Location, location);
1374 output_fixed_fnc_textures[index] = id;
1319 } 1375 }
1320 } 1376 }
1321 switch (stage) { 1377 switch (stage) {
diff --git a/src/shader_recompiler/backend/spirv/emit_context.h b/src/shader_recompiler/backend/spirv/emit_context.h
index e277bc358..847d0c0e6 100644
--- a/src/shader_recompiler/backend/spirv/emit_context.h
+++ b/src/shader_recompiler/backend/spirv/emit_context.h
@@ -268,10 +268,14 @@ public:
268 Id write_global_func_u32x4{}; 268 Id write_global_func_u32x4{};
269 269
270 Id input_position{}; 270 Id input_position{};
271 Id input_front_color{};
272 std::array<Id, 10> input_fixed_fnc_textures{};
271 std::array<Id, 32> input_generics{}; 273 std::array<Id, 32> input_generics{};
272 274
273 Id output_point_size{}; 275 Id output_point_size{};
274 Id output_position{}; 276 Id output_position{};
277 Id output_front_color{};
278 std::array<Id, 10> output_fixed_fnc_textures{};
275 std::array<std::array<GenericElementInfo, 4>, 32> output_generics{}; 279 std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
276 280
277 Id output_tess_level_outer{}; 281 Id output_tess_level_outer{};
diff --git a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
index 14c77f162..68f360b3c 100644
--- a/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
+++ b/src/shader_recompiler/backend/spirv/emit_spirv_context_get_set.cpp
@@ -43,6 +43,25 @@ Id AttrPointer(EmitContext& ctx, Id pointer_type, Id vertex, Id base, Args&&...
43 } 43 }
44} 44}
45 45
46bool IsFixedFncTexture(IR::Attribute attribute) {
47 return attribute >= IR::Attribute::FixedFncTexture0S &&
48 attribute <= IR::Attribute::FixedFncTexture9Q;
49}
50
51u32 FixedFncTextureAttributeIndex(IR::Attribute attribute) {
52 if (!IsFixedFncTexture(attribute)) {
53 throw InvalidArgument("Attribute {} is not a FixedFncTexture", attribute);
54 }
55 return (static_cast<u32>(attribute) - static_cast<u32>(IR::Attribute::FixedFncTexture0S)) / 4u;
56}
57
58u32 FixedFncTextureAttributeElement(IR::Attribute attribute) {
59 if (!IsFixedFncTexture(attribute)) {
60 throw InvalidArgument("Attribute {} is not a FixedFncTexture", attribute);
61 }
62 return static_cast<u32>(attribute) % 4u;
63}
64
46template <typename... Args> 65template <typename... Args>
47Id OutputAccessChain(EmitContext& ctx, Id result_type, Id base, Args&&... args) { 66Id OutputAccessChain(EmitContext& ctx, Id result_type, Id base, Args&&... args) {
48 if (ctx.stage == Stage::TessellationControl) { 67 if (ctx.stage == Stage::TessellationControl) {
@@ -74,6 +93,13 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
74 return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id); 93 return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id);
75 } 94 }
76 } 95 }
96 if (IsFixedFncTexture(attr)) {
97 const u32 index{FixedFncTextureAttributeIndex(attr)};
98 const u32 element{FixedFncTextureAttributeElement(attr)};
99 const Id element_id{ctx.Const(element)};
100 return OutputAccessChain(ctx, ctx.output_f32, ctx.output_fixed_fnc_textures[index],
101 element_id);
102 }
77 switch (attr) { 103 switch (attr) {
78 case IR::Attribute::PointSize: 104 case IR::Attribute::PointSize:
79 return ctx.output_point_size; 105 return ctx.output_point_size;
@@ -85,6 +111,14 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
85 const Id element_id{ctx.Const(element)}; 111 const Id element_id{ctx.Const(element)};
86 return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id); 112 return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id);
87 } 113 }
114 case IR::Attribute::ColorFrontDiffuseR:
115 case IR::Attribute::ColorFrontDiffuseG:
116 case IR::Attribute::ColorFrontDiffuseB:
117 case IR::Attribute::ColorFrontDiffuseA: {
118 const u32 element{static_cast<u32>(attr) % 4};
119 const Id element_id{ctx.Const(element)};
120 return OutputAccessChain(ctx, ctx.output_f32, ctx.output_front_color, element_id);
121 }
88 case IR::Attribute::ClipDistance0: 122 case IR::Attribute::ClipDistance0:
89 case IR::Attribute::ClipDistance1: 123 case IR::Attribute::ClipDistance1:
90 case IR::Attribute::ClipDistance2: 124 case IR::Attribute::ClipDistance2:
@@ -307,6 +341,12 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
307 const Id value{ctx.OpLoad(type->id, pointer)}; 341 const Id value{ctx.OpLoad(type->id, pointer)};
308 return type->needs_cast ? ctx.OpBitcast(ctx.F32[1], value) : value; 342 return type->needs_cast ? ctx.OpBitcast(ctx.F32[1], value) : value;
309 } 343 }
344 if (IsFixedFncTexture(attr)) {
345 const u32 index{FixedFncTextureAttributeIndex(attr)};
346 const Id attr_id{ctx.input_fixed_fnc_textures[index]};
347 const Id attr_ptr{AttrPointer(ctx, ctx.input_f32, vertex, attr_id, ctx.Const(element))};
348 return ctx.OpLoad(ctx.F32[1], attr_ptr);
349 }
310 switch (attr) { 350 switch (attr) {
311 case IR::Attribute::PrimitiveId: 351 case IR::Attribute::PrimitiveId:
312 return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.primitive_id)); 352 return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.primitive_id));
@@ -316,6 +356,13 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
316 case IR::Attribute::PositionW: 356 case IR::Attribute::PositionW:
317 return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position, 357 return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
318 ctx.Const(element))); 358 ctx.Const(element)));
359 case IR::Attribute::ColorFrontDiffuseR:
360 case IR::Attribute::ColorFrontDiffuseG:
361 case IR::Attribute::ColorFrontDiffuseB:
362 case IR::Attribute::ColorFrontDiffuseA: {
363 return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_front_color,
364 ctx.Const(element)));
365 }
319 case IR::Attribute::InstanceId: 366 case IR::Attribute::InstanceId:
320 if (ctx.profile.support_vertex_instance_id) { 367 if (ctx.profile.support_vertex_instance_id) {
321 return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id)); 368 return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id));
@@ -333,8 +380,9 @@ Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
333 return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base)); 380 return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
334 } 381 }
335 case IR::Attribute::FrontFace: 382 case IR::Attribute::FrontFace:
336 return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1, ctx.front_face), 383 return ctx.OpSelect(ctx.F32[1], ctx.OpLoad(ctx.U1, ctx.front_face),
337 ctx.Const(std::numeric_limits<u32>::max()), ctx.u32_zero_value); 384 ctx.OpBitcast(ctx.F32[1], ctx.Const(std::numeric_limits<u32>::max())),
385 ctx.f32_zero_value);
338 case IR::Attribute::PointSpriteS: 386 case IR::Attribute::PointSpriteS:
339 return ctx.OpLoad(ctx.F32[1], 387 return ctx.OpLoad(ctx.F32[1],
340 ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value)); 388 ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value));
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index 8b3e0a15c..69eeaa3e6 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -20,6 +20,7 @@
20#include "shader_recompiler/frontend/maxwell/decode.h" 20#include "shader_recompiler/frontend/maxwell/decode.h"
21#include "shader_recompiler/frontend/maxwell/structured_control_flow.h" 21#include "shader_recompiler/frontend/maxwell/structured_control_flow.h"
22#include "shader_recompiler/frontend/maxwell/translate/translate.h" 22#include "shader_recompiler/frontend/maxwell/translate/translate.h"
23#include "shader_recompiler/host_translate_info.h"
23#include "shader_recompiler/object_pool.h" 24#include "shader_recompiler/object_pool.h"
24 25
25namespace Shader::Maxwell { 26namespace Shader::Maxwell {
@@ -652,7 +653,7 @@ class TranslatePass {
652public: 653public:
653 TranslatePass(ObjectPool<IR::Inst>& inst_pool_, ObjectPool<IR::Block>& block_pool_, 654 TranslatePass(ObjectPool<IR::Inst>& inst_pool_, ObjectPool<IR::Block>& block_pool_,
654 ObjectPool<Statement>& stmt_pool_, Environment& env_, Statement& root_stmt, 655 ObjectPool<Statement>& stmt_pool_, Environment& env_, Statement& root_stmt,
655 IR::AbstractSyntaxList& syntax_list_) 656 IR::AbstractSyntaxList& syntax_list_, const HostTranslateInfo& host_info)
656 : stmt_pool{stmt_pool_}, inst_pool{inst_pool_}, block_pool{block_pool_}, env{env_}, 657 : stmt_pool{stmt_pool_}, inst_pool{inst_pool_}, block_pool{block_pool_}, env{env_},
657 syntax_list{syntax_list_} { 658 syntax_list{syntax_list_} {
658 Visit(root_stmt, nullptr, nullptr); 659 Visit(root_stmt, nullptr, nullptr);
@@ -660,6 +661,9 @@ public:
660 IR::Block& first_block{*syntax_list.front().data.block}; 661 IR::Block& first_block{*syntax_list.front().data.block};
661 IR::IREmitter ir(first_block, first_block.begin()); 662 IR::IREmitter ir(first_block, first_block.begin());
662 ir.Prologue(); 663 ir.Prologue();
664 if (uses_demote_to_helper && host_info.needs_demote_reorder) {
665 DemoteCombinationPass();
666 }
663 } 667 }
664 668
665private: 669private:
@@ -809,7 +813,14 @@ private:
809 } 813 }
810 case StatementType::Return: { 814 case StatementType::Return: {
811 ensure_block(); 815 ensure_block();
812 IR::IREmitter{*current_block}.Epilogue(); 816 IR::Block* return_block{block_pool.Create(inst_pool)};
817 IR::IREmitter{*return_block}.Epilogue();
818 current_block->AddBranch(return_block);
819
820 auto& merge{syntax_list.emplace_back()};
821 merge.type = IR::AbstractSyntaxNode::Type::Block;
822 merge.data.block = return_block;
823
813 current_block = nullptr; 824 current_block = nullptr;
814 syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Return; 825 syntax_list.emplace_back().type = IR::AbstractSyntaxNode::Type::Return;
815 break; 826 break;
@@ -824,6 +835,7 @@ private:
824 auto& merge{syntax_list.emplace_back()}; 835 auto& merge{syntax_list.emplace_back()};
825 merge.type = IR::AbstractSyntaxNode::Type::Block; 836 merge.type = IR::AbstractSyntaxNode::Type::Block;
826 merge.data.block = demote_block; 837 merge.data.block = demote_block;
838 uses_demote_to_helper = true;
827 break; 839 break;
828 } 840 }
829 case StatementType::Unreachable: { 841 case StatementType::Unreachable: {
@@ -855,11 +867,117 @@ private:
855 return block_pool.Create(inst_pool); 867 return block_pool.Create(inst_pool);
856 } 868 }
857 869
870 void DemoteCombinationPass() {
871 using Type = IR::AbstractSyntaxNode::Type;
872 std::vector<IR::Block*> demote_blocks;
873 std::vector<IR::U1> demote_conds;
874 u32 num_epilogues{};
875 u32 branch_depth{};
876 for (const IR::AbstractSyntaxNode& node : syntax_list) {
877 if (node.type == Type::If) {
878 ++branch_depth;
879 }
880 if (node.type == Type::EndIf) {
881 --branch_depth;
882 }
883 if (node.type != Type::Block) {
884 continue;
885 }
886 if (branch_depth > 1) {
887 // Skip reordering nested demote branches.
888 continue;
889 }
890 for (const IR::Inst& inst : node.data.block->Instructions()) {
891 const IR::Opcode op{inst.GetOpcode()};
892 if (op == IR::Opcode::DemoteToHelperInvocation) {
893 demote_blocks.push_back(node.data.block);
894 break;
895 }
896 if (op == IR::Opcode::Epilogue) {
897 ++num_epilogues;
898 }
899 }
900 }
901 if (demote_blocks.size() == 0) {
902 return;
903 }
904 if (num_epilogues > 1) {
905 LOG_DEBUG(Shader, "Combining demotes with more than one return is not implemented.");
906 return;
907 }
908 s64 last_iterator_offset{};
909 auto& asl{syntax_list};
910 for (const IR::Block* demote_block : demote_blocks) {
911 const auto start_it{asl.begin() + last_iterator_offset};
912 auto asl_it{std::find_if(start_it, asl.end(), [&](const IR::AbstractSyntaxNode& asn) {
913 return asn.type == Type::If && asn.data.if_node.body == demote_block;
914 })};
915 if (asl_it == asl.end()) {
916 // Demote without a conditional branch.
917 // No need to proceed since all fragment instances will be demoted regardless.
918 return;
919 }
920 const IR::Block* const end_if = asl_it->data.if_node.merge;
921 demote_conds.push_back(asl_it->data.if_node.cond);
922 last_iterator_offset = std::distance(asl.begin(), asl_it);
923
924 asl_it = asl.erase(asl_it);
925 asl_it = std::find_if(asl_it, asl.end(), [&](const IR::AbstractSyntaxNode& asn) {
926 return asn.type == Type::Block && asn.data.block == demote_block;
927 });
928
929 asl_it = asl.erase(asl_it);
930 asl_it = std::find_if(asl_it, asl.end(), [&](const IR::AbstractSyntaxNode& asn) {
931 return asn.type == Type::EndIf && asn.data.end_if.merge == end_if;
932 });
933 asl_it = asl.erase(asl_it);
934 }
935 const auto epilogue_func{[](const IR::AbstractSyntaxNode& asn) {
936 if (asn.type != Type::Block) {
937 return false;
938 }
939 for (const auto& inst : asn.data.block->Instructions()) {
940 if (inst.GetOpcode() == IR::Opcode::Epilogue) {
941 return true;
942 }
943 }
944 return false;
945 }};
946 const auto reverse_it{std::find_if(asl.rbegin(), asl.rend(), epilogue_func)};
947 const auto return_block_it{(reverse_it + 1).base()};
948
949 IR::IREmitter ir{*(return_block_it - 1)->data.block};
950 IR::U1 cond(IR::Value(false));
951 for (const auto& demote_cond : demote_conds) {
952 cond = ir.LogicalOr(cond, demote_cond);
953 }
954 cond.Inst()->DestructiveAddUsage(1);
955
956 IR::AbstractSyntaxNode demote_if_node{};
957 demote_if_node.type = Type::If;
958 demote_if_node.data.if_node.cond = cond;
959 demote_if_node.data.if_node.body = demote_blocks[0];
960 demote_if_node.data.if_node.merge = return_block_it->data.block;
961
962 IR::AbstractSyntaxNode demote_node{};
963 demote_node.type = Type::Block;
964 demote_node.data.block = demote_blocks[0];
965
966 IR::AbstractSyntaxNode demote_endif_node{};
967 demote_endif_node.type = Type::EndIf;
968 demote_endif_node.data.end_if.merge = return_block_it->data.block;
969
970 asl.insert(return_block_it, demote_endif_node);
971 asl.insert(return_block_it, demote_node);
972 asl.insert(return_block_it, demote_if_node);
973 }
974
858 ObjectPool<Statement>& stmt_pool; 975 ObjectPool<Statement>& stmt_pool;
859 ObjectPool<IR::Inst>& inst_pool; 976 ObjectPool<IR::Inst>& inst_pool;
860 ObjectPool<IR::Block>& block_pool; 977 ObjectPool<IR::Block>& block_pool;
861 Environment& env; 978 Environment& env;
862 IR::AbstractSyntaxList& syntax_list; 979 IR::AbstractSyntaxList& syntax_list;
980 bool uses_demote_to_helper{};
863 981
864// TODO: C++20 Remove this when all compilers support constexpr std::vector 982// TODO: C++20 Remove this when all compilers support constexpr std::vector
865#if __cpp_lib_constexpr_vector >= 201907 983#if __cpp_lib_constexpr_vector >= 201907
@@ -871,12 +989,13 @@ private:
871} // Anonymous namespace 989} // Anonymous namespace
872 990
873IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool, 991IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool,
874 Environment& env, Flow::CFG& cfg) { 992 Environment& env, Flow::CFG& cfg,
993 const HostTranslateInfo& host_info) {
875 ObjectPool<Statement> stmt_pool{64}; 994 ObjectPool<Statement> stmt_pool{64};
876 GotoPass goto_pass{cfg, stmt_pool}; 995 GotoPass goto_pass{cfg, stmt_pool};
877 Statement& root{goto_pass.RootStatement()}; 996 Statement& root{goto_pass.RootStatement()};
878 IR::AbstractSyntaxList syntax_list; 997 IR::AbstractSyntaxList syntax_list;
879 TranslatePass{inst_pool, block_pool, stmt_pool, env, root, syntax_list}; 998 TranslatePass{inst_pool, block_pool, stmt_pool, env, root, syntax_list, host_info};
880 return syntax_list; 999 return syntax_list;
881} 1000}
882 1001
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.h b/src/shader_recompiler/frontend/maxwell/structured_control_flow.h
index 88b083649..e38158da3 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.h
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.h
@@ -11,10 +11,13 @@
11#include "shader_recompiler/frontend/maxwell/control_flow.h" 11#include "shader_recompiler/frontend/maxwell/control_flow.h"
12#include "shader_recompiler/object_pool.h" 12#include "shader_recompiler/object_pool.h"
13 13
14namespace Shader::Maxwell { 14namespace Shader {
15struct HostTranslateInfo;
16namespace Maxwell {
15 17
16[[nodiscard]] IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool, 18[[nodiscard]] IR::AbstractSyntaxList BuildASL(ObjectPool<IR::Inst>& inst_pool,
17 ObjectPool<IR::Block>& block_pool, Environment& env, 19 ObjectPool<IR::Block>& block_pool, Environment& env,
18 Flow::CFG& cfg); 20 Flow::CFG& cfg, const HostTranslateInfo& host_info);
19 21
20} // namespace Shader::Maxwell 22} // namespace Maxwell
23} // namespace Shader
diff --git a/src/shader_recompiler/frontend/maxwell/translate_program.cpp b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
index c067d459c..012d55357 100644
--- a/src/shader_recompiler/frontend/maxwell/translate_program.cpp
+++ b/src/shader_recompiler/frontend/maxwell/translate_program.cpp
@@ -130,7 +130,7 @@ void AddNVNStorageBuffers(IR::Program& program) {
130IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool, 130IR::Program TranslateProgram(ObjectPool<IR::Inst>& inst_pool, ObjectPool<IR::Block>& block_pool,
131 Environment& env, Flow::CFG& cfg, const HostTranslateInfo& host_info) { 131 Environment& env, Flow::CFG& cfg, const HostTranslateInfo& host_info) {
132 IR::Program program; 132 IR::Program program;
133 program.syntax_list = BuildASL(inst_pool, block_pool, env, cfg); 133 program.syntax_list = BuildASL(inst_pool, block_pool, env, cfg, host_info);
134 program.blocks = GenerateBlocks(program.syntax_list); 134 program.blocks = GenerateBlocks(program.syntax_list);
135 program.post_order_blocks = PostOrder(program.syntax_list.front()); 135 program.post_order_blocks = PostOrder(program.syntax_list.front());
136 program.stage = env.ShaderStage(); 136 program.stage = env.ShaderStage();
diff --git a/src/shader_recompiler/host_translate_info.h b/src/shader_recompiler/host_translate_info.h
index 94a584219..96468b2e7 100644
--- a/src/shader_recompiler/host_translate_info.h
+++ b/src/shader_recompiler/host_translate_info.h
@@ -11,8 +11,9 @@ namespace Shader {
11 11
12/// Misc information about the host 12/// Misc information about the host
13struct HostTranslateInfo { 13struct HostTranslateInfo {
14 bool support_float16{}; ///< True when the device supports 16-bit floats 14 bool support_float16{}; ///< True when the device supports 16-bit floats
15 bool support_int64{}; ///< True when the device supports 64-bit integers 15 bool support_int64{}; ///< True when the device supports 64-bit integers
16 bool needs_demote_reorder{}; ///< True when the device needs DemoteToHelperInvocation reordered
16}; 17};
17 18
18} // namespace Shader 19} // namespace Shader
diff --git a/src/video_core/buffer_cache/buffer_base.h b/src/video_core/buffer_cache/buffer_base.h
index c3318095c..be2113f5a 100644
--- a/src/video_core/buffer_cache/buffer_base.h
+++ b/src/video_core/buffer_cache/buffer_base.h
@@ -261,16 +261,6 @@ public:
261 stream_score += score; 261 stream_score += score;
262 } 262 }
263 263
264 /// Sets the new frame tick
265 void SetFrameTick(u64 new_frame_tick) noexcept {
266 frame_tick = new_frame_tick;
267 }
268
269 /// Returns the new frame tick
270 [[nodiscard]] u64 FrameTick() const noexcept {
271 return frame_tick;
272 }
273
274 /// Returns the likeliness of this being a stream buffer 264 /// Returns the likeliness of this being a stream buffer
275 [[nodiscard]] int StreamScore() const noexcept { 265 [[nodiscard]] int StreamScore() const noexcept {
276 return stream_score; 266 return stream_score;
@@ -307,6 +297,14 @@ public:
307 return words.size_bytes; 297 return words.size_bytes;
308 } 298 }
309 299
300 size_t getLRUID() const noexcept {
301 return lru_id;
302 }
303
304 void setLRUID(size_t lru_id_) {
305 lru_id = lru_id_;
306 }
307
310private: 308private:
311 template <Type type> 309 template <Type type>
312 u64* Array() noexcept { 310 u64* Array() noexcept {
@@ -603,9 +601,9 @@ private:
603 RasterizerInterface* rasterizer = nullptr; 601 RasterizerInterface* rasterizer = nullptr;
604 VAddr cpu_addr = 0; 602 VAddr cpu_addr = 0;
605 Words words; 603 Words words;
606 u64 frame_tick = 0;
607 BufferFlagBits flags{}; 604 BufferFlagBits flags{};
608 int stream_score = 0; 605 int stream_score = 0;
606 size_t lru_id = SIZE_MAX;
609}; 607};
610 608
611} // namespace VideoCommon 609} // namespace VideoCommon
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 3b43554f9..7bfd57369 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -20,6 +20,7 @@
20#include "common/common_types.h" 20#include "common/common_types.h"
21#include "common/div_ceil.h" 21#include "common/div_ceil.h"
22#include "common/literals.h" 22#include "common/literals.h"
23#include "common/lru_cache.h"
23#include "common/microprofile.h" 24#include "common/microprofile.h"
24#include "common/scope_exit.h" 25#include "common/scope_exit.h"
25#include "common/settings.h" 26#include "common/settings.h"
@@ -330,7 +331,7 @@ private:
330 template <bool insert> 331 template <bool insert>
331 void ChangeRegister(BufferId buffer_id); 332 void ChangeRegister(BufferId buffer_id);
332 333
333 void TouchBuffer(Buffer& buffer) const noexcept; 334 void TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept;
334 335
335 bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size); 336 bool SynchronizeBuffer(Buffer& buffer, VAddr cpu_addr, u32 size);
336 337
@@ -428,7 +429,11 @@ private:
428 size_t immediate_buffer_capacity = 0; 429 size_t immediate_buffer_capacity = 0;
429 std::unique_ptr<u8[]> immediate_buffer_alloc; 430 std::unique_ptr<u8[]> immediate_buffer_alloc;
430 431
431 typename SlotVector<Buffer>::Iterator deletion_iterator; 432 struct LRUItemParams {
433 using ObjectType = BufferId;
434 using TickType = u64;
435 };
436 Common::LeastRecentlyUsedCache<LRUItemParams> lru_cache;
432 u64 frame_tick = 0; 437 u64 frame_tick = 0;
433 u64 total_used_memory = 0; 438 u64 total_used_memory = 0;
434 439
@@ -445,7 +450,6 @@ BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
445 kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} { 450 kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
446 // Ensure the first slot is used for the null buffer 451 // Ensure the first slot is used for the null buffer
447 void(slot_buffers.insert(runtime, NullBufferParams{})); 452 void(slot_buffers.insert(runtime, NullBufferParams{}));
448 deletion_iterator = slot_buffers.end();
449 common_ranges.clear(); 453 common_ranges.clear();
450} 454}
451 455
@@ -454,20 +458,17 @@ void BufferCache<P>::RunGarbageCollector() {
454 const bool aggressive_gc = total_used_memory >= CRITICAL_MEMORY; 458 const bool aggressive_gc = total_used_memory >= CRITICAL_MEMORY;
455 const u64 ticks_to_destroy = aggressive_gc ? 60 : 120; 459 const u64 ticks_to_destroy = aggressive_gc ? 60 : 120;
456 int num_iterations = aggressive_gc ? 64 : 32; 460 int num_iterations = aggressive_gc ? 64 : 32;
457 for (; num_iterations > 0; --num_iterations) { 461 const auto clean_up = [this, &num_iterations](BufferId buffer_id) {
458 if (deletion_iterator == slot_buffers.end()) { 462 if (num_iterations == 0) {
459 deletion_iterator = slot_buffers.begin(); 463 return true;
460 }
461 ++deletion_iterator;
462 if (deletion_iterator == slot_buffers.end()) {
463 break;
464 }
465 const auto [buffer_id, buffer] = *deletion_iterator;
466 if (buffer->FrameTick() + ticks_to_destroy < frame_tick) {
467 DownloadBufferMemory(*buffer);
468 DeleteBuffer(buffer_id);
469 } 464 }
470 } 465 --num_iterations;
466 auto& buffer = slot_buffers[buffer_id];
467 DownloadBufferMemory(buffer);
468 DeleteBuffer(buffer_id);
469 return false;
470 };
471 lru_cache.ForEachItemBelow(frame_tick - ticks_to_destroy, clean_up);
471} 472}
472 473
473template <class P> 474template <class P>
@@ -485,7 +486,7 @@ void BufferCache<P>::TickFrame() {
485 const bool skip_preferred = hits * 256 < shots * 251; 486 const bool skip_preferred = hits * 256 < shots * 251;
486 uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0; 487 uniform_buffer_skip_cache_size = skip_preferred ? DEFAULT_SKIP_CACHE_SIZE : 0;
487 488
488 if (Settings::values.use_caches_gc.GetValue() && total_used_memory >= EXPECTED_MEMORY) { 489 if (total_used_memory >= EXPECTED_MEMORY) {
489 RunGarbageCollector(); 490 RunGarbageCollector();
490 } 491 }
491 ++frame_tick; 492 ++frame_tick;
@@ -954,7 +955,7 @@ bool BufferCache<P>::IsRegionCpuModified(VAddr addr, size_t size) {
954template <class P> 955template <class P>
955void BufferCache<P>::BindHostIndexBuffer() { 956void BufferCache<P>::BindHostIndexBuffer() {
956 Buffer& buffer = slot_buffers[index_buffer.buffer_id]; 957 Buffer& buffer = slot_buffers[index_buffer.buffer_id];
957 TouchBuffer(buffer); 958 TouchBuffer(buffer, index_buffer.buffer_id);
958 const u32 offset = buffer.Offset(index_buffer.cpu_addr); 959 const u32 offset = buffer.Offset(index_buffer.cpu_addr);
959 const u32 size = index_buffer.size; 960 const u32 size = index_buffer.size;
960 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); 961 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
@@ -975,7 +976,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
975 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { 976 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
976 const Binding& binding = vertex_buffers[index]; 977 const Binding& binding = vertex_buffers[index];
977 Buffer& buffer = slot_buffers[binding.buffer_id]; 978 Buffer& buffer = slot_buffers[binding.buffer_id];
978 TouchBuffer(buffer); 979 TouchBuffer(buffer, binding.buffer_id);
979 SynchronizeBuffer(buffer, binding.cpu_addr, binding.size); 980 SynchronizeBuffer(buffer, binding.cpu_addr, binding.size);
980 if (!flags[Dirty::VertexBuffer0 + index]) { 981 if (!flags[Dirty::VertexBuffer0 + index]) {
981 continue; 982 continue;
@@ -1011,7 +1012,7 @@ void BufferCache<P>::BindHostGraphicsUniformBuffer(size_t stage, u32 index, u32
1011 const VAddr cpu_addr = binding.cpu_addr; 1012 const VAddr cpu_addr = binding.cpu_addr;
1012 const u32 size = std::min(binding.size, (*uniform_buffer_sizes)[stage][index]); 1013 const u32 size = std::min(binding.size, (*uniform_buffer_sizes)[stage][index]);
1013 Buffer& buffer = slot_buffers[binding.buffer_id]; 1014 Buffer& buffer = slot_buffers[binding.buffer_id];
1014 TouchBuffer(buffer); 1015 TouchBuffer(buffer, binding.buffer_id);
1015 const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID && 1016 const bool use_fast_buffer = binding.buffer_id != NULL_BUFFER_ID &&
1016 size <= uniform_buffer_skip_cache_size && 1017 size <= uniform_buffer_skip_cache_size &&
1017 !buffer.IsRegionGpuModified(cpu_addr, size); 1018 !buffer.IsRegionGpuModified(cpu_addr, size);
@@ -1083,7 +1084,7 @@ void BufferCache<P>::BindHostGraphicsStorageBuffers(size_t stage) {
1083 ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) { 1084 ForEachEnabledBit(enabled_storage_buffers[stage], [&](u32 index) {
1084 const Binding& binding = storage_buffers[stage][index]; 1085 const Binding& binding = storage_buffers[stage][index];
1085 Buffer& buffer = slot_buffers[binding.buffer_id]; 1086 Buffer& buffer = slot_buffers[binding.buffer_id];
1086 TouchBuffer(buffer); 1087 TouchBuffer(buffer, binding.buffer_id);
1087 const u32 size = binding.size; 1088 const u32 size = binding.size;
1088 SynchronizeBuffer(buffer, binding.cpu_addr, size); 1089 SynchronizeBuffer(buffer, binding.cpu_addr, size);
1089 1090
@@ -1128,7 +1129,7 @@ void BufferCache<P>::BindHostTransformFeedbackBuffers() {
1128 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { 1129 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
1129 const Binding& binding = transform_feedback_buffers[index]; 1130 const Binding& binding = transform_feedback_buffers[index];
1130 Buffer& buffer = slot_buffers[binding.buffer_id]; 1131 Buffer& buffer = slot_buffers[binding.buffer_id];
1131 TouchBuffer(buffer); 1132 TouchBuffer(buffer, binding.buffer_id);
1132 const u32 size = binding.size; 1133 const u32 size = binding.size;
1133 SynchronizeBuffer(buffer, binding.cpu_addr, size); 1134 SynchronizeBuffer(buffer, binding.cpu_addr, size);
1134 1135
@@ -1148,7 +1149,7 @@ void BufferCache<P>::BindHostComputeUniformBuffers() {
1148 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { 1149 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
1149 const Binding& binding = compute_uniform_buffers[index]; 1150 const Binding& binding = compute_uniform_buffers[index];
1150 Buffer& buffer = slot_buffers[binding.buffer_id]; 1151 Buffer& buffer = slot_buffers[binding.buffer_id];
1151 TouchBuffer(buffer); 1152 TouchBuffer(buffer, binding.buffer_id);
1152 const u32 size = std::min(binding.size, (*compute_uniform_buffer_sizes)[index]); 1153 const u32 size = std::min(binding.size, (*compute_uniform_buffer_sizes)[index]);
1153 SynchronizeBuffer(buffer, binding.cpu_addr, size); 1154 SynchronizeBuffer(buffer, binding.cpu_addr, size);
1154 1155
@@ -1168,7 +1169,7 @@ void BufferCache<P>::BindHostComputeStorageBuffers() {
1168 ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) { 1169 ForEachEnabledBit(enabled_compute_storage_buffers, [&](u32 index) {
1169 const Binding& binding = compute_storage_buffers[index]; 1170 const Binding& binding = compute_storage_buffers[index];
1170 Buffer& buffer = slot_buffers[binding.buffer_id]; 1171 Buffer& buffer = slot_buffers[binding.buffer_id];
1171 TouchBuffer(buffer); 1172 TouchBuffer(buffer, binding.buffer_id);
1172 const u32 size = binding.size; 1173 const u32 size = binding.size;
1173 SynchronizeBuffer(buffer, binding.cpu_addr, size); 1174 SynchronizeBuffer(buffer, binding.cpu_addr, size);
1174 1175
@@ -1513,11 +1514,11 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
1513 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); 1514 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
1514 const u32 size = static_cast<u32>(overlap.end - overlap.begin); 1515 const u32 size = static_cast<u32>(overlap.end - overlap.begin);
1515 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); 1516 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
1516 TouchBuffer(slot_buffers[new_buffer_id]);
1517 for (const BufferId overlap_id : overlap.ids) { 1517 for (const BufferId overlap_id : overlap.ids) {
1518 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); 1518 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
1519 } 1519 }
1520 Register(new_buffer_id); 1520 Register(new_buffer_id);
1521 TouchBuffer(slot_buffers[new_buffer_id], new_buffer_id);
1521 return new_buffer_id; 1522 return new_buffer_id;
1522} 1523}
1523 1524
@@ -1534,12 +1535,14 @@ void BufferCache<P>::Unregister(BufferId buffer_id) {
1534template <class P> 1535template <class P>
1535template <bool insert> 1536template <bool insert>
1536void BufferCache<P>::ChangeRegister(BufferId buffer_id) { 1537void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
1537 const Buffer& buffer = slot_buffers[buffer_id]; 1538 Buffer& buffer = slot_buffers[buffer_id];
1538 const auto size = buffer.SizeBytes(); 1539 const auto size = buffer.SizeBytes();
1539 if (insert) { 1540 if (insert) {
1540 total_used_memory += Common::AlignUp(size, 1024); 1541 total_used_memory += Common::AlignUp(size, 1024);
1542 buffer.setLRUID(lru_cache.Insert(buffer_id, frame_tick));
1541 } else { 1543 } else {
1542 total_used_memory -= Common::AlignUp(size, 1024); 1544 total_used_memory -= Common::AlignUp(size, 1024);
1545 lru_cache.Free(buffer.getLRUID());
1543 } 1546 }
1544 const VAddr cpu_addr_begin = buffer.CpuAddr(); 1547 const VAddr cpu_addr_begin = buffer.CpuAddr();
1545 const VAddr cpu_addr_end = cpu_addr_begin + size; 1548 const VAddr cpu_addr_end = cpu_addr_begin + size;
@@ -1555,8 +1558,10 @@ void BufferCache<P>::ChangeRegister(BufferId buffer_id) {
1555} 1558}
1556 1559
1557template <class P> 1560template <class P>
1558void BufferCache<P>::TouchBuffer(Buffer& buffer) const noexcept { 1561void BufferCache<P>::TouchBuffer(Buffer& buffer, BufferId buffer_id) noexcept {
1559 buffer.SetFrameTick(frame_tick); 1562 if (buffer_id != NULL_BUFFER_ID) {
1563 lru_cache.Touch(buffer.getLRUID(), frame_tick);
1564 }
1560} 1565}
1561 1566
1562template <class P> 1567template <class P>
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/command_classes/codecs/vp9.cpp
index 70030066a..d7e749485 100644
--- a/src/video_core/command_classes/codecs/vp9.cpp
+++ b/src/video_core/command_classes/codecs/vp9.cpp
@@ -742,6 +742,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
742 uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q); 742 uncomp_writer.WriteDeltaQ(current_frame_info.uv_dc_delta_q);
743 uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q); 743 uncomp_writer.WriteDeltaQ(current_frame_info.uv_ac_delta_q);
744 744
745 ASSERT(!current_frame_info.segment_enabled);
745 uncomp_writer.WriteBit(false); // Segmentation enabled (TODO). 746 uncomp_writer.WriteBit(false); // Segmentation enabled (TODO).
746 747
747 const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width); 748 const s32 min_tile_cols_log2 = CalcMinLog2TileCols(current_frame_info.frame_size.width);
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/command_classes/codecs/vp9_types.h
index 87eafdb03..3b1ed4b3a 100644
--- a/src/video_core/command_classes/codecs/vp9_types.h
+++ b/src/video_core/command_classes/codecs/vp9_types.h
@@ -22,7 +22,7 @@ struct Vp9FrameDimensions {
22}; 22};
23static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size"); 23static_assert(sizeof(Vp9FrameDimensions) == 0x8, "Vp9 Vp9FrameDimensions is an invalid size");
24 24
25enum FrameFlags : u32 { 25enum class FrameFlags : u32 {
26 IsKeyFrame = 1 << 0, 26 IsKeyFrame = 1 << 0,
27 LastFrameIsKeyFrame = 1 << 1, 27 LastFrameIsKeyFrame = 1 << 1,
28 FrameSizeChanged = 1 << 2, 28 FrameSizeChanged = 1 << 2,
@@ -30,6 +30,7 @@ enum FrameFlags : u32 {
30 LastShowFrame = 1 << 4, 30 LastShowFrame = 1 << 4,
31 IntraOnly = 1 << 5, 31 IntraOnly = 1 << 5,
32}; 32};
33DECLARE_ENUM_FLAG_OPERATORS(FrameFlags)
33 34
34enum class TxSize { 35enum class TxSize {
35 Tx4x4 = 0, // 4x4 transform 36 Tx4x4 = 0, // 4x4 transform
@@ -92,44 +93,34 @@ struct Vp9EntropyProbs {
92static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size"); 93static_assert(sizeof(Vp9EntropyProbs) == 0x7B4, "Vp9EntropyProbs is an invalid size");
93 94
94struct Vp9PictureInfo { 95struct Vp9PictureInfo {
95 bool is_key_frame; 96 u32 bitstream_size;
96 bool intra_only; 97 std::array<u64, 4> frame_offsets;
97 bool last_frame_was_key;
98 bool frame_size_changed;
99 bool error_resilient_mode;
100 bool last_frame_shown;
101 bool show_frame;
102 std::array<s8, 4> ref_frame_sign_bias; 98 std::array<s8, 4> ref_frame_sign_bias;
103 s32 base_q_index; 99 s32 base_q_index;
104 s32 y_dc_delta_q; 100 s32 y_dc_delta_q;
105 s32 uv_dc_delta_q; 101 s32 uv_dc_delta_q;
106 s32 uv_ac_delta_q; 102 s32 uv_ac_delta_q;
107 bool lossless;
108 s32 transform_mode; 103 s32 transform_mode;
109 bool allow_high_precision_mv;
110 s32 interp_filter; 104 s32 interp_filter;
111 s32 reference_mode; 105 s32 reference_mode;
112 s8 comp_fixed_ref;
113 std::array<s8, 2> comp_var_ref;
114 s32 log2_tile_cols; 106 s32 log2_tile_cols;
115 s32 log2_tile_rows; 107 s32 log2_tile_rows;
116 bool segment_enabled;
117 bool segment_map_update;
118 bool segment_map_temporal_update;
119 s32 segment_abs_delta;
120 std::array<u32, 8> segment_feature_enable;
121 std::array<std::array<s16, 4>, 8> segment_feature_data;
122 bool mode_ref_delta_enabled;
123 bool use_prev_in_find_mv_refs;
124 std::array<s8, 4> ref_deltas; 108 std::array<s8, 4> ref_deltas;
125 std::array<s8, 2> mode_deltas; 109 std::array<s8, 2> mode_deltas;
126 Vp9EntropyProbs entropy; 110 Vp9EntropyProbs entropy;
127 Vp9FrameDimensions frame_size; 111 Vp9FrameDimensions frame_size;
128 u8 first_level; 112 u8 first_level;
129 u8 sharpness_level; 113 u8 sharpness_level;
130 u32 bitstream_size; 114 bool is_key_frame;
131 std::array<u64, 4> frame_offsets; 115 bool intra_only;
132 std::array<bool, 4> refresh_frame; 116 bool last_frame_was_key;
117 bool error_resilient_mode;
118 bool last_frame_shown;
119 bool show_frame;
120 bool lossless;
121 bool allow_high_precision_mv;
122 bool segment_enabled;
123 bool mode_ref_delta_enabled;
133}; 124};
134 125
135struct Vp9FrameContainer { 126struct Vp9FrameContainer {
@@ -145,7 +136,7 @@ struct PictureInfo {
145 Vp9FrameDimensions golden_frame_size; ///< 0x50 136 Vp9FrameDimensions golden_frame_size; ///< 0x50
146 Vp9FrameDimensions alt_frame_size; ///< 0x58 137 Vp9FrameDimensions alt_frame_size; ///< 0x58
147 Vp9FrameDimensions current_frame_size; ///< 0x60 138 Vp9FrameDimensions current_frame_size; ///< 0x60
148 u32 vp9_flags; ///< 0x68 139 FrameFlags vp9_flags; ///< 0x68
149 std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C 140 std::array<s8, 4> ref_frame_sign_bias; ///< 0x6C
150 u8 first_level; ///< 0x70 141 u8 first_level; ///< 0x70
151 u8 sharpness_level; ///< 0x71 142 u8 sharpness_level; ///< 0x71
@@ -158,60 +149,43 @@ struct PictureInfo {
158 u8 allow_high_precision_mv; ///< 0x78 149 u8 allow_high_precision_mv; ///< 0x78
159 u8 interp_filter; ///< 0x79 150 u8 interp_filter; ///< 0x79
160 u8 reference_mode; ///< 0x7A 151 u8 reference_mode; ///< 0x7A
161 s8 comp_fixed_ref; ///< 0x7B 152 INSERT_PADDING_BYTES_NOINIT(3); ///< 0x7B
162 std::array<s8, 2> comp_var_ref; ///< 0x7C
163 u8 log2_tile_cols; ///< 0x7E 153 u8 log2_tile_cols; ///< 0x7E
164 u8 log2_tile_rows; ///< 0x7F 154 u8 log2_tile_rows; ///< 0x7F
165 Segmentation segmentation; ///< 0x80 155 Segmentation segmentation; ///< 0x80
166 LoopFilter loop_filter; ///< 0xE4 156 LoopFilter loop_filter; ///< 0xE4
167 INSERT_PADDING_BYTES_NOINIT(5); ///< 0xEB 157 INSERT_PADDING_BYTES_NOINIT(21); ///< 0xEB
168 u32 surface_params; ///< 0xF0
169 INSERT_PADDING_WORDS_NOINIT(3); ///< 0xF4
170 158
171 [[nodiscard]] Vp9PictureInfo Convert() const { 159 [[nodiscard]] Vp9PictureInfo Convert() const {
172 return { 160 return {
173 .is_key_frame = (vp9_flags & FrameFlags::IsKeyFrame) != 0, 161 .bitstream_size = bitstream_size,
174 .intra_only = (vp9_flags & FrameFlags::IntraOnly) != 0, 162 .frame_offsets{},
175 .last_frame_was_key = (vp9_flags & FrameFlags::LastFrameIsKeyFrame) != 0,
176 .frame_size_changed = (vp9_flags & FrameFlags::FrameSizeChanged) != 0,
177 .error_resilient_mode = (vp9_flags & FrameFlags::ErrorResilientMode) != 0,
178 .last_frame_shown = (vp9_flags & FrameFlags::LastShowFrame) != 0,
179 .show_frame = true,
180 .ref_frame_sign_bias = ref_frame_sign_bias, 163 .ref_frame_sign_bias = ref_frame_sign_bias,
181 .base_q_index = base_q_index, 164 .base_q_index = base_q_index,
182 .y_dc_delta_q = y_dc_delta_q, 165 .y_dc_delta_q = y_dc_delta_q,
183 .uv_dc_delta_q = uv_dc_delta_q, 166 .uv_dc_delta_q = uv_dc_delta_q,
184 .uv_ac_delta_q = uv_ac_delta_q, 167 .uv_ac_delta_q = uv_ac_delta_q,
185 .lossless = lossless != 0,
186 .transform_mode = tx_mode, 168 .transform_mode = tx_mode,
187 .allow_high_precision_mv = allow_high_precision_mv != 0,
188 .interp_filter = interp_filter, 169 .interp_filter = interp_filter,
189 .reference_mode = reference_mode, 170 .reference_mode = reference_mode,
190 .comp_fixed_ref = comp_fixed_ref,
191 .comp_var_ref = comp_var_ref,
192 .log2_tile_cols = log2_tile_cols, 171 .log2_tile_cols = log2_tile_cols,
193 .log2_tile_rows = log2_tile_rows, 172 .log2_tile_rows = log2_tile_rows,
194 .segment_enabled = segmentation.enabled != 0,
195 .segment_map_update = segmentation.update_map != 0,
196 .segment_map_temporal_update = segmentation.temporal_update != 0,
197 .segment_abs_delta = segmentation.abs_delta,
198 .segment_feature_enable = segmentation.feature_mask,
199 .segment_feature_data = segmentation.feature_data,
200 .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0,
201 .use_prev_in_find_mv_refs = !(vp9_flags == (FrameFlags::ErrorResilientMode)) &&
202 !(vp9_flags == (FrameFlags::FrameSizeChanged)) &&
203 !(vp9_flags == (FrameFlags::IntraOnly)) &&
204 (vp9_flags == (FrameFlags::LastShowFrame)) &&
205 !(vp9_flags == (FrameFlags::LastFrameIsKeyFrame)),
206 .ref_deltas = loop_filter.ref_deltas, 173 .ref_deltas = loop_filter.ref_deltas,
207 .mode_deltas = loop_filter.mode_deltas, 174 .mode_deltas = loop_filter.mode_deltas,
208 .entropy{}, 175 .entropy{},
209 .frame_size = current_frame_size, 176 .frame_size = current_frame_size,
210 .first_level = first_level, 177 .first_level = first_level,
211 .sharpness_level = sharpness_level, 178 .sharpness_level = sharpness_level,
212 .bitstream_size = bitstream_size, 179 .is_key_frame = True(vp9_flags & FrameFlags::IsKeyFrame),
213 .frame_offsets{}, 180 .intra_only = True(vp9_flags & FrameFlags::IntraOnly),
214 .refresh_frame{}, 181 .last_frame_was_key = True(vp9_flags & FrameFlags::LastFrameIsKeyFrame),
182 .error_resilient_mode = True(vp9_flags & FrameFlags::ErrorResilientMode),
183 .last_frame_shown = True(vp9_flags & FrameFlags::LastShowFrame),
184 .show_frame = true,
185 .lossless = lossless != 0,
186 .allow_high_precision_mv = allow_high_precision_mv != 0,
187 .segment_enabled = segmentation.enabled != 0,
188 .mode_ref_delta_enabled = loop_filter.mode_ref_delta_enabled != 0,
215 }; 189 };
216 } 190 }
217}; 191};
@@ -316,7 +290,6 @@ ASSERT_POSITION(last_frame_size, 0x48);
316ASSERT_POSITION(first_level, 0x70); 290ASSERT_POSITION(first_level, 0x70);
317ASSERT_POSITION(segmentation, 0x80); 291ASSERT_POSITION(segmentation, 0x80);
318ASSERT_POSITION(loop_filter, 0xE4); 292ASSERT_POSITION(loop_filter, 0xE4);
319ASSERT_POSITION(surface_params, 0xF0);
320#undef ASSERT_POSITION 293#undef ASSERT_POSITION
321 294
322#define ASSERT_POSITION(field_name, position) \ 295#define ASSERT_POSITION(field_name, position) \
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 1aa43523a..7f4ca6282 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -475,10 +475,10 @@ public:
475 475
476 // These values are used by Nouveau and some games. 476 // These values are used by Nouveau and some games.
477 AddGL = 0x8006, 477 AddGL = 0x8006,
478 SubtractGL = 0x8007, 478 MinGL = 0x8007,
479 ReverseSubtractGL = 0x8008, 479 MaxGL = 0x8008,
480 MinGL = 0x800a, 480 SubtractGL = 0x800a,
481 MaxGL = 0x800b 481 ReverseSubtractGL = 0x800b
482 }; 482 };
483 483
484 enum class Factor : u32 { 484 enum class Factor : u32 {
diff --git a/src/video_core/renderer_opengl/gl_device.h b/src/video_core/renderer_opengl/gl_device.h
index ee992aed4..de9e41659 100644
--- a/src/video_core/renderer_opengl/gl_device.h
+++ b/src/video_core/renderer_opengl/gl_device.h
@@ -156,6 +156,10 @@ public:
156 return shader_backend; 156 return shader_backend;
157 } 157 }
158 158
159 bool IsAmd() const {
160 return vendor_name == "ATI Technologies Inc.";
161 }
162
159private: 163private:
160 static bool TestVariableAoffi(); 164 static bool TestVariableAoffi();
161 static bool TestPreciseBug(); 165 static bool TestPreciseBug();
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 1f4dda17e..b0e14182e 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -219,6 +219,7 @@ ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindo
219 host_info{ 219 host_info{
220 .support_float16 = false, 220 .support_float16 = false,
221 .support_int64 = device.HasShaderInt64(), 221 .support_int64 = device.HasShaderInt64(),
222 .needs_demote_reorder = device.IsAmd(),
222 } { 223 } {
223 if (use_asynchronous_shaders) { 224 if (use_asynchronous_shaders) {
224 workers = CreateWorkers(); 225 workers = CreateWorkers();
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7c9b0d6db..9ff0a28cd 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -164,7 +164,8 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
164 blit_screen.Recreate(); 164 blit_screen.Recreate();
165 } 165 }
166 const VkSemaphore render_semaphore = blit_screen.DrawToSwapchain(*framebuffer, use_accelerated); 166 const VkSemaphore render_semaphore = blit_screen.DrawToSwapchain(*framebuffer, use_accelerated);
167 scheduler.Flush(render_semaphore); 167 const VkSemaphore present_semaphore = swapchain.CurrentPresentSemaphore();
168 scheduler.Flush(render_semaphore, present_semaphore);
168 scheduler.WaitWorker(); 169 scheduler.WaitWorker();
169 swapchain.Present(render_semaphore); 170 swapchain.Present(render_semaphore);
170 171
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 3a78c9daa..888bc7392 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -159,11 +159,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
159 159
160 const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset; 160 const VAddr framebuffer_addr = framebuffer.address + framebuffer.offset;
161 const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr); 161 const u8* const host_ptr = cpu_memory.GetPointer(framebuffer_addr);
162 const size_t size_bytes = GetSizeInBytes(framebuffer);
163 162
164 // TODO(Rodrigo): Read this from HLE 163 // TODO(Rodrigo): Read this from HLE
165 constexpr u32 block_height_log2 = 4; 164 constexpr u32 block_height_log2 = 4;
166 const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer); 165 const u32 bytes_per_pixel = GetBytesPerPixel(framebuffer);
166 const u64 size_bytes{Tegra::Texture::CalculateSize(true, bytes_per_pixel,
167 framebuffer.stride, framebuffer.height,
168 1, block_height_log2, 0)};
167 Tegra::Texture::UnswizzleTexture( 169 Tegra::Texture::UnswizzleTexture(
168 mapped_span.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes), 170 mapped_span.subspan(image_offset, size_bytes), std::span(host_ptr, size_bytes),
169 bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0); 171 bytes_per_pixel, framebuffer.width, framebuffer.height, 1, block_height_log2, 0);
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index f316c4f92..31bfbcb06 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -325,6 +325,8 @@ PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxw
325 host_info = Shader::HostTranslateInfo{ 325 host_info = Shader::HostTranslateInfo{
326 .support_float16 = device.IsFloat16Supported(), 326 .support_float16 = device.IsFloat16Supported(),
327 .support_int64 = device.IsShaderInt64Supported(), 327 .support_int64 = device.IsShaderInt64Supported(),
328 .needs_demote_reorder = driver_id == VK_DRIVER_ID_AMD_PROPRIETARY_KHR ||
329 driver_id == VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR,
328 }; 330 };
329} 331}
330 332
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 4840962de..1d438787a 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -55,14 +55,14 @@ VKScheduler::~VKScheduler() {
55 worker_thread.join(); 55 worker_thread.join();
56} 56}
57 57
58void VKScheduler::Flush(VkSemaphore semaphore) { 58void VKScheduler::Flush(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
59 SubmitExecution(semaphore); 59 SubmitExecution(signal_semaphore, wait_semaphore);
60 AllocateNewContext(); 60 AllocateNewContext();
61} 61}
62 62
63void VKScheduler::Finish(VkSemaphore semaphore) { 63void VKScheduler::Finish(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
64 const u64 presubmit_tick = CurrentTick(); 64 const u64 presubmit_tick = CurrentTick();
65 SubmitExecution(semaphore); 65 SubmitExecution(signal_semaphore, wait_semaphore);
66 WaitWorker(); 66 WaitWorker();
67 Wait(presubmit_tick); 67 Wait(presubmit_tick);
68 AllocateNewContext(); 68 AllocateNewContext();
@@ -171,37 +171,41 @@ void VKScheduler::AllocateWorkerCommandBuffer() {
171 }); 171 });
172} 172}
173 173
174void VKScheduler::SubmitExecution(VkSemaphore semaphore) { 174void VKScheduler::SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore) {
175 EndPendingOperations(); 175 EndPendingOperations();
176 InvalidateState(); 176 InvalidateState();
177 177
178 const u64 signal_value = master_semaphore->NextTick(); 178 const u64 signal_value = master_semaphore->NextTick();
179 Record([semaphore, signal_value, this](vk::CommandBuffer cmdbuf) { 179 Record([signal_semaphore, wait_semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
180 cmdbuf.End(); 180 cmdbuf.End();
181
182 const u32 num_signal_semaphores = semaphore ? 2U : 1U;
183
184 const u64 wait_value = signal_value - 1;
185 const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
186
187 const VkSemaphore timeline_semaphore = master_semaphore->Handle(); 181 const VkSemaphore timeline_semaphore = master_semaphore->Handle();
182
183 const u32 num_signal_semaphores = signal_semaphore ? 2U : 1U;
188 const std::array signal_values{signal_value, u64(0)}; 184 const std::array signal_values{signal_value, u64(0)};
189 const std::array signal_semaphores{timeline_semaphore, semaphore}; 185 const std::array signal_semaphores{timeline_semaphore, signal_semaphore};
186
187 const u32 num_wait_semaphores = wait_semaphore ? 2U : 1U;
188 const std::array wait_values{signal_value - 1, u64(1)};
189 const std::array wait_semaphores{timeline_semaphore, wait_semaphore};
190 static constexpr std::array<VkPipelineStageFlags, 2> wait_stage_masks{
191 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
192 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
193 };
190 194
191 const VkTimelineSemaphoreSubmitInfoKHR timeline_si{ 195 const VkTimelineSemaphoreSubmitInfoKHR timeline_si{
192 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR, 196 .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
193 .pNext = nullptr, 197 .pNext = nullptr,
194 .waitSemaphoreValueCount = 1, 198 .waitSemaphoreValueCount = num_wait_semaphores,
195 .pWaitSemaphoreValues = &wait_value, 199 .pWaitSemaphoreValues = wait_values.data(),
196 .signalSemaphoreValueCount = num_signal_semaphores, 200 .signalSemaphoreValueCount = num_signal_semaphores,
197 .pSignalSemaphoreValues = signal_values.data(), 201 .pSignalSemaphoreValues = signal_values.data(),
198 }; 202 };
199 const VkSubmitInfo submit_info{ 203 const VkSubmitInfo submit_info{
200 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO, 204 .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
201 .pNext = &timeline_si, 205 .pNext = &timeline_si,
202 .waitSemaphoreCount = 1, 206 .waitSemaphoreCount = num_wait_semaphores,
203 .pWaitSemaphores = &timeline_semaphore, 207 .pWaitSemaphores = wait_semaphores.data(),
204 .pWaitDstStageMask = &wait_stage_mask, 208 .pWaitDstStageMask = wait_stage_masks.data(),
205 .commandBufferCount = 1, 209 .commandBufferCount = 1,
206 .pCommandBuffers = cmdbuf.address(), 210 .pCommandBuffers = cmdbuf.address(),
207 .signalSemaphoreCount = num_signal_semaphores, 211 .signalSemaphoreCount = num_signal_semaphores,
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index cf39a2363..759ed5a48 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -34,10 +34,10 @@ public:
34 ~VKScheduler(); 34 ~VKScheduler();
35 35
36 /// Sends the current execution context to the GPU. 36 /// Sends the current execution context to the GPU.
37 void Flush(VkSemaphore semaphore = nullptr); 37 void Flush(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
38 38
39 /// Sends the current execution context to the GPU and waits for it to complete. 39 /// Sends the current execution context to the GPU and waits for it to complete.
40 void Finish(VkSemaphore semaphore = nullptr); 40 void Finish(VkSemaphore signal_semaphore = nullptr, VkSemaphore wait_semaphore = nullptr);
41 41
42 /// Waits for the worker thread to finish executing everything. After this function returns it's 42 /// Waits for the worker thread to finish executing everything. After this function returns it's
43 /// safe to touch worker resources. 43 /// safe to touch worker resources.
@@ -191,7 +191,7 @@ private:
191 191
192 void AllocateWorkerCommandBuffer(); 192 void AllocateWorkerCommandBuffer();
193 193
194 void SubmitExecution(VkSemaphore semaphore); 194 void SubmitExecution(VkSemaphore signal_semaphore, VkSemaphore wait_semaphore);
195 195
196 void AllocateNewContext(); 196 void AllocateNewContext();
197 197
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index e5318e52d..aadf03cb0 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -106,14 +106,12 @@ void VKSwapchain::AcquireNextImage() {
106} 106}
107 107
108void VKSwapchain::Present(VkSemaphore render_semaphore) { 108void VKSwapchain::Present(VkSemaphore render_semaphore) {
109 const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
110 const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
111 const auto present_queue{device.GetPresentQueue()}; 109 const auto present_queue{device.GetPresentQueue()};
112 const VkPresentInfoKHR present_info{ 110 const VkPresentInfoKHR present_info{
113 .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, 111 .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
114 .pNext = nullptr, 112 .pNext = nullptr,
115 .waitSemaphoreCount = render_semaphore ? 2U : 1U, 113 .waitSemaphoreCount = render_semaphore ? 1U : 0U,
116 .pWaitSemaphores = semaphores.data(), 114 .pWaitSemaphores = &render_semaphore,
117 .swapchainCount = 1, 115 .swapchainCount = 1,
118 .pSwapchains = swapchain.address(), 116 .pSwapchains = swapchain.address(),
119 .pImageIndices = &image_index, 117 .pImageIndices = &image_index,
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.h b/src/video_core/renderer_vulkan/vk_swapchain.h
index cd472dd0a..5bce41e21 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.h
+++ b/src/video_core/renderer_vulkan/vk_swapchain.h
@@ -72,6 +72,10 @@ public:
72 return image_view_format; 72 return image_view_format;
73 } 73 }
74 74
75 VkSemaphore CurrentPresentSemaphore() const {
76 return *present_semaphores[frame_index];
77 }
78
75private: 79private:
76 void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height, 80 void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
77 bool srgb); 81 bool srgb);
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index ff1feda9b..0c17a791b 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -80,7 +80,7 @@ struct ImageBase {
80 VAddr cpu_addr_end = 0; 80 VAddr cpu_addr_end = 0;
81 81
82 u64 modification_tick = 0; 82 u64 modification_tick = 0;
83 u64 frame_tick = 0; 83 size_t lru_index = SIZE_MAX;
84 84
85 std::array<u32, MAX_MIP_LEVELS> mip_level_offsets{}; 85 std::array<u32, MAX_MIP_LEVELS> mip_level_offsets{};
86 86
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index a087498ff..24b809242 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include "common/alignment.h" 7#include "common/alignment.h"
8#include "common/settings.h"
9#include "video_core/dirty_flags.h" 8#include "video_core/dirty_flags.h"
10#include "video_core/texture_cache/samples_helper.h" 9#include "video_core/texture_cache/samples_helper.h"
11#include "video_core/texture_cache/texture_cache_base.h" 10#include "video_core/texture_cache/texture_cache_base.h"
@@ -43,8 +42,6 @@ TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface&
43 void(slot_image_views.insert(runtime, NullImageParams{})); 42 void(slot_image_views.insert(runtime, NullImageParams{}));
44 void(slot_samplers.insert(runtime, sampler_descriptor)); 43 void(slot_samplers.insert(runtime, sampler_descriptor));
45 44
46 deletion_iterator = slot_images.begin();
47
48 if constexpr (HAS_DEVICE_MEMORY_INFO) { 45 if constexpr (HAS_DEVICE_MEMORY_INFO) {
49 const auto device_memory = runtime.GetDeviceLocalMemory(); 46 const auto device_memory = runtime.GetDeviceLocalMemory();
50 const u64 possible_expected_memory = (device_memory * 3) / 10; 47 const u64 possible_expected_memory = (device_memory * 3) / 10;
@@ -64,70 +61,38 @@ template <class P>
64void TextureCache<P>::RunGarbageCollector() { 61void TextureCache<P>::RunGarbageCollector() {
65 const bool high_priority_mode = total_used_memory >= expected_memory; 62 const bool high_priority_mode = total_used_memory >= expected_memory;
66 const bool aggressive_mode = total_used_memory >= critical_memory; 63 const bool aggressive_mode = total_used_memory >= critical_memory;
67 const u64 ticks_to_destroy = high_priority_mode ? 60 : 100; 64 const u64 ticks_to_destroy = aggressive_mode ? 10ULL : high_priority_mode ? 25ULL : 100ULL;
68 int num_iterations = aggressive_mode ? 256 : (high_priority_mode ? 128 : 64); 65 size_t num_iterations = aggressive_mode ? 10000 : (high_priority_mode ? 100 : 5);
69 for (; num_iterations > 0; --num_iterations) { 66 const auto clean_up = [this, &num_iterations, high_priority_mode](ImageId image_id) {
70 if (deletion_iterator == slot_images.end()) { 67 if (num_iterations == 0) {
71 deletion_iterator = slot_images.begin(); 68 return true;
72 if (deletion_iterator == slot_images.end()) {
73 break;
74 }
75 } 69 }
76 auto [image_id, image_tmp] = *deletion_iterator; 70 --num_iterations;
77 Image* image = image_tmp; // fix clang error. 71 auto& image = slot_images[image_id];
78 const bool is_alias = True(image->flags & ImageFlagBits::Alias); 72 const bool must_download = image.IsSafeDownload();
79 const bool is_bad_overlap = True(image->flags & ImageFlagBits::BadOverlap); 73 if (!high_priority_mode && must_download) {
80 const bool must_download = image->IsSafeDownload(); 74 return false;
81 bool should_care = is_bad_overlap || is_alias || (high_priority_mode && !must_download);
82 const u64 ticks_needed =
83 is_bad_overlap
84 ? ticks_to_destroy >> 4
85 : ((should_care && aggressive_mode) ? ticks_to_destroy >> 1 : ticks_to_destroy);
86 should_care |= aggressive_mode;
87 if (should_care && image->frame_tick + ticks_needed < frame_tick) {
88 if (is_bad_overlap) {
89 const bool overlap_check = std::ranges::all_of(
90 image->overlapping_images, [&, image](const ImageId& overlap_id) {
91 auto& overlap = slot_images[overlap_id];
92 return overlap.frame_tick >= image->frame_tick;
93 });
94 if (!overlap_check) {
95 ++deletion_iterator;
96 continue;
97 }
98 }
99 if (!is_bad_overlap && must_download) {
100 const bool alias_check = std::ranges::none_of(
101 image->aliased_images, [&, image](const AliasedImage& alias) {
102 auto& alias_image = slot_images[alias.id];
103 return (alias_image.frame_tick < image->frame_tick) ||
104 (alias_image.modification_tick < image->modification_tick);
105 });
106
107 if (alias_check) {
108 auto map = runtime.DownloadStagingBuffer(image->unswizzled_size_bytes);
109 const auto copies = FullDownloadCopies(image->info);
110 image->DownloadMemory(map, copies);
111 runtime.Finish();
112 SwizzleImage(gpu_memory, image->gpu_addr, image->info, copies, map.mapped_span);
113 }
114 }
115 if (True(image->flags & ImageFlagBits::Tracked)) {
116 UntrackImage(*image, image_id);
117 }
118 UnregisterImage(image_id);
119 DeleteImage(image_id);
120 if (is_bad_overlap) {
121 ++num_iterations;
122 }
123 } 75 }
124 ++deletion_iterator; 76 if (must_download) {
125 } 77 auto map = runtime.DownloadStagingBuffer(image.unswizzled_size_bytes);
78 const auto copies = FullDownloadCopies(image.info);
79 image.DownloadMemory(map, copies);
80 runtime.Finish();
81 SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
82 }
83 if (True(image.flags & ImageFlagBits::Tracked)) {
84 UntrackImage(image, image_id);
85 }
86 UnregisterImage(image_id);
87 DeleteImage(image_id);
88 return false;
89 };
90 lru_cache.ForEachItemBelow(frame_tick - ticks_to_destroy, clean_up);
126} 91}
127 92
128template <class P> 93template <class P>
129void TextureCache<P>::TickFrame() { 94void TextureCache<P>::TickFrame() {
130 if (Settings::values.use_caches_gc.GetValue() && total_used_memory > minimum_memory) { 95 if (total_used_memory > minimum_memory) {
131 RunGarbageCollector(); 96 RunGarbageCollector();
132 } 97 }
133 sentenced_images.Tick(); 98 sentenced_images.Tick();
@@ -1078,6 +1043,8 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
1078 tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format); 1043 tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
1079 } 1044 }
1080 total_used_memory += Common::AlignUp(tentative_size, 1024); 1045 total_used_memory += Common::AlignUp(tentative_size, 1024);
1046 image.lru_index = lru_cache.Insert(image_id, frame_tick);
1047
1081 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, 1048 ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
1082 [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); }); 1049 [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); });
1083 if (False(image.flags & ImageFlagBits::Sparse)) { 1050 if (False(image.flags & ImageFlagBits::Sparse)) {
@@ -1115,6 +1082,7 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
1115 tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format); 1082 tentative_size = EstimatedDecompressedSize(tentative_size, image.info.format);
1116 } 1083 }
1117 total_used_memory -= Common::AlignUp(tentative_size, 1024); 1084 total_used_memory -= Common::AlignUp(tentative_size, 1024);
1085 lru_cache.Free(image.lru_index);
1118 const auto& clear_page_table = 1086 const auto& clear_page_table =
1119 [this, image_id]( 1087 [this, image_id](
1120 u64 page, 1088 u64 page,
@@ -1384,7 +1352,7 @@ void TextureCache<P>::PrepareImage(ImageId image_id, bool is_modification, bool
1384 if (is_modification) { 1352 if (is_modification) {
1385 MarkModification(image); 1353 MarkModification(image);
1386 } 1354 }
1387 image.frame_tick = frame_tick; 1355 lru_cache.Touch(image.lru_index, frame_tick);
1388} 1356}
1389 1357
1390template <class P> 1358template <class P>
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index e4ae351cb..d7528ed24 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -14,6 +14,7 @@
14 14
15#include "common/common_types.h" 15#include "common/common_types.h"
16#include "common/literals.h" 16#include "common/literals.h"
17#include "common/lru_cache.h"
17#include "video_core/compatible_formats.h" 18#include "video_core/compatible_formats.h"
18#include "video_core/delayed_destruction_ring.h" 19#include "video_core/delayed_destruction_ring.h"
19#include "video_core/engines/fermi_2d.h" 20#include "video_core/engines/fermi_2d.h"
@@ -370,6 +371,12 @@ private:
370 std::vector<ImageId> uncommitted_downloads; 371 std::vector<ImageId> uncommitted_downloads;
371 std::queue<std::vector<ImageId>> committed_downloads; 372 std::queue<std::vector<ImageId>> committed_downloads;
372 373
374 struct LRUItemParams {
375 using ObjectType = ImageId;
376 using TickType = u64;
377 };
378 Common::LeastRecentlyUsedCache<LRUItemParams> lru_cache;
379
373 static constexpr size_t TICKS_TO_DESTROY = 6; 380 static constexpr size_t TICKS_TO_DESTROY = 6;
374 DelayedDestructionRing<Image, TICKS_TO_DESTROY> sentenced_images; 381 DelayedDestructionRing<Image, TICKS_TO_DESTROY> sentenced_images;
375 DelayedDestructionRing<ImageView, TICKS_TO_DESTROY> sentenced_image_view; 382 DelayedDestructionRing<ImageView, TICKS_TO_DESTROY> sentenced_image_view;
@@ -379,7 +386,6 @@ private:
379 386
380 u64 modification_tick = 0; 387 u64 modification_tick = 0;
381 u64 frame_tick = 0; 388 u64 frame_tick = 0;
382 typename SlotVector<Image>::Iterator deletion_iterator;
383}; 389};
384 390
385} // namespace VideoCommon 391} // namespace VideoCommon
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index c010b9353..24e943e4c 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -63,14 +63,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
63 const u32 unswizzled_offset = 63 const u32 unswizzled_offset =
64 slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL; 64 slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
65 65
66 if (const auto offset = (TO_LINEAR ? unswizzled_offset : swizzled_offset);
67 offset >= input.size()) {
68 // TODO(Rodrigo): This is an out of bounds access that should never happen. To
69 // avoid crashing the emulator, break.
70 ASSERT_MSG(false, "offset {} exceeds input size {}!", offset, input.size());
71 break;
72 }
73
74 u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset]; 66 u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
75 const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset]; 67 const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
76 68
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 377795326..85d292bcc 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -818,7 +818,6 @@ void Config::ReadRendererValues() {
818 ReadGlobalSetting(Settings::values.shader_backend); 818 ReadGlobalSetting(Settings::values.shader_backend);
819 ReadGlobalSetting(Settings::values.use_asynchronous_shaders); 819 ReadGlobalSetting(Settings::values.use_asynchronous_shaders);
820 ReadGlobalSetting(Settings::values.use_fast_gpu_time); 820 ReadGlobalSetting(Settings::values.use_fast_gpu_time);
821 ReadGlobalSetting(Settings::values.use_caches_gc);
822 ReadGlobalSetting(Settings::values.bg_red); 821 ReadGlobalSetting(Settings::values.bg_red);
823 ReadGlobalSetting(Settings::values.bg_green); 822 ReadGlobalSetting(Settings::values.bg_green);
824 ReadGlobalSetting(Settings::values.bg_blue); 823 ReadGlobalSetting(Settings::values.bg_blue);
@@ -1359,7 +1358,6 @@ void Config::SaveRendererValues() {
1359 Settings::values.shader_backend.UsingGlobal()); 1358 Settings::values.shader_backend.UsingGlobal());
1360 WriteGlobalSetting(Settings::values.use_asynchronous_shaders); 1359 WriteGlobalSetting(Settings::values.use_asynchronous_shaders);
1361 WriteGlobalSetting(Settings::values.use_fast_gpu_time); 1360 WriteGlobalSetting(Settings::values.use_fast_gpu_time);
1362 WriteGlobalSetting(Settings::values.use_caches_gc);
1363 WriteGlobalSetting(Settings::values.bg_red); 1361 WriteGlobalSetting(Settings::values.bg_red);
1364 WriteGlobalSetting(Settings::values.bg_green); 1362 WriteGlobalSetting(Settings::values.bg_green);
1365 WriteGlobalSetting(Settings::values.bg_blue); 1363 WriteGlobalSetting(Settings::values.bg_blue);
diff --git a/src/yuzu/configuration/configure_graphics.ui b/src/yuzu/configuration/configure_graphics.ui
index 099ddbb7c..43f1887d1 100644
--- a/src/yuzu/configuration/configure_graphics.ui
+++ b/src/yuzu/configuration/configure_graphics.ui
@@ -156,7 +156,7 @@
156 <item> 156 <item>
157 <widget class="QCheckBox" name="use_disk_shader_cache"> 157 <widget class="QCheckBox" name="use_disk_shader_cache">
158 <property name="text"> 158 <property name="text">
159 <string>Use disk shader cache</string> 159 <string>Use disk pipeline cache</string>
160 </property> 160 </property>
161 </widget> 161 </widget>
162 </item> 162 </item>
diff --git a/src/yuzu/configuration/configure_graphics_advanced.cpp b/src/yuzu/configuration/configure_graphics_advanced.cpp
index a31b8e192..bfd464061 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.cpp
+++ b/src/yuzu/configuration/configure_graphics_advanced.cpp
@@ -28,7 +28,6 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {
28 28
29 ui->use_vsync->setChecked(Settings::values.use_vsync.GetValue()); 29 ui->use_vsync->setChecked(Settings::values.use_vsync.GetValue());
30 ui->use_asynchronous_shaders->setChecked(Settings::values.use_asynchronous_shaders.GetValue()); 30 ui->use_asynchronous_shaders->setChecked(Settings::values.use_asynchronous_shaders.GetValue());
31 ui->use_caches_gc->setChecked(Settings::values.use_caches_gc.GetValue());
32 ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time.GetValue()); 31 ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time.GetValue());
33 32
34 if (Settings::IsConfiguringGlobal()) { 33 if (Settings::IsConfiguringGlobal()) {
@@ -55,8 +54,6 @@ void ConfigureGraphicsAdvanced::ApplyConfiguration() {
55 ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_asynchronous_shaders, 54 ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_asynchronous_shaders,
56 ui->use_asynchronous_shaders, 55 ui->use_asynchronous_shaders,
57 use_asynchronous_shaders); 56 use_asynchronous_shaders);
58 ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_caches_gc, ui->use_caches_gc,
59 use_caches_gc);
60 ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_fast_gpu_time, 57 ConfigurationShared::ApplyPerGameSetting(&Settings::values.use_fast_gpu_time,
61 ui->use_fast_gpu_time, use_fast_gpu_time); 58 ui->use_fast_gpu_time, use_fast_gpu_time);
62} 59}
@@ -81,7 +78,6 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
81 ui->use_asynchronous_shaders->setEnabled( 78 ui->use_asynchronous_shaders->setEnabled(
82 Settings::values.use_asynchronous_shaders.UsingGlobal()); 79 Settings::values.use_asynchronous_shaders.UsingGlobal());
83 ui->use_fast_gpu_time->setEnabled(Settings::values.use_fast_gpu_time.UsingGlobal()); 80 ui->use_fast_gpu_time->setEnabled(Settings::values.use_fast_gpu_time.UsingGlobal());
84 ui->use_caches_gc->setEnabled(Settings::values.use_caches_gc.UsingGlobal());
85 ui->anisotropic_filtering_combobox->setEnabled( 81 ui->anisotropic_filtering_combobox->setEnabled(
86 Settings::values.max_anisotropy.UsingGlobal()); 82 Settings::values.max_anisotropy.UsingGlobal());
87 83
@@ -94,8 +90,6 @@ void ConfigureGraphicsAdvanced::SetupPerGameUI() {
94 use_asynchronous_shaders); 90 use_asynchronous_shaders);
95 ConfigurationShared::SetColoredTristate(ui->use_fast_gpu_time, 91 ConfigurationShared::SetColoredTristate(ui->use_fast_gpu_time,
96 Settings::values.use_fast_gpu_time, use_fast_gpu_time); 92 Settings::values.use_fast_gpu_time, use_fast_gpu_time);
97 ConfigurationShared::SetColoredTristate(ui->use_caches_gc, Settings::values.use_caches_gc,
98 use_caches_gc);
99 ConfigurationShared::SetColoredComboBox( 93 ConfigurationShared::SetColoredComboBox(
100 ui->gpu_accuracy, ui->label_gpu_accuracy, 94 ui->gpu_accuracy, ui->label_gpu_accuracy,
101 static_cast<int>(Settings::values.gpu_accuracy.GetValue(true))); 95 static_cast<int>(Settings::values.gpu_accuracy.GetValue(true)));
diff --git a/src/yuzu/configuration/configure_graphics_advanced.h b/src/yuzu/configuration/configure_graphics_advanced.h
index 7356e6916..13ba4ff6b 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.h
+++ b/src/yuzu/configuration/configure_graphics_advanced.h
@@ -37,5 +37,4 @@ private:
37 ConfigurationShared::CheckState use_vsync; 37 ConfigurationShared::CheckState use_vsync;
38 ConfigurationShared::CheckState use_asynchronous_shaders; 38 ConfigurationShared::CheckState use_asynchronous_shaders;
39 ConfigurationShared::CheckState use_fast_gpu_time; 39 ConfigurationShared::CheckState use_fast_gpu_time;
40 ConfigurationShared::CheckState use_caches_gc;
41}; 40};
diff --git a/src/yuzu/configuration/configure_graphics_advanced.ui b/src/yuzu/configuration/configure_graphics_advanced.ui
index 4fe6b86ae..b91abc2f0 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.ui
+++ b/src/yuzu/configuration/configure_graphics_advanced.ui
@@ -82,7 +82,7 @@
82 <string>Enables asynchronous shader compilation, which may reduce shader stutter. This feature is experimental.</string> 82 <string>Enables asynchronous shader compilation, which may reduce shader stutter. This feature is experimental.</string>
83 </property> 83 </property>
84 <property name="text"> 84 <property name="text">
85 <string>Use asynchronous shader building (hack)</string> 85 <string>Use asynchronous shader building (Hack)</string>
86 </property> 86 </property>
87 </widget> 87 </widget>
88 </item> 88 </item>
@@ -92,17 +92,7 @@
92 <string>Enables Fast GPU Time. This option will force most games to run at their highest native resolution.</string> 92 <string>Enables Fast GPU Time. This option will force most games to run at their highest native resolution.</string>
93 </property> 93 </property>
94 <property name="text"> 94 <property name="text">
95 <string>Use Fast GPU Time (hack)</string> 95 <string>Use Fast GPU Time (Hack)</string>
96 </property>
97 </widget>
98 </item>
99 <item>
100 <widget class="QCheckBox" name="use_caches_gc">
101 <property name="toolTip">
102 <string>Enables garbage collection for the GPU caches, this will try to keep VRAM within 3-4 GB by flushing the least used textures/buffers. May cause issues in a few games.</string>
103 </property>
104 <property name="text">
105 <string>Enable GPU cache garbage collection (experimental)</string>
106 </property> 96 </property>
107 </widget> 97 </widget>
108 </item> 98 </item>
diff --git a/src/yuzu/game_list.cpp b/src/yuzu/game_list.cpp
index e97804220..f9d949e75 100644
--- a/src/yuzu/game_list.cpp
+++ b/src/yuzu/game_list.cpp
@@ -515,16 +515,16 @@ void GameList::AddGamePopup(QMenu& context_menu, u64 program_id, const std::stri
515 QAction* open_save_location = context_menu.addAction(tr("Open Save Data Location")); 515 QAction* open_save_location = context_menu.addAction(tr("Open Save Data Location"));
516 QAction* open_mod_location = context_menu.addAction(tr("Open Mod Data Location")); 516 QAction* open_mod_location = context_menu.addAction(tr("Open Mod Data Location"));
517 QAction* open_transferable_shader_cache = 517 QAction* open_transferable_shader_cache =
518 context_menu.addAction(tr("Open Transferable Shader Cache")); 518 context_menu.addAction(tr("Open Transferable Pipeline Cache"));
519 context_menu.addSeparator(); 519 context_menu.addSeparator();
520 QMenu* remove_menu = context_menu.addMenu(tr("Remove")); 520 QMenu* remove_menu = context_menu.addMenu(tr("Remove"));
521 QAction* remove_update = remove_menu->addAction(tr("Remove Installed Update")); 521 QAction* remove_update = remove_menu->addAction(tr("Remove Installed Update"));
522 QAction* remove_dlc = remove_menu->addAction(tr("Remove All Installed DLC")); 522 QAction* remove_dlc = remove_menu->addAction(tr("Remove All Installed DLC"));
523 QAction* remove_custom_config = remove_menu->addAction(tr("Remove Custom Configuration")); 523 QAction* remove_custom_config = remove_menu->addAction(tr("Remove Custom Configuration"));
524 QAction* remove_gl_shader_cache = remove_menu->addAction(tr("Remove OpenGL Shader Cache")); 524 QAction* remove_gl_shader_cache = remove_menu->addAction(tr("Remove OpenGL Pipeline Cache"));
525 QAction* remove_vk_shader_cache = remove_menu->addAction(tr("Remove Vulkan Shader Cache")); 525 QAction* remove_vk_shader_cache = remove_menu->addAction(tr("Remove Vulkan Pipeline Cache"));
526 remove_menu->addSeparator(); 526 remove_menu->addSeparator();
527 QAction* remove_shader_cache = remove_menu->addAction(tr("Remove All Shader Caches")); 527 QAction* remove_shader_cache = remove_menu->addAction(tr("Remove All Pipeline Caches"));
528 QAction* remove_all_content = remove_menu->addAction(tr("Remove All Installed Contents")); 528 QAction* remove_all_content = remove_menu->addAction(tr("Remove All Installed Contents"));
529 QMenu* dump_romfs_menu = context_menu.addMenu(tr("Dump RomFS")); 529 QMenu* dump_romfs_menu = context_menu.addMenu(tr("Dump RomFS"));
530 QAction* dump_romfs = dump_romfs_menu->addAction(tr("Dump RomFS")); 530 QAction* dump_romfs = dump_romfs_menu->addAction(tr("Dump RomFS"));
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index 4f14be524..757dd1ea0 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -468,7 +468,6 @@ void Config::ReadValues() {
468 ReadSetting("Renderer", Settings::values.use_nvdec_emulation); 468 ReadSetting("Renderer", Settings::values.use_nvdec_emulation);
469 ReadSetting("Renderer", Settings::values.accelerate_astc); 469 ReadSetting("Renderer", Settings::values.accelerate_astc);
470 ReadSetting("Renderer", Settings::values.use_fast_gpu_time); 470 ReadSetting("Renderer", Settings::values.use_fast_gpu_time);
471 ReadSetting("Renderer", Settings::values.use_caches_gc);
472 471
473 ReadSetting("Renderer", Settings::values.bg_red); 472 ReadSetting("Renderer", Settings::values.bg_red);
474 ReadSetting("Renderer", Settings::values.bg_green); 473 ReadSetting("Renderer", Settings::values.bg_green);