summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/CMakeLists.txt2
-rw-r--r--src/audio_core/CMakeLists.txt3
-rw-r--r--src/audio_core/audio_renderer.cpp32
-rw-r--r--src/audio_core/audio_renderer.h5
-rw-r--r--src/audio_core/behavior_info.cpp100
-rw-r--r--src/audio_core/behavior_info.h66
-rw-r--r--src/audio_core/common.h47
-rw-r--r--src/common/bit_field.h2
-rw-r--r--src/common/uuid.h5
-rw-r--r--src/core/arm/dynarmic/arm_dynarmic_64.cpp17
-rw-r--r--src/core/arm/unicorn/arm_unicorn.cpp16
-rw-r--r--src/core/arm/unicorn/arm_unicorn.h7
-rw-r--r--src/core/crypto/key_manager.cpp3
-rw-r--r--src/core/crypto/partition_data_manager.cpp7
-rw-r--r--src/core/file_sys/program_metadata.cpp11
-rw-r--r--src/core/file_sys/program_metadata.h6
-rw-r--r--src/core/gdbstub/gdbstub.cpp7
-rw-r--r--src/core/hle/kernel/handle_table.cpp2
-rw-r--r--src/core/hle/kernel/memory/memory_block.h23
-rw-r--r--src/core/hle/kernel/memory/memory_block_manager.cpp36
-rw-r--r--src/core/hle/kernel/memory/memory_block_manager.h3
-rw-r--r--src/core/hle/kernel/memory/page_table.cpp44
-rw-r--r--src/core/hle/kernel/memory/page_table.h2
-rw-r--r--src/core/hle/kernel/mutex.cpp5
-rw-r--r--src/core/hle/kernel/physical_core.cpp4
-rw-r--r--src/core/hle/kernel/process_capability.cpp30
-rw-r--r--src/core/hle/kernel/readable_event.cpp13
-rw-r--r--src/core/hle/kernel/resource_limit.cpp2
-rw-r--r--src/core/hle/kernel/shared_memory.cpp8
-rw-r--r--src/core/hle/kernel/shared_memory.h2
-rw-r--r--src/core/hle/kernel/svc.cpp154
-rw-r--r--src/core/hle/kernel/thread.cpp5
-rw-r--r--src/core/hle/service/acc/acc.cpp59
-rw-r--r--src/core/hle/service/acc/acc.h3
-rw-r--r--src/core/hle/service/acc/acc_su.cpp4
-rw-r--r--src/core/hle/service/acc/acc_u0.cpp2
-rw-r--r--src/core/hle/service/acc/acc_u1.cpp3
-rw-r--r--src/core/hle/service/am/am.cpp71
-rw-r--r--src/core/hle/service/am/am.h3
-rw-r--r--src/core/hle/service/audio/audctl.cpp2
-rw-r--r--src/core/hle/service/audio/audin_u.cpp70
-rw-r--r--src/core/hle/service/audio/audin_u.h29
-rw-r--r--src/core/hle/service/audio/audren_u.cpp13
-rw-r--r--src/core/hle/service/bcat/backend/boxcat.cpp7
-rw-r--r--src/core/hle/service/bcat/module.cpp1
-rw-r--r--src/core/hle/service/caps/caps_su.cpp12
-rw-r--r--src/core/hle/service/caps/caps_su.h3
-rw-r--r--src/core/hle/service/es/es.cpp2
-rw-r--r--src/core/hle/service/filesystem/fsp_srv.cpp8
-rw-r--r--src/core/hle/service/friend/friend.cpp1
-rw-r--r--src/core/hle/service/glue/errors.h8
-rw-r--r--src/core/hle/service/hid/controllers/npad.cpp2
-rw-r--r--src/core/hle/service/hid/hid.cpp50
-rw-r--r--src/core/hle/service/hid/hid.h1
-rw-r--r--src/core/hle/service/ldr/ldr.cpp1
-rw-r--r--src/core/hle/service/ncm/ncm.cpp1
-rw-r--r--src/core/hle/service/nim/nim.cpp70
-rw-r--r--src/core/hle/service/npns/npns.cpp2
-rw-r--r--src/core/hle/service/ns/ns.cpp30
-rw-r--r--src/core/hle/service/ns/pl_u.cpp1
-rw-r--r--src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h5
-rw-r--r--src/core/hle/service/pctl/module.cpp2
-rw-r--r--src/core/hle/service/prepo/prepo.cpp12
-rw-r--r--src/core/hle/service/ptm/psm.cpp21
-rw-r--r--src/core/hle/service/set/set.cpp1
-rw-r--r--src/core/hle/service/set/set_cal.cpp2
-rw-r--r--src/core/hle/service/set/set_sys.cpp12
-rw-r--r--src/core/hle/service/sm/sm.cpp12
-rw-r--r--src/core/hle/service/sockets/bsd.cpp1
-rw-r--r--src/core/hle/service/time/time.cpp50
-rw-r--r--src/core/hle/service/time/time_zone_manager.cpp4
-rw-r--r--src/core/hle/service/vi/vi.cpp19
-rw-r--r--src/core/loader/elf.cpp5
-rw-r--r--src/core/loader/nro.cpp23
-rw-r--r--src/core/loader/nro.h2
-rw-r--r--src/core/reporter.h1
-rw-r--r--src/core/settings.cpp11
-rw-r--r--src/core/settings.h13
-rw-r--r--src/core/telemetry_session.cpp16
-rw-r--r--src/input_common/main.cpp6
-rw-r--r--src/tests/core/core_timing.cpp18
-rw-r--r--src/video_core/CMakeLists.txt26
-rw-r--r--src/video_core/buffer_cache/buffer_cache.h126
-rw-r--r--src/video_core/buffer_cache/map_interval.h18
-rw-r--r--src/video_core/dma_pusher.cpp31
-rw-r--r--src/video_core/dma_pusher.h1
-rw-r--r--src/video_core/engines/fermi_2d.cpp8
-rw-r--r--src/video_core/engines/fermi_2d.h3
-rw-r--r--src/video_core/engines/kepler_compute.cpp7
-rw-r--r--src/video_core/engines/kepler_compute.h3
-rw-r--r--src/video_core/engines/kepler_memory.cpp7
-rw-r--r--src/video_core/engines/kepler_memory.h3
-rw-r--r--src/video_core/engines/maxwell_3d.cpp90
-rw-r--r--src/video_core/engines/maxwell_3d.h15
-rw-r--r--src/video_core/engines/maxwell_dma.cpp18
-rw-r--r--src/video_core/engines/maxwell_dma.h3
-rw-r--r--src/video_core/engines/shader_bytecode.h7
-rw-r--r--src/video_core/fence_manager.h172
-rw-r--r--src/video_core/gpu.cpp84
-rw-r--r--src/video_core/gpu.h39
-rw-r--r--src/video_core/gpu_asynch.cpp4
-rw-r--r--src/video_core/gpu_asynch.h2
-rw-r--r--src/video_core/gpu_thread.cpp39
-rw-r--r--src/video_core/gpu_thread.h11
-rw-r--r--src/video_core/memory_manager.cpp18
-rw-r--r--src/video_core/query_cache.h46
-rw-r--r--src/video_core/rasterizer_interface.h21
-rw-r--r--src/video_core/renderer_opengl/gl_buffer_cache.cpp5
-rw-r--r--src/video_core/renderer_opengl/gl_buffer_cache.h2
-rw-r--r--src/video_core/renderer_opengl/gl_fence_manager.cpp72
-rw-r--r--src/video_core/renderer_opengl/gl_fence_manager.h53
-rw-r--r--src/video_core/renderer_opengl/gl_rasterizer.cpp164
-rw-r--r--src/video_core/renderer_opengl/gl_rasterizer.h14
-rw-r--r--src/video_core/renderer_opengl/gl_shader_cache.cpp99
-rw-r--r--src/video_core/renderer_opengl/gl_shader_cache.h3
-rw-r--r--src/video_core/renderer_opengl/gl_shader_decompiler.cpp68
-rw-r--r--src/video_core/renderer_opengl/gl_shader_decompiler.h33
-rw-r--r--src/video_core/renderer_vulkan/fixed_pipeline_state.cpp512
-rw-r--r--src/video_core/renderer_vulkan/fixed_pipeline_state.h372
-rw-r--r--src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp220
-rw-r--r--src/video_core/renderer_vulkan/nsight_aftermath_tracker.h87
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.cpp3
-rw-r--r--src/video_core/renderer_vulkan/renderer_vulkan.h1
-rw-r--r--src/video_core/renderer_vulkan/vk_blit_screen.h1
-rw-r--r--src/video_core/renderer_vulkan/vk_buffer_cache.cpp4
-rw-r--r--src/video_core/renderer_vulkan/vk_buffer_cache.h5
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pass.cpp2
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pass.h2
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pipeline.cpp4
-rw-r--r--src/video_core/renderer_vulkan/vk_compute_pipeline.h2
-rw-r--r--src/video_core/renderer_vulkan/vk_descriptor_pool.cpp1
-rw-r--r--src/video_core/renderer_vulkan/vk_descriptor_pool.h2
-rw-r--r--src/video_core/renderer_vulkan/vk_device.cpp36
-rw-r--r--src/video_core/renderer_vulkan/vk_device.h19
-rw-r--r--src/video_core/renderer_vulkan/vk_fence_manager.cpp101
-rw-r--r--src/video_core/renderer_vulkan/vk_fence_manager.h74
-rw-r--r--src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp96
-rw-r--r--src/video_core/renderer_vulkan/vk_graphics_pipeline.h3
-rw-r--r--src/video_core/renderer_vulkan/vk_memory_manager.cpp13
-rw-r--r--src/video_core/renderer_vulkan/vk_memory_manager.h13
-rw-r--r--src/video_core/renderer_vulkan/vk_pipeline_cache.cpp118
-rw-r--r--src/video_core/renderer_vulkan/vk_pipeline_cache.h62
-rw-r--r--src/video_core/renderer_vulkan/vk_query_cache.cpp16
-rw-r--r--src/video_core/renderer_vulkan/vk_query_cache.h1
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.cpp218
-rw-r--r--src/video_core/renderer_vulkan/vk_rasterizer.h17
-rw-r--r--src/video_core/renderer_vulkan/vk_renderpass_cache.cpp53
-rw-r--r--src/video_core/renderer_vulkan/vk_renderpass_cache.h59
-rw-r--r--src/video_core/renderer_vulkan/vk_sampler_cache.cpp3
-rw-r--r--src/video_core/renderer_vulkan/vk_scheduler.cpp11
-rw-r--r--src/video_core/renderer_vulkan/vk_scheduler.h1
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_decompiler.cpp83
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_decompiler.h4
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_util.cpp3
-rw-r--r--src/video_core/renderer_vulkan/vk_shader_util.h1
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp11
-rw-r--r--src/video_core/renderer_vulkan/vk_staging_buffer_pool.h3
-rw-r--r--src/video_core/renderer_vulkan/vk_stream_buffer.cpp76
-rw-r--r--src/video_core/renderer_vulkan/vk_stream_buffer.h5
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.cpp3
-rw-r--r--src/video_core/renderer_vulkan/vk_texture_cache.h6
-rw-r--r--src/video_core/renderer_vulkan/vk_update_descriptor.h1
-rw-r--r--src/video_core/renderer_vulkan/wrapper.cpp58
-rw-r--r--src/video_core/renderer_vulkan/wrapper.h64
-rw-r--r--src/video_core/shader/control_flow.cpp16
-rw-r--r--src/video_core/shader/decode.cpp30
-rw-r--r--src/video_core/shader/decode/arithmetic_half.cpp51
-rw-r--r--src/video_core/shader/decode/arithmetic_integer.cpp35
-rw-r--r--src/video_core/shader/decode/image.cpp18
-rw-r--r--src/video_core/shader/decode/memory.cpp2
-rw-r--r--src/video_core/shader/decode/register_set_predicate.cpp52
-rw-r--r--src/video_core/shader/decode/texture.cpp203
-rw-r--r--src/video_core/shader/memory_util.cpp77
-rw-r--r--src/video_core/shader/memory_util.h47
-rw-r--r--src/video_core/shader/node.h131
-rw-r--r--src/video_core/shader/shader_ir.h37
-rw-r--r--src/video_core/shader/track.cpp20
-rw-r--r--src/video_core/texture_cache/surface_base.h18
-rw-r--r--src/video_core/texture_cache/surface_params.cpp10
-rw-r--r--src/video_core/texture_cache/texture_cache.h182
-rw-r--r--src/video_core/textures/decoders.cpp3
-rw-r--r--src/video_core/textures/decoders.h5
-rw-r--r--src/yuzu/applets/profile_select.cpp2
-rw-r--r--src/yuzu/configuration/config.cpp21
-rw-r--r--src/yuzu/configuration/config.h2
-rw-r--r--src/yuzu/configuration/configure_debug.cpp2
-rw-r--r--src/yuzu/configuration/configure_debug.ui7
-rw-r--r--src/yuzu/configuration/configure_filesystem.cpp2
-rw-r--r--src/yuzu/configuration/configure_graphics_advanced.cpp7
-rw-r--r--src/yuzu/configuration/configure_graphics_advanced.ui40
-rw-r--r--src/yuzu/configuration/configure_hotkeys.cpp80
-rw-r--r--src/yuzu/configuration/configure_hotkeys.h6
-rw-r--r--src/yuzu/configuration/configure_hotkeys.ui39
-rw-r--r--src/yuzu/configuration/configure_input_player.cpp60
-rw-r--r--src/yuzu/configuration/configure_input_player.h6
-rw-r--r--src/yuzu/configuration/configure_input_player.ui16
-rw-r--r--src/yuzu/game_list_p.h14
-rw-r--r--src/yuzu/main.cpp4
-rw-r--r--src/yuzu_cmd/config.cpp8
-rw-r--r--src/yuzu_cmd/default_ini.h9
-rw-r--r--src/yuzu_cmd/emu_window/emu_window_sdl2.cpp7
-rw-r--r--src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp3
-rw-r--r--src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h2
-rw-r--r--src/yuzu_tester/config.cpp6
204 files changed, 4620 insertions, 1789 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 0913be72c..3a57356ab 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -54,8 +54,10 @@ else()
54 add_compile_options( 54 add_compile_options(
55 -Wall 55 -Wall
56 -Werror=implicit-fallthrough 56 -Werror=implicit-fallthrough
57 -Werror=missing-declarations
57 -Werror=reorder 58 -Werror=reorder
58 -Wextra 59 -Wextra
60 -Wmissing-declarations
59 -Wno-attributes 61 -Wno-attributes
60 -Wno-unused-parameter 62 -Wno-unused-parameter
61 ) 63 )
diff --git a/src/audio_core/CMakeLists.txt b/src/audio_core/CMakeLists.txt
index c381dbe1d..5ef38a337 100644
--- a/src/audio_core/CMakeLists.txt
+++ b/src/audio_core/CMakeLists.txt
@@ -7,9 +7,12 @@ add_library(audio_core STATIC
7 audio_out.h 7 audio_out.h
8 audio_renderer.cpp 8 audio_renderer.cpp
9 audio_renderer.h 9 audio_renderer.h
10 behavior_info.cpp
11 behavior_info.h
10 buffer.h 12 buffer.h
11 codec.cpp 13 codec.cpp
12 codec.h 14 codec.h
15 common.h
13 null_sink.h 16 null_sink.h
14 sink.h 17 sink.h
15 sink_details.cpp 18 sink_details.cpp
diff --git a/src/audio_core/audio_renderer.cpp b/src/audio_core/audio_renderer.cpp
index 7a9dc61d4..d18ef6940 100644
--- a/src/audio_core/audio_renderer.cpp
+++ b/src/audio_core/audio_renderer.cpp
@@ -6,6 +6,7 @@
6#include "audio_core/audio_out.h" 6#include "audio_core/audio_out.h"
7#include "audio_core/audio_renderer.h" 7#include "audio_core/audio_renderer.h"
8#include "audio_core/codec.h" 8#include "audio_core/codec.h"
9#include "audio_core/common.h"
9#include "common/assert.h" 10#include "common/assert.h"
10#include "common/logging/log.h" 11#include "common/logging/log.h"
11#include "core/core.h" 12#include "core/core.h"
@@ -79,7 +80,7 @@ AudioRenderer::AudioRenderer(Core::Timing::CoreTiming& core_timing, Core::Memory
79 std::size_t instance_number) 80 std::size_t instance_number)
80 : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count), 81 : worker_params{params}, buffer_event{buffer_event}, voices(params.voice_count),
81 effects(params.effect_count), memory{memory_} { 82 effects(params.effect_count), memory{memory_} {
82 83 behavior_info.SetUserRevision(params.revision);
83 audio_out = std::make_unique<AudioCore::AudioOut>(); 84 audio_out = std::make_unique<AudioCore::AudioOut>();
84 stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS, 85 stream = audio_out->OpenStream(core_timing, STREAM_SAMPLE_RATE, STREAM_NUM_CHANNELS,
85 fmt::format("AudioRenderer-Instance{}", instance_number), 86 fmt::format("AudioRenderer-Instance{}", instance_number),
@@ -109,17 +110,17 @@ Stream::State AudioRenderer::GetStreamState() const {
109 return stream->GetState(); 110 return stream->GetState();
110} 111}
111 112
112static constexpr u32 VersionFromRevision(u32_le rev) { 113ResultVal<std::vector<u8>> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
113 // "REV7" -> 7
114 return ((rev >> 24) & 0xff) - 0x30;
115}
116
117std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_params) {
118 // Copy UpdateDataHeader struct 114 // Copy UpdateDataHeader struct
119 UpdateDataHeader config{}; 115 UpdateDataHeader config{};
120 std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader)); 116 std::memcpy(&config, input_params.data(), sizeof(UpdateDataHeader));
121 u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4); 117 u32 memory_pool_count = worker_params.effect_count + (worker_params.voice_count * 4);
122 118
119 if (!behavior_info.UpdateInput(input_params, sizeof(UpdateDataHeader))) {
120 LOG_ERROR(Audio, "Failed to update behavior info input parameters");
121 return Audren::ERR_INVALID_PARAMETERS;
122 }
123
123 // Copy MemoryPoolInfo structs 124 // Copy MemoryPoolInfo structs
124 std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count); 125 std::vector<MemoryPoolInfo> mem_pool_info(memory_pool_count);
125 std::memcpy(mem_pool_info.data(), 126 std::memcpy(mem_pool_info.data(),
@@ -173,8 +174,7 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
173 // Copy output header 174 // Copy output header
174 UpdateDataHeader response_data{worker_params}; 175 UpdateDataHeader response_data{worker_params};
175 std::vector<u8> output_params(response_data.total_size); 176 std::vector<u8> output_params(response_data.total_size);
176 const auto audren_revision = VersionFromRevision(config.revision); 177 if (behavior_info.IsElapsedFrameCountSupported()) {
177 if (audren_revision >= 5) {
178 response_data.frame_count = 0x10; 178 response_data.frame_count = 0x10;
179 response_data.total_size += 0x10; 179 response_data.total_size += 0x10;
180 } 180 }
@@ -200,7 +200,19 @@ std::vector<u8> AudioRenderer::UpdateAudioRenderer(const std::vector<u8>& input_
200 sizeof(EffectOutStatus)); 200 sizeof(EffectOutStatus));
201 effect_out_status_offset += sizeof(EffectOutStatus); 201 effect_out_status_offset += sizeof(EffectOutStatus);
202 } 202 }
203 return output_params; 203
204 // Update behavior info output
205 const std::size_t behavior_out_status_offset{
206 sizeof(UpdateDataHeader) + response_data.memory_pools_size + response_data.voices_size +
207 response_data.effects_size + response_data.sinks_size +
208 response_data.performance_manager_size};
209
210 if (!behavior_info.UpdateOutput(output_params, behavior_out_status_offset)) {
211 LOG_ERROR(Audio, "Failed to update behavior info output parameters");
212 return Audren::ERR_INVALID_PARAMETERS;
213 }
214
215 return MakeResult(output_params);
204} 216}
205 217
206void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) { 218void AudioRenderer::VoiceState::SetWaveIndex(std::size_t index) {
diff --git a/src/audio_core/audio_renderer.h b/src/audio_core/audio_renderer.h
index 62faf9f19..b42770fae 100644
--- a/src/audio_core/audio_renderer.h
+++ b/src/audio_core/audio_renderer.h
@@ -8,11 +8,13 @@
8#include <memory> 8#include <memory>
9#include <vector> 9#include <vector>
10 10
11#include "audio_core/behavior_info.h"
11#include "audio_core/stream.h" 12#include "audio_core/stream.h"
12#include "common/common_funcs.h" 13#include "common/common_funcs.h"
13#include "common/common_types.h" 14#include "common/common_types.h"
14#include "common/swap.h" 15#include "common/swap.h"
15#include "core/hle/kernel/object.h" 16#include "core/hle/kernel/object.h"
17#include "core/hle/result.h"
16 18
17namespace Core::Timing { 19namespace Core::Timing {
18class CoreTiming; 20class CoreTiming;
@@ -226,7 +228,7 @@ public:
226 std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number); 228 std::shared_ptr<Kernel::WritableEvent> buffer_event, std::size_t instance_number);
227 ~AudioRenderer(); 229 ~AudioRenderer();
228 230
229 std::vector<u8> UpdateAudioRenderer(const std::vector<u8>& input_params); 231 ResultVal<std::vector<u8>> UpdateAudioRenderer(const std::vector<u8>& input_params);
230 void QueueMixedBuffer(Buffer::Tag tag); 232 void QueueMixedBuffer(Buffer::Tag tag);
231 void ReleaseAndQueueBuffers(); 233 void ReleaseAndQueueBuffers();
232 u32 GetSampleRate() const; 234 u32 GetSampleRate() const;
@@ -237,6 +239,7 @@ public:
237private: 239private:
238 class EffectState; 240 class EffectState;
239 class VoiceState; 241 class VoiceState;
242 BehaviorInfo behavior_info{};
240 243
241 AudioRendererParameter worker_params; 244 AudioRendererParameter worker_params;
242 std::shared_ptr<Kernel::WritableEvent> buffer_event; 245 std::shared_ptr<Kernel::WritableEvent> buffer_event;
diff --git a/src/audio_core/behavior_info.cpp b/src/audio_core/behavior_info.cpp
new file mode 100644
index 000000000..94b7a3bf1
--- /dev/null
+++ b/src/audio_core/behavior_info.cpp
@@ -0,0 +1,100 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <cstring>
6#include "audio_core/behavior_info.h"
7#include "audio_core/common.h"
8#include "common/logging/log.h"
9
10namespace AudioCore {
11
12BehaviorInfo::BehaviorInfo() : process_revision(CURRENT_PROCESS_REVISION) {}
13BehaviorInfo::~BehaviorInfo() = default;
14
15bool BehaviorInfo::UpdateInput(const std::vector<u8>& buffer, std::size_t offset) {
16 if (!CanConsumeBuffer(buffer.size(), offset, sizeof(InParams))) {
17 LOG_ERROR(Audio, "Buffer is an invalid size!");
18 return false;
19 }
20 InParams params{};
21 std::memcpy(&params, buffer.data() + offset, sizeof(InParams));
22
23 if (!IsValidRevision(params.revision)) {
24 LOG_ERROR(Audio, "Invalid input revision, revision=0x{:08X}", params.revision);
25 return false;
26 }
27
28 if (user_revision != params.revision) {
29 LOG_ERROR(Audio,
30 "User revision differs from input revision, expecting 0x{:08X} but got 0x{:08X}",
31 user_revision, params.revision);
32 return false;
33 }
34
35 ClearError();
36 UpdateFlags(params.flags);
37
38 // TODO(ogniK): Check input params size when InfoUpdater is used
39
40 return true;
41}
42
43bool BehaviorInfo::UpdateOutput(std::vector<u8>& buffer, std::size_t offset) {
44 if (!CanConsumeBuffer(buffer.size(), offset, sizeof(OutParams))) {
45 LOG_ERROR(Audio, "Buffer is an invalid size!");
46 return false;
47 }
48
49 OutParams params{};
50 std::memcpy(params.errors.data(), errors.data(), sizeof(ErrorInfo) * errors.size());
51 params.error_count = static_cast<u32_le>(error_count);
52 std::memcpy(buffer.data() + offset, &params, sizeof(OutParams));
53 return true;
54}
55
56void BehaviorInfo::ClearError() {
57 error_count = 0;
58}
59
60void BehaviorInfo::UpdateFlags(u64_le dest_flags) {
61 flags = dest_flags;
62}
63
64void BehaviorInfo::SetUserRevision(u32_le revision) {
65 user_revision = revision;
66}
67
68bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
69 return IsRevisionSupported(2, user_revision);
70}
71
72bool BehaviorInfo::IsSplitterSupported() const {
73 return IsRevisionSupported(2, user_revision);
74}
75
76bool BehaviorInfo::IsLongSizePreDelaySupported() const {
77 return IsRevisionSupported(3, user_revision);
78}
79
80bool BehaviorInfo::IsAudioRenererProcessingTimeLimit80PercentSupported() const {
81 return IsRevisionSupported(5, user_revision);
82}
83
84bool BehaviorInfo::IsAudioRenererProcessingTimeLimit75PercentSupported() const {
85 return IsRevisionSupported(4, user_revision);
86}
87
88bool BehaviorInfo::IsAudioRenererProcessingTimeLimit70PercentSupported() const {
89 return IsRevisionSupported(1, user_revision);
90}
91
92bool BehaviorInfo::IsElapsedFrameCountSupported() const {
93 return IsRevisionSupported(5, user_revision);
94}
95
96bool BehaviorInfo::IsMemoryPoolForceMappingEnabled() const {
97 return (flags & 1) != 0;
98}
99
100} // namespace AudioCore
diff --git a/src/audio_core/behavior_info.h b/src/audio_core/behavior_info.h
new file mode 100644
index 000000000..c5e91ab39
--- /dev/null
+++ b/src/audio_core/behavior_info.h
@@ -0,0 +1,66 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <array>
8
9#include <vector>
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "common/swap.h"
13
14namespace AudioCore {
15class BehaviorInfo {
16public:
17 explicit BehaviorInfo();
18 ~BehaviorInfo();
19
20 bool UpdateInput(const std::vector<u8>& buffer, std::size_t offset);
21 bool UpdateOutput(std::vector<u8>& buffer, std::size_t offset);
22
23 void ClearError();
24 void UpdateFlags(u64_le dest_flags);
25 void SetUserRevision(u32_le revision);
26
27 bool IsAdpcmLoopContextBugFixed() const;
28 bool IsSplitterSupported() const;
29 bool IsLongSizePreDelaySupported() const;
30 bool IsAudioRenererProcessingTimeLimit80PercentSupported() const;
31 bool IsAudioRenererProcessingTimeLimit75PercentSupported() const;
32 bool IsAudioRenererProcessingTimeLimit70PercentSupported() const;
33 bool IsElapsedFrameCountSupported() const;
34 bool IsMemoryPoolForceMappingEnabled() const;
35
36private:
37 u32_le process_revision{};
38 u32_le user_revision{};
39 u64_le flags{};
40
41 struct ErrorInfo {
42 u32_le result{};
43 INSERT_PADDING_WORDS(1);
44 u64_le result_info{};
45 };
46 static_assert(sizeof(ErrorInfo) == 0x10, "ErrorInfo is an invalid size");
47
48 std::array<ErrorInfo, 10> errors{};
49 std::size_t error_count{};
50
51 struct InParams {
52 u32_le revision{};
53 u32_le padding{};
54 u64_le flags{};
55 };
56 static_assert(sizeof(InParams) == 0x10, "InParams is an invalid size");
57
58 struct OutParams {
59 std::array<ErrorInfo, 10> errors{};
60 u32_le error_count{};
61 INSERT_PADDING_BYTES(12);
62 };
63 static_assert(sizeof(OutParams) == 0xb0, "OutParams is an invalid size");
64};
65
66} // namespace AudioCore
diff --git a/src/audio_core/common.h b/src/audio_core/common.h
new file mode 100644
index 000000000..98478b66b
--- /dev/null
+++ b/src/audio_core/common.h
@@ -0,0 +1,47 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6#include "common/common_funcs.h"
7#include "common/common_types.h"
8#include "common/swap.h"
9#include "core/hle/result.h"
10
11namespace AudioCore {
12namespace Audren {
13constexpr ResultCode ERR_INVALID_PARAMETERS{ErrorModule::Audio, 41};
14}
15
16constexpr u32_le CURRENT_PROCESS_REVISION = Common::MakeMagic('R', 'E', 'V', '8');
17
18static constexpr u32 VersionFromRevision(u32_le rev) {
19 // "REV7" -> 7
20 return ((rev >> 24) & 0xff) - 0x30;
21}
22
23static constexpr bool IsRevisionSupported(u32 required, u32_le user_revision) {
24 const auto base = VersionFromRevision(user_revision);
25 return required <= base;
26}
27
28static constexpr bool IsValidRevision(u32_le revision) {
29 const auto base = VersionFromRevision(revision);
30 constexpr auto max_rev = VersionFromRevision(CURRENT_PROCESS_REVISION);
31 return base <= max_rev;
32}
33
34static constexpr bool CanConsumeBuffer(std::size_t size, std::size_t offset, std::size_t required) {
35 if (offset > size) {
36 return false;
37 }
38 if (size < required) {
39 return false;
40 }
41 if ((size - offset) < required) {
42 return false;
43 }
44 return true;
45}
46
47} // namespace AudioCore
diff --git a/src/common/bit_field.h b/src/common/bit_field.h
index fd2bbbd99..26ae6c7fc 100644
--- a/src/common/bit_field.h
+++ b/src/common/bit_field.h
@@ -180,7 +180,7 @@ public:
180 } 180 }
181 181
182 constexpr void Assign(const T& value) { 182 constexpr void Assign(const T& value) {
183 storage = (static_cast<StorageType>(storage) & ~mask) | FormatValue(value); 183 storage = static_cast<StorageType>((storage & ~mask) | FormatValue(value));
184 } 184 }
185 185
186 constexpr T Value() const { 186 constexpr T Value() const {
diff --git a/src/common/uuid.h b/src/common/uuid.h
index f6ad064fb..4d3af8cec 100644
--- a/src/common/uuid.h
+++ b/src/common/uuid.h
@@ -40,6 +40,11 @@ struct UUID {
40 uuid = INVALID_UUID; 40 uuid = INVALID_UUID;
41 } 41 }
42 42
43 // TODO(ogniK): Properly generate a Nintendo ID
44 constexpr u64 GetNintendoID() const {
45 return uuid[0];
46 }
47
43 std::string Format() const; 48 std::string Format() const;
44 std::string FormatSwitch() const; 49 std::string FormatSwitch() const;
45}; 50};
diff --git a/src/core/arm/dynarmic/arm_dynarmic_64.cpp b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
index 9add5d363..337b97be9 100644
--- a/src/core/arm/dynarmic/arm_dynarmic_64.cpp
+++ b/src/core/arm/dynarmic/arm_dynarmic_64.cpp
@@ -20,6 +20,7 @@
20#include "core/hle/kernel/scheduler.h" 20#include "core/hle/kernel/scheduler.h"
21#include "core/hle/kernel/svc.h" 21#include "core/hle/kernel/svc.h"
22#include "core/memory.h" 22#include "core/memory.h"
23#include "core/settings.h"
23 24
24namespace Core { 25namespace Core {
25 26
@@ -144,6 +145,8 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
144 config.page_table_address_space_bits = address_space_bits; 145 config.page_table_address_space_bits = address_space_bits;
145 config.silently_mirror_page_table = false; 146 config.silently_mirror_page_table = false;
146 config.absolute_offset_page_table = true; 147 config.absolute_offset_page_table = true;
148 config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128;
149 config.only_detect_misalignment_via_page_table_on_page_boundary = true;
147 150
148 // Multi-process state 151 // Multi-process state
149 config.processor_id = core_index; 152 config.processor_id = core_index;
@@ -159,8 +162,11 @@ std::shared_ptr<Dynarmic::A64::Jit> ARM_Dynarmic_64::MakeJit(Common::PageTable&
159 // Unpredictable instructions 162 // Unpredictable instructions
160 config.define_unpredictable_behaviour = true; 163 config.define_unpredictable_behaviour = true;
161 164
162 config.detect_misaligned_access_via_page_table = 16 | 32 | 64 | 128; 165 // Optimizations
163 config.only_detect_misalignment_via_page_table_on_page_boundary = true; 166 if (Settings::values.disable_cpu_opt) {
167 config.enable_optimizations = false;
168 config.enable_fast_dispatch = false;
169 }
164 170
165 return std::make_shared<Dynarmic::A64::Jit>(config); 171 return std::make_shared<Dynarmic::A64::Jit>(config);
166} 172}
@@ -179,10 +185,9 @@ void ARM_Dynarmic_64::Step() {
179 185
180ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor, 186ARM_Dynarmic_64::ARM_Dynarmic_64(System& system, ExclusiveMonitor& exclusive_monitor,
181 std::size_t core_index) 187 std::size_t core_index)
182 : ARM_Interface{system}, 188 : ARM_Interface{system}, cb(std::make_unique<DynarmicCallbacks64>(*this)),
183 cb(std::make_unique<DynarmicCallbacks64>(*this)), inner_unicorn{system}, 189 inner_unicorn{system, ARM_Unicorn::Arch::AArch64}, core_index{core_index},
184 core_index{core_index}, exclusive_monitor{ 190 exclusive_monitor{dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
185 dynamic_cast<DynarmicExclusiveMonitor&>(exclusive_monitor)} {}
186 191
187ARM_Dynarmic_64::~ARM_Dynarmic_64() = default; 192ARM_Dynarmic_64::~ARM_Dynarmic_64() = default;
188 193
diff --git a/src/core/arm/unicorn/arm_unicorn.cpp b/src/core/arm/unicorn/arm_unicorn.cpp
index d189efb63..e40e9626a 100644
--- a/src/core/arm/unicorn/arm_unicorn.cpp
+++ b/src/core/arm/unicorn/arm_unicorn.cpp
@@ -11,6 +11,7 @@
11#include "core/core_timing.h" 11#include "core/core_timing.h"
12#include "core/hle/kernel/scheduler.h" 12#include "core/hle/kernel/scheduler.h"
13#include "core/hle/kernel/svc.h" 13#include "core/hle/kernel/svc.h"
14#include "core/memory.h"
14 15
15namespace Core { 16namespace Core {
16 17
@@ -61,8 +62,9 @@ static bool UnmappedMemoryHook(uc_engine* uc, uc_mem_type type, u64 addr, int si
61 return false; 62 return false;
62} 63}
63 64
64ARM_Unicorn::ARM_Unicorn(System& system) : ARM_Interface{system} { 65ARM_Unicorn::ARM_Unicorn(System& system, Arch architecture) : ARM_Interface{system} {
65 CHECKED(uc_open(UC_ARCH_ARM64, UC_MODE_ARM, &uc)); 66 const auto arch = architecture == Arch::AArch32 ? UC_ARCH_ARM : UC_ARCH_ARM64;
67 CHECKED(uc_open(arch, UC_MODE_ARM, &uc));
66 68
67 auto fpv = 3 << 20; 69 auto fpv = 3 << 20;
68 CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv)); 70 CHECKED(uc_reg_write(uc, UC_ARM64_REG_CPACR_EL1, &fpv));
@@ -171,7 +173,17 @@ MICROPROFILE_DEFINE(ARM_Jit_Unicorn, "ARM JIT", "Unicorn", MP_RGB(255, 64, 64));
171 173
172void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) { 174void ARM_Unicorn::ExecuteInstructions(std::size_t num_instructions) {
173 MICROPROFILE_SCOPE(ARM_Jit_Unicorn); 175 MICROPROFILE_SCOPE(ARM_Jit_Unicorn);
176
177 // Temporarily map the code page for Unicorn
178 u64 map_addr{GetPC() & ~Memory::PAGE_MASK};
179 std::vector<u8> page_buffer(Memory::PAGE_SIZE);
180 system.Memory().ReadBlock(map_addr, page_buffer.data(), page_buffer.size());
181
182 CHECKED(uc_mem_map_ptr(uc, map_addr, page_buffer.size(),
183 UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC, page_buffer.data()));
174 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions)); 184 CHECKED(uc_emu_start(uc, GetPC(), 1ULL << 63, 0, num_instructions));
185 CHECKED(uc_mem_unmap(uc, map_addr, page_buffer.size()));
186
175 system.CoreTiming().AddTicks(num_instructions); 187 system.CoreTiming().AddTicks(num_instructions);
176 if (GDBStub::IsServerEnabled()) { 188 if (GDBStub::IsServerEnabled()) {
177 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) { 189 if (last_bkpt_hit && last_bkpt.type == GDBStub::BreakpointType::Execute) {
diff --git a/src/core/arm/unicorn/arm_unicorn.h b/src/core/arm/unicorn/arm_unicorn.h
index f30d13cb6..725c65085 100644
--- a/src/core/arm/unicorn/arm_unicorn.h
+++ b/src/core/arm/unicorn/arm_unicorn.h
@@ -15,7 +15,12 @@ class System;
15 15
16class ARM_Unicorn final : public ARM_Interface { 16class ARM_Unicorn final : public ARM_Interface {
17public: 17public:
18 explicit ARM_Unicorn(System& system); 18 enum class Arch {
19 AArch32, // 32-bit ARM
20 AArch64, // 64-bit ARM
21 };
22
23 explicit ARM_Unicorn(System& system, Arch architecture);
19 ~ARM_Unicorn() override; 24 ~ARM_Unicorn() override;
20 25
21 void SetPC(u64 pc) override; 26 void SetPC(u64 pc) override;
diff --git a/src/core/crypto/key_manager.cpp b/src/core/crypto/key_manager.cpp
index 87e6a1fd3..8997c7082 100644
--- a/src/core/crypto/key_manager.cpp
+++ b/src/core/crypto/key_manager.cpp
@@ -1202,7 +1202,8 @@ const boost::container::flat_map<std::string, KeyIndex<S128KeyType>> KeyManager:
1202 {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyAreaKey), 1202 {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyAreaKey),
1203 static_cast<u64>(KeyAreaKeyType::System)}}, 1203 static_cast<u64>(KeyAreaKeyType::System)}},
1204 {"titlekek_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::Titlekek), 0}}, 1204 {"titlekek_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::Titlekek), 0}},
1205 {"keyblob_mac_key_source", {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC)}}, 1205 {"keyblob_mac_key_source",
1206 {S128KeyType::Source, static_cast<u64>(SourceKeyType::KeyblobMAC), 0}},
1206 {"tsec_key", {S128KeyType::TSEC, 0, 0}}, 1207 {"tsec_key", {S128KeyType::TSEC, 0, 0}},
1207 {"secure_boot_key", {S128KeyType::SecureBoot, 0, 0}}, 1208 {"secure_boot_key", {S128KeyType::SecureBoot, 0, 0}},
1208 {"sd_seed", {S128KeyType::SDSeed, 0, 0}}, 1209 {"sd_seed", {S128KeyType::SDSeed, 0, 0}},
diff --git a/src/core/crypto/partition_data_manager.cpp b/src/core/crypto/partition_data_manager.cpp
index d64302f2e..7ed71ac3a 100644
--- a/src/core/crypto/partition_data_manager.cpp
+++ b/src/core/crypto/partition_data_manager.cpp
@@ -202,8 +202,8 @@ static std::array<Key128, 0x20> FindEncryptedMasterKeyFromHex(const std::vector<
202 return out; 202 return out;
203} 203}
204 204
205FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir, 205static FileSys::VirtualFile FindFileInDirWithNames(const FileSys::VirtualDir& dir,
206 const std::string& name) { 206 const std::string& name) {
207 const auto upper = Common::ToUpper(name); 207 const auto upper = Common::ToUpper(name);
208 208
209 for (const auto& fname : {name, name + ".bin", upper, upper + ".BIN"}) { 209 for (const auto& fname : {name, name + ".bin", upper, upper + ".BIN"}) {
@@ -345,8 +345,7 @@ FileSys::VirtualFile PartitionDataManager::GetPackage2Raw(Package2Type type) con
345 return package2.at(static_cast<size_t>(type)); 345 return package2.at(static_cast<size_t>(type));
346} 346}
347 347
348bool AttemptDecrypt(const std::array<u8, 16>& key, Package2Header& header) { 348static bool AttemptDecrypt(const std::array<u8, 16>& key, Package2Header& header) {
349
350 const std::vector<u8> iv(header.header_ctr.begin(), header.header_ctr.end()); 349 const std::vector<u8> iv(header.header_ctr.begin(), header.header_ctr.end());
351 Package2Header temp = header; 350 Package2Header temp = header;
352 AESCipher<Key128> cipher(key, Mode::CTR); 351 AESCipher<Key128> cipher(key, Mode::CTR);
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index 1d6c30962..43169bf9f 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -51,6 +51,17 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
51 return Loader::ResultStatus::Success; 51 return Loader::ResultStatus::Success;
52} 52}
53 53
54/*static*/ ProgramMetadata ProgramMetadata::GetDefault() {
55 ProgramMetadata result;
56
57 result.LoadManual(
58 true /*is_64_bit*/, FileSys::ProgramAddressSpaceType::Is39Bit /*address_space*/,
59 0x2c /*main_thread_prio*/, 0 /*main_thread_core*/, 0x00100000 /*main_thread_stack_size*/,
60 {}, 0xFFFFFFFFFFFFFFFF /*filesystem_permissions*/, {} /*capabilities*/);
61
62 return result;
63}
64
54void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, 65void ProgramMetadata::LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space,
55 s32 main_thread_prio, u32 main_thread_core, 66 s32 main_thread_prio, u32 main_thread_core,
56 u32 main_thread_stack_size, u64 title_id, 67 u32 main_thread_stack_size, u64 title_id,
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index f8759a396..35069972b 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -44,9 +44,13 @@ public:
44 ProgramMetadata(); 44 ProgramMetadata();
45 ~ProgramMetadata(); 45 ~ProgramMetadata();
46 46
47 /// Gets a default ProgramMetadata configuration, should only be used for homebrew formats where
48 /// we do not have an NPDM file
49 static ProgramMetadata GetDefault();
50
47 Loader::ResultStatus Load(VirtualFile file); 51 Loader::ResultStatus Load(VirtualFile file);
48 52
49 // Load from parameters instead of NPDM file, used for KIP 53 /// Load from parameters instead of NPDM file, used for KIP
50 void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio, 54 void LoadManual(bool is_64_bit, ProgramAddressSpaceType address_space, s32 main_thread_prio,
51 u32 main_thread_core, u32 main_thread_stack_size, u64 title_id, 55 u32 main_thread_core, u32 main_thread_stack_size, u64 title_id,
52 u64 filesystem_permissions, KernelCapabilityDescriptors capabilities); 56 u64 filesystem_permissions, KernelCapabilityDescriptors capabilities);
diff --git a/src/core/gdbstub/gdbstub.cpp b/src/core/gdbstub/gdbstub.cpp
index 2f15635c5..70c0f8b80 100644
--- a/src/core/gdbstub/gdbstub.cpp
+++ b/src/core/gdbstub/gdbstub.cpp
@@ -1389,10 +1389,9 @@ void SendTrap(Kernel::Thread* thread, int trap) {
1389 return; 1389 return;
1390 } 1390 }
1391 1391
1392 if (!halt_loop || current_thread == thread) { 1392 current_thread = thread;
1393 current_thread = thread; 1393 SendSignal(thread, trap);
1394 SendSignal(thread, trap); 1394
1395 }
1396 halt_loop = true; 1395 halt_loop = true;
1397 send_trap = false; 1396 send_trap = false;
1398} 1397}
diff --git a/src/core/hle/kernel/handle_table.cpp b/src/core/hle/kernel/handle_table.cpp
index e441a27fc..35448b576 100644
--- a/src/core/hle/kernel/handle_table.cpp
+++ b/src/core/hle/kernel/handle_table.cpp
@@ -30,6 +30,7 @@ HandleTable::~HandleTable() = default;
30 30
31ResultCode HandleTable::SetSize(s32 handle_table_size) { 31ResultCode HandleTable::SetSize(s32 handle_table_size) {
32 if (static_cast<u32>(handle_table_size) > MAX_COUNT) { 32 if (static_cast<u32>(handle_table_size) > MAX_COUNT) {
33 LOG_ERROR(Kernel, "Handle table size {} is greater than {}", handle_table_size, MAX_COUNT);
33 return ERR_OUT_OF_MEMORY; 34 return ERR_OUT_OF_MEMORY;
34 } 35 }
35 36
@@ -80,6 +81,7 @@ ResultVal<Handle> HandleTable::Duplicate(Handle handle) {
80 81
81ResultCode HandleTable::Close(Handle handle) { 82ResultCode HandleTable::Close(Handle handle) {
82 if (!IsValid(handle)) { 83 if (!IsValid(handle)) {
84 LOG_ERROR(Kernel, "Handle is not valid! handle={:08X}", handle);
83 return ERR_INVALID_HANDLE; 85 return ERR_INVALID_HANDLE;
84 } 86 }
85 87
diff --git a/src/core/hle/kernel/memory/memory_block.h b/src/core/hle/kernel/memory/memory_block.h
index e11043b60..9db1f7b39 100644
--- a/src/core/hle/kernel/memory/memory_block.h
+++ b/src/core/hle/kernel/memory/memory_block.h
@@ -17,7 +17,7 @@ namespace Kernel::Memory {
17 17
18enum class MemoryState : u32 { 18enum class MemoryState : u32 {
19 None = 0, 19 None = 0,
20 Mask = 0xFFFFFFFF, // TODO(bunnei): This should probable be 0xFF 20 Mask = 0xFF,
21 All = ~None, 21 All = ~None,
22 22
23 FlagCanReprotect = (1 << 8), 23 FlagCanReprotect = (1 << 8),
@@ -253,6 +253,23 @@ public:
253 }; 253 };
254 } 254 }
255 255
256 void ShareToDevice(MemoryPermission /*new_perm*/) {
257 ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
258 device_use_count == 0);
259 attribute |= MemoryAttribute::DeviceShared;
260 const u16 new_use_count{++device_use_count};
261 ASSERT(new_use_count > 0);
262 }
263
264 void UnshareToDevice(MemoryPermission /*new_perm*/) {
265 ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
266 const u16 prev_use_count{device_use_count--};
267 ASSERT(prev_use_count > 0);
268 if (prev_use_count == 1) {
269 attribute &= ~MemoryAttribute::DeviceShared;
270 }
271 }
272
256private: 273private:
257 constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const { 274 constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
258 constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask | 275 constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
@@ -287,9 +304,9 @@ private:
287 state = new_state; 304 state = new_state;
288 perm = new_perm; 305 perm = new_perm;
289 306
290 // TODO(bunnei): Is this right?
291 attribute = static_cast<MemoryAttribute>( 307 attribute = static_cast<MemoryAttribute>(
292 new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/); 308 new_attribute |
309 (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
293 } 310 }
294 311
295 constexpr MemoryBlock Split(VAddr split_addr) { 312 constexpr MemoryBlock Split(VAddr split_addr) {
diff --git a/src/core/hle/kernel/memory/memory_block_manager.cpp b/src/core/hle/kernel/memory/memory_block_manager.cpp
index 1ebc126c0..900395c37 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.cpp
+++ b/src/core/hle/kernel/memory/memory_block_manager.cpp
@@ -143,6 +143,42 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState s
143 } 143 }
144} 144}
145 145
146void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
147 MemoryPermission perm) {
148 const std::size_t prev_count{memory_block_tree.size()};
149 const VAddr end_addr{addr + num_pages * PageSize};
150 iterator node{memory_block_tree.begin()};
151
152 while (node != memory_block_tree.end()) {
153 MemoryBlock* block{&(*node)};
154 iterator next_node{std::next(node)};
155 const VAddr cur_addr{block->GetAddress()};
156 const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
157
158 if (addr < cur_end_addr && cur_addr < end_addr) {
159 iterator new_node{node};
160
161 if (addr > cur_addr) {
162 memory_block_tree.insert(node, block->Split(addr));
163 }
164
165 if (end_addr < cur_end_addr) {
166 new_node = memory_block_tree.insert(node, block->Split(end_addr));
167 }
168
169 lock_func(new_node, perm);
170
171 MergeAdjacent(new_node, next_node);
172 }
173
174 if (cur_end_addr - 1 >= end_addr - 1) {
175 break;
176 }
177
178 node = next_node;
179 }
180}
181
146void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) { 182void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
147 const_iterator it{FindIterator(start)}; 183 const_iterator it{FindIterator(start)};
148 MemoryInfo info{}; 184 MemoryInfo info{};
diff --git a/src/core/hle/kernel/memory/memory_block_manager.h b/src/core/hle/kernel/memory/memory_block_manager.h
index 0f2270f0f..9451b5df6 100644
--- a/src/core/hle/kernel/memory/memory_block_manager.h
+++ b/src/core/hle/kernel/memory/memory_block_manager.h
@@ -45,6 +45,9 @@ public:
45 MemoryPermission perm = MemoryPermission::None, 45 MemoryPermission perm = MemoryPermission::None,
46 MemoryAttribute attribute = MemoryAttribute::None); 46 MemoryAttribute attribute = MemoryAttribute::None);
47 47
48 using LockFunc = std::function<void(iterator, MemoryPermission)>;
49 void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
50
48 using IterateFunc = std::function<void(const MemoryInfo&)>; 51 using IterateFunc = std::function<void(const MemoryInfo&)>;
49 void IterateForRange(VAddr start, VAddr end, IterateFunc&& func); 52 void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);
50 53
diff --git a/src/core/hle/kernel/memory/page_table.cpp b/src/core/hle/kernel/memory/page_table.cpp
index 091e52ca4..3281611f8 100644
--- a/src/core/hle/kernel/memory/page_table.cpp
+++ b/src/core/hle/kernel/memory/page_table.cpp
@@ -840,6 +840,50 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
840 return MakeResult<VAddr>(addr); 840 return MakeResult<VAddr>(addr);
841} 841}
842 842
843ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
844 std::lock_guard lock{page_table_lock};
845
846 MemoryPermission perm{};
847 if (const ResultCode result{CheckMemoryState(
848 nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
849 MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
850 MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
851 MemoryAttribute::DeviceSharedAndUncached)};
852 result.IsError()) {
853 return result;
854 }
855
856 block_manager->UpdateLock(addr, size / PageSize,
857 [](MemoryBlockManager::iterator block, MemoryPermission perm) {
858 block->ShareToDevice(perm);
859 },
860 perm);
861
862 return RESULT_SUCCESS;
863}
864
865ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
866 std::lock_guard lock{page_table_lock};
867
868 MemoryPermission perm{};
869 if (const ResultCode result{CheckMemoryState(
870 nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
871 MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
872 MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
873 MemoryAttribute::DeviceSharedAndUncached)};
874 result.IsError()) {
875 return result;
876 }
877
878 block_manager->UpdateLock(addr, size / PageSize,
879 [](MemoryBlockManager::iterator block, MemoryPermission perm) {
880 block->UnshareToDevice(perm);
881 },
882 perm);
883
884 return RESULT_SUCCESS;
885}
886
843ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) { 887ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
844 block_manager = std::make_unique<MemoryBlockManager>(start, end); 888 block_manager = std::make_unique<MemoryBlockManager>(start, end);
845 889
diff --git a/src/core/hle/kernel/memory/page_table.h b/src/core/hle/kernel/memory/page_table.h
index 80384ab0f..a867aa050 100644
--- a/src/core/hle/kernel/memory/page_table.h
+++ b/src/core/hle/kernel/memory/page_table.h
@@ -53,6 +53,8 @@ public:
53 bool is_map_only, VAddr region_start, 53 bool is_map_only, VAddr region_start,
54 std::size_t region_num_pages, MemoryState state, 54 std::size_t region_num_pages, MemoryState state,
55 MemoryPermission perm, PAddr map_addr = 0); 55 MemoryPermission perm, PAddr map_addr = 0);
56 ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
57 ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
56 58
57 Common::PageTable& PageTableImpl() { 59 Common::PageTable& PageTableImpl() {
58 return page_table_impl; 60 return page_table_impl;
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index eff4e45b0..7869eb32b 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -7,6 +7,7 @@
7#include <vector> 7#include <vector>
8 8
9#include "common/assert.h" 9#include "common/assert.h"
10#include "common/logging/log.h"
10#include "core/core.h" 11#include "core/core.h"
11#include "core/hle/kernel/errors.h" 12#include "core/hle/kernel/errors.h"
12#include "core/hle/kernel/handle_table.h" 13#include "core/hle/kernel/handle_table.h"
@@ -67,6 +68,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
67 Handle requesting_thread_handle) { 68 Handle requesting_thread_handle) {
68 // The mutex address must be 4-byte aligned 69 // The mutex address must be 4-byte aligned
69 if ((address % sizeof(u32)) != 0) { 70 if ((address % sizeof(u32)) != 0) {
71 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
70 return ERR_INVALID_ADDRESS; 72 return ERR_INVALID_ADDRESS;
71 } 73 }
72 74
@@ -88,6 +90,8 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
88 } 90 }
89 91
90 if (holding_thread == nullptr) { 92 if (holding_thread == nullptr) {
93 LOG_ERROR(Kernel, "Holding thread does not exist! thread_handle={:08X}",
94 holding_thread_handle);
91 return ERR_INVALID_HANDLE; 95 return ERR_INVALID_HANDLE;
92 } 96 }
93 97
@@ -109,6 +113,7 @@ ResultCode Mutex::TryAcquire(VAddr address, Handle holding_thread_handle,
109ResultCode Mutex::Release(VAddr address) { 113ResultCode Mutex::Release(VAddr address) {
110 // The mutex address must be 4-byte aligned 114 // The mutex address must be 4-byte aligned
111 if ((address % sizeof(u32)) != 0) { 115 if ((address % sizeof(u32)) != 0) {
116 LOG_ERROR(Kernel, "Address is not 4-byte aligned! address={:016X}", address);
112 return ERR_INVALID_ADDRESS; 117 return ERR_INVALID_ADDRESS;
113 } 118 }
114 119
diff --git a/src/core/hle/kernel/physical_core.cpp b/src/core/hle/kernel/physical_core.cpp
index aa2787467..a15011076 100644
--- a/src/core/hle/kernel/physical_core.cpp
+++ b/src/core/hle/kernel/physical_core.cpp
@@ -27,7 +27,9 @@ PhysicalCore::PhysicalCore(Core::System& system, std::size_t id,
27 std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index); 27 std::make_unique<Core::ARM_Dynarmic_64>(system, exclusive_monitor, core_index);
28 28
29#else 29#else
30 arm_interface = std::make_shared<Core::ARM_Unicorn>(system); 30 using Core::ARM_Unicorn;
31 arm_interface_32 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch32);
32 arm_interface_64 = std::make_unique<ARM_Unicorn>(system, ARM_Unicorn::Arch::AArch64);
31 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available"); 33 LOG_WARNING(Core, "CPU JIT requested, but Dynarmic not available");
32#endif 34#endif
33 35
diff --git a/src/core/hle/kernel/process_capability.cpp b/src/core/hle/kernel/process_capability.cpp
index 48e5ae682..63880f13d 100644
--- a/src/core/hle/kernel/process_capability.cpp
+++ b/src/core/hle/kernel/process_capability.cpp
@@ -3,6 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/bit_util.h" 5#include "common/bit_util.h"
6#include "common/logging/log.h"
6#include "core/hle/kernel/errors.h" 7#include "core/hle/kernel/errors.h"
7#include "core/hle/kernel/handle_table.h" 8#include "core/hle/kernel/handle_table.h"
8#include "core/hle/kernel/memory/page_table.h" 9#include "core/hle/kernel/memory/page_table.h"
@@ -119,22 +120,30 @@ ResultCode ProcessCapabilities::ParseCapabilities(const u32* capabilities,
119 // The MapPhysical type uses two descriptor flags for its parameters. 120 // The MapPhysical type uses two descriptor flags for its parameters.
120 // If there's only one, then there's a problem. 121 // If there's only one, then there's a problem.
121 if (i >= num_capabilities) { 122 if (i >= num_capabilities) {
123 LOG_ERROR(Kernel, "Invalid combination! i={}", i);
122 return ERR_INVALID_COMBINATION; 124 return ERR_INVALID_COMBINATION;
123 } 125 }
124 126
125 const auto size_flags = capabilities[i]; 127 const auto size_flags = capabilities[i];
126 if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) { 128 if (GetCapabilityType(size_flags) != CapabilityType::MapPhysical) {
129 LOG_ERROR(Kernel, "Invalid capability type! size_flags={}", size_flags);
127 return ERR_INVALID_COMBINATION; 130 return ERR_INVALID_COMBINATION;
128 } 131 }
129 132
130 const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table); 133 const auto result = HandleMapPhysicalFlags(descriptor, size_flags, page_table);
131 if (result.IsError()) { 134 if (result.IsError()) {
135 LOG_ERROR(Kernel, "Failed to map physical flags! descriptor={}, size_flags={}",
136 descriptor, size_flags);
132 return result; 137 return result;
133 } 138 }
134 } else { 139 } else {
135 const auto result = 140 const auto result =
136 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table); 141 ParseSingleFlagCapability(set_flags, set_svc_bits, descriptor, page_table);
137 if (result.IsError()) { 142 if (result.IsError()) {
143 LOG_ERROR(
144 Kernel,
145 "Failed to parse capability flag! set_flags={}, set_svc_bits={}, descriptor={}",
146 set_flags, set_svc_bits, descriptor);
138 return result; 147 return result;
139 } 148 }
140 } 149 }
@@ -162,6 +171,9 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
162 const u32 flag_length = GetFlagBitOffset(type); 171 const u32 flag_length = GetFlagBitOffset(type);
163 const u32 set_flag = 1U << flag_length; 172 const u32 set_flag = 1U << flag_length;
164 if ((set_flag & set_flags & InitializeOnceMask) != 0) { 173 if ((set_flag & set_flags & InitializeOnceMask) != 0) {
174 LOG_ERROR(Kernel,
175 "Attempted to initialize flags that may only be initialized once. set_flags={}",
176 set_flags);
165 return ERR_INVALID_COMBINATION; 177 return ERR_INVALID_COMBINATION;
166 } 178 }
167 set_flags |= set_flag; 179 set_flags |= set_flag;
@@ -187,6 +199,7 @@ ResultCode ProcessCapabilities::ParseSingleFlagCapability(u32& set_flags, u32& s
187 break; 199 break;
188 } 200 }
189 201
202 LOG_ERROR(Kernel, "Invalid capability type! type={}", static_cast<u32>(type));
190 return ERR_INVALID_CAPABILITY_DESCRIPTOR; 203 return ERR_INVALID_CAPABILITY_DESCRIPTOR;
191} 204}
192 205
@@ -208,23 +221,31 @@ void ProcessCapabilities::Clear() {
208 221
209ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) { 222ResultCode ProcessCapabilities::HandlePriorityCoreNumFlags(u32 flags) {
210 if (priority_mask != 0 || core_mask != 0) { 223 if (priority_mask != 0 || core_mask != 0) {
224 LOG_ERROR(Kernel, "Core or priority mask are not zero! priority_mask={}, core_mask={}",
225 priority_mask, core_mask);
211 return ERR_INVALID_CAPABILITY_DESCRIPTOR; 226 return ERR_INVALID_CAPABILITY_DESCRIPTOR;
212 } 227 }
213 228
214 const u32 core_num_min = (flags >> 16) & 0xFF; 229 const u32 core_num_min = (flags >> 16) & 0xFF;
215 const u32 core_num_max = (flags >> 24) & 0xFF; 230 const u32 core_num_max = (flags >> 24) & 0xFF;
216 if (core_num_min > core_num_max) { 231 if (core_num_min > core_num_max) {
232 LOG_ERROR(Kernel, "Core min is greater than core max! core_num_min={}, core_num_max={}",
233 core_num_min, core_num_max);
217 return ERR_INVALID_COMBINATION; 234 return ERR_INVALID_COMBINATION;
218 } 235 }
219 236
220 const u32 priority_min = (flags >> 10) & 0x3F; 237 const u32 priority_min = (flags >> 10) & 0x3F;
221 const u32 priority_max = (flags >> 4) & 0x3F; 238 const u32 priority_max = (flags >> 4) & 0x3F;
222 if (priority_min > priority_max) { 239 if (priority_min > priority_max) {
240 LOG_ERROR(Kernel,
241 "Priority min is greater than priority max! priority_min={}, priority_max={}",
242 core_num_min, priority_max);
223 return ERR_INVALID_COMBINATION; 243 return ERR_INVALID_COMBINATION;
224 } 244 }
225 245
226 // The switch only has 4 usable cores. 246 // The switch only has 4 usable cores.
227 if (core_num_max >= 4) { 247 if (core_num_max >= 4) {
248 LOG_ERROR(Kernel, "Invalid max cores specified! core_num_max={}", core_num_max);
228 return ERR_INVALID_PROCESSOR_ID; 249 return ERR_INVALID_PROCESSOR_ID;
229 } 250 }
230 251
@@ -259,6 +280,7 @@ ResultCode ProcessCapabilities::HandleSyscallFlags(u32& set_svc_bits, u32 flags)
259 } 280 }
260 281
261 if (svc_number >= svc_capabilities.size()) { 282 if (svc_number >= svc_capabilities.size()) {
283 LOG_ERROR(Kernel, "Process svc capability is out of range! svc_number={}", svc_number);
262 return ERR_OUT_OF_RANGE; 284 return ERR_OUT_OF_RANGE;
263 } 285 }
264 286
@@ -295,6 +317,8 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
295 // emulate that, it's sufficient to mark every interrupt as defined. 317 // emulate that, it's sufficient to mark every interrupt as defined.
296 318
297 if (interrupt >= interrupt_capabilities.size()) { 319 if (interrupt >= interrupt_capabilities.size()) {
320 LOG_ERROR(Kernel, "Process interrupt capability is out of range! svc_number={}",
321 interrupt);
298 return ERR_OUT_OF_RANGE; 322 return ERR_OUT_OF_RANGE;
299 } 323 }
300 324
@@ -307,6 +331,7 @@ ResultCode ProcessCapabilities::HandleInterruptFlags(u32 flags) {
307ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) { 331ResultCode ProcessCapabilities::HandleProgramTypeFlags(u32 flags) {
308 const u32 reserved = flags >> 17; 332 const u32 reserved = flags >> 17;
309 if (reserved != 0) { 333 if (reserved != 0) {
334 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
310 return ERR_RESERVED_VALUE; 335 return ERR_RESERVED_VALUE;
311 } 336 }
312 337
@@ -324,6 +349,9 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
324 const u32 major_version = kernel_version >> 19; 349 const u32 major_version = kernel_version >> 19;
325 350
326 if (major_version != 0 || flags < 0x80000) { 351 if (major_version != 0 || flags < 0x80000) {
352 LOG_ERROR(Kernel,
353 "Kernel version is non zero or flags are too small! major_version={}, flags={}",
354 major_version, flags);
327 return ERR_INVALID_CAPABILITY_DESCRIPTOR; 355 return ERR_INVALID_CAPABILITY_DESCRIPTOR;
328 } 356 }
329 357
@@ -334,6 +362,7 @@ ResultCode ProcessCapabilities::HandleKernelVersionFlags(u32 flags) {
334ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) { 362ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
335 const u32 reserved = flags >> 26; 363 const u32 reserved = flags >> 26;
336 if (reserved != 0) { 364 if (reserved != 0) {
365 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
337 return ERR_RESERVED_VALUE; 366 return ERR_RESERVED_VALUE;
338 } 367 }
339 368
@@ -344,6 +373,7 @@ ResultCode ProcessCapabilities::HandleHandleTableFlags(u32 flags) {
344ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) { 373ResultCode ProcessCapabilities::HandleDebugFlags(u32 flags) {
345 const u32 reserved = flags >> 19; 374 const u32 reserved = flags >> 19;
346 if (reserved != 0) { 375 if (reserved != 0) {
376 LOG_ERROR(Kernel, "Reserved value is non-zero! reserved={}", reserved);
347 return ERR_RESERVED_VALUE; 377 return ERR_RESERVED_VALUE;
348 } 378 }
349 379
diff --git a/src/core/hle/kernel/readable_event.cpp b/src/core/hle/kernel/readable_event.cpp
index 9d3d3a81b..00860fcbd 100644
--- a/src/core/hle/kernel/readable_event.cpp
+++ b/src/core/hle/kernel/readable_event.cpp
@@ -4,6 +4,7 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include "common/assert.h" 6#include "common/assert.h"
7#include "common/logging/log.h"
7#include "core/hle/kernel/errors.h" 8#include "core/hle/kernel/errors.h"
8#include "core/hle/kernel/object.h" 9#include "core/hle/kernel/object.h"
9#include "core/hle/kernel/readable_event.h" 10#include "core/hle/kernel/readable_event.h"
@@ -23,10 +24,12 @@ void ReadableEvent::Acquire(Thread* thread) {
23} 24}
24 25
25void ReadableEvent::Signal() { 26void ReadableEvent::Signal() {
26 if (!is_signaled) { 27 if (is_signaled) {
27 is_signaled = true; 28 return;
28 SynchronizationObject::Signal(); 29 }
29 }; 30
31 is_signaled = true;
32 SynchronizationObject::Signal();
30} 33}
31 34
32void ReadableEvent::Clear() { 35void ReadableEvent::Clear() {
@@ -35,6 +38,8 @@ void ReadableEvent::Clear() {
35 38
36ResultCode ReadableEvent::Reset() { 39ResultCode ReadableEvent::Reset() {
37 if (!is_signaled) { 40 if (!is_signaled) {
41 LOG_ERROR(Kernel, "Handle is not signaled! object_id={}, object_type={}, object_name={}",
42 GetObjectId(), GetTypeName(), GetName());
38 return ERR_INVALID_STATE; 43 return ERR_INVALID_STATE;
39 } 44 }
40 45
diff --git a/src/core/hle/kernel/resource_limit.cpp b/src/core/hle/kernel/resource_limit.cpp
index 96e5b9892..d9beaa3a4 100644
--- a/src/core/hle/kernel/resource_limit.cpp
+++ b/src/core/hle/kernel/resource_limit.cpp
@@ -69,6 +69,8 @@ ResultCode ResourceLimit::SetLimitValue(ResourceType resource, s64 value) {
69 limit[index] = value; 69 limit[index] = value;
70 return RESULT_SUCCESS; 70 return RESULT_SUCCESS;
71 } else { 71 } else {
72 LOG_ERROR(Kernel, "Limit value is too large! resource={}, value={}, index={}",
73 static_cast<u32>(resource), value, index);
72 return ERR_INVALID_STATE; 74 return ERR_INVALID_STATE;
73 } 75 }
74} 76}
diff --git a/src/core/hle/kernel/shared_memory.cpp b/src/core/hle/kernel/shared_memory.cpp
index c67696757..0cd467110 100644
--- a/src/core/hle/kernel/shared_memory.cpp
+++ b/src/core/hle/kernel/shared_memory.cpp
@@ -36,22 +36,22 @@ std::shared_ptr<SharedMemory> SharedMemory::Create(
36} 36}
37 37
38ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size, 38ResultCode SharedMemory::Map(Process& target_process, VAddr address, std::size_t size,
39 Memory::MemoryPermission permission) { 39 Memory::MemoryPermission permissions) {
40 const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize}; 40 const u64 page_count{(size + Memory::PageSize - 1) / Memory::PageSize};
41 41
42 if (page_list.GetNumPages() != page_count) { 42 if (page_list.GetNumPages() != page_count) {
43 UNIMPLEMENTED_MSG("Page count does not match"); 43 UNIMPLEMENTED_MSG("Page count does not match");
44 } 44 }
45 45
46 Memory::MemoryPermission expected = 46 const Memory::MemoryPermission expected =
47 &target_process == owner_process ? owner_permission : user_permission; 47 &target_process == owner_process ? owner_permission : user_permission;
48 48
49 if (permission != expected) { 49 if (permissions != expected) {
50 UNIMPLEMENTED_MSG("Permission does not match"); 50 UNIMPLEMENTED_MSG("Permission does not match");
51 } 51 }
52 52
53 return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared, 53 return target_process.PageTable().MapPages(address, page_list, Memory::MemoryState::Shared,
54 permission); 54 permissions);
55} 55}
56 56
57} // namespace Kernel 57} // namespace Kernel
diff --git a/src/core/hle/kernel/shared_memory.h b/src/core/hle/kernel/shared_memory.h
index cd16d6412..0ef87235c 100644
--- a/src/core/hle/kernel/shared_memory.h
+++ b/src/core/hle/kernel/shared_memory.h
@@ -51,7 +51,7 @@ public:
51 * @param permissions Memory block map permissions (specified by SVC field) 51 * @param permissions Memory block map permissions (specified by SVC field)
52 */ 52 */
53 ResultCode Map(Process& target_process, VAddr address, std::size_t size, 53 ResultCode Map(Process& target_process, VAddr address, std::size_t size,
54 Memory::MemoryPermission permission); 54 Memory::MemoryPermission permissions);
55 55
56 /** 56 /**
57 * Gets a pointer to the shared memory block 57 * Gets a pointer to the shared memory block
diff --git a/src/core/hle/kernel/svc.cpp b/src/core/hle/kernel/svc.cpp
index 4134acf65..4ae4529f5 100644
--- a/src/core/hle/kernel/svc.cpp
+++ b/src/core/hle/kernel/svc.cpp
@@ -55,9 +55,6 @@ constexpr bool IsValidAddressRange(VAddr address, u64 size) {
55 return address + size > address; 55 return address + size > address;
56} 56}
57 57
58// 8 GiB
59constexpr u64 MAIN_MEMORY_SIZE = 0x200000000;
60
61// Helper function that performs the common sanity checks for svcMapMemory 58// Helper function that performs the common sanity checks for svcMapMemory
62// and svcUnmapMemory. This is doable, as both functions perform their sanitizing 59// and svcUnmapMemory. This is doable, as both functions perform their sanitizing
63// in the same order. 60// in the same order.
@@ -688,6 +685,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
688 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource: 685 case GetInfoType::TotalPhysicalMemoryAvailableWithoutSystemResource:
689 case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: { 686 case GetInfoType::TotalPhysicalMemoryUsedWithoutSystemResource: {
690 if (info_sub_id != 0) { 687 if (info_sub_id != 0) {
688 LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
689 info_sub_id);
691 return ERR_INVALID_ENUM_VALUE; 690 return ERR_INVALID_ENUM_VALUE;
692 } 691 }
693 692
@@ -695,6 +694,8 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
695 system.Kernel().CurrentProcess()->GetHandleTable(); 694 system.Kernel().CurrentProcess()->GetHandleTable();
696 const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle)); 695 const auto process = current_process_handle_table.Get<Process>(static_cast<Handle>(handle));
697 if (!process) { 696 if (!process) {
697 LOG_ERROR(Kernel_SVC, "Process is not valid! info_id={}, info_sub_id={}, handle={:08X}",
698 info_id, info_sub_id, handle);
698 return ERR_INVALID_HANDLE; 699 return ERR_INVALID_HANDLE;
699 } 700 }
700 701
@@ -776,7 +777,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
776 break; 777 break;
777 } 778 }
778 779
779 LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id); 780 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
780 return ERR_INVALID_ENUM_VALUE; 781 return ERR_INVALID_ENUM_VALUE;
781 } 782 }
782 783
@@ -786,10 +787,13 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
786 787
787 case GetInfoType::RegisterResourceLimit: { 788 case GetInfoType::RegisterResourceLimit: {
788 if (handle != 0) { 789 if (handle != 0) {
790            LOG_ERROR(Kernel_SVC, "Handle is non zero! handle={:08X}", handle);
789 return ERR_INVALID_HANDLE; 791 return ERR_INVALID_HANDLE;
790 } 792 }
791 793
792 if (info_sub_id != 0) { 794 if (info_sub_id != 0) {
795            LOG_ERROR(Kernel_SVC, "Info sub id is non zero! info_id={}, info_sub_id={}", info_id,
796 info_sub_id);
793 return ERR_INVALID_COMBINATION; 797 return ERR_INVALID_COMBINATION;
794 } 798 }
795 799
@@ -869,7 +873,7 @@ static ResultCode GetInfo(Core::System& system, u64* result, u64 info_id, u64 ha
869 } 873 }
870 874
871 default: 875 default:
872 LOG_WARNING(Kernel_SVC, "(STUBBED) Unimplemented svcGetInfo id=0x{:016X}", info_id); 876 LOG_ERROR(Kernel_SVC, "Unimplemented svcGetInfo id=0x{:016X}", info_id);
873 return ERR_INVALID_ENUM_VALUE; 877 return ERR_INVALID_ENUM_VALUE;
874 } 878 }
875} 879}
@@ -1229,6 +1233,142 @@ static ResultCode QueryMemory32(Core::System& system, u32 memory_info_address,
1229 return QueryMemory(system, memory_info_address, page_info_address, query_address); 1233 return QueryMemory(system, memory_info_address, page_info_address, query_address);
1230} 1234}
1231 1235
1236static ResultCode MapProcessCodeMemory(Core::System& system, Handle process_handle, u64 dst_address,
1237 u64 src_address, u64 size) {
1238 LOG_DEBUG(Kernel_SVC,
1239 "called. process_handle=0x{:08X}, dst_address=0x{:016X}, "
1240 "src_address=0x{:016X}, size=0x{:016X}",
1241 process_handle, dst_address, src_address, size);
1242
1243 if (!Common::Is4KBAligned(src_address)) {
1244 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
1245 src_address);
1246 return ERR_INVALID_ADDRESS;
1247 }
1248
1249 if (!Common::Is4KBAligned(dst_address)) {
1250 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
1251 dst_address);
1252 return ERR_INVALID_ADDRESS;
1253 }
1254
1255 if (size == 0 || !Common::Is4KBAligned(size)) {
1256 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X})", size);
1257 return ERR_INVALID_SIZE;
1258 }
1259
1260 if (!IsValidAddressRange(dst_address, size)) {
1261 LOG_ERROR(Kernel_SVC,
1262 "Destination address range overflows the address space (dst_address=0x{:016X}, "
1263 "size=0x{:016X}).",
1264 dst_address, size);
1265 return ERR_INVALID_ADDRESS_STATE;
1266 }
1267
1268 if (!IsValidAddressRange(src_address, size)) {
1269 LOG_ERROR(Kernel_SVC,
1270 "Source address range overflows the address space (src_address=0x{:016X}, "
1271 "size=0x{:016X}).",
1272 src_address, size);
1273 return ERR_INVALID_ADDRESS_STATE;
1274 }
1275
1276 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1277 auto process = handle_table.Get<Process>(process_handle);
1278 if (!process) {
1279 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
1280 process_handle);
1281 return ERR_INVALID_HANDLE;
1282 }
1283
1284 auto& page_table = process->PageTable();
1285 if (!page_table.IsInsideAddressSpace(src_address, size)) {
1286 LOG_ERROR(Kernel_SVC,
1287 "Source address range is not within the address space (src_address=0x{:016X}, "
1288 "size=0x{:016X}).",
1289 src_address, size);
1290 return ERR_INVALID_ADDRESS_STATE;
1291 }
1292
1293 if (!page_table.IsInsideASLRRegion(dst_address, size)) {
1294 LOG_ERROR(Kernel_SVC,
1295 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
1296 "size=0x{:016X}).",
1297 dst_address, size);
1298 return ERR_INVALID_MEMORY_RANGE;
1299 }
1300
1301 return page_table.MapProcessCodeMemory(dst_address, src_address, size);
1302}
1303
1304static ResultCode UnmapProcessCodeMemory(Core::System& system, Handle process_handle,
1305 u64 dst_address, u64 src_address, u64 size) {
1306 LOG_DEBUG(Kernel_SVC,
1307 "called. process_handle=0x{:08X}, dst_address=0x{:016X}, src_address=0x{:016X}, "
1308 "size=0x{:016X}",
1309 process_handle, dst_address, src_address, size);
1310
1311 if (!Common::Is4KBAligned(dst_address)) {
1312 LOG_ERROR(Kernel_SVC, "dst_address is not page-aligned (dst_address=0x{:016X}).",
1313 dst_address);
1314 return ERR_INVALID_ADDRESS;
1315 }
1316
1317 if (!Common::Is4KBAligned(src_address)) {
1318 LOG_ERROR(Kernel_SVC, "src_address is not page-aligned (src_address=0x{:016X}).",
1319 src_address);
1320 return ERR_INVALID_ADDRESS;
1321 }
1322
1323    if (size == 0 || !Common::Is4KBAligned(size)) {
1324 LOG_ERROR(Kernel_SVC, "Size is zero or not page-aligned (size=0x{:016X}).", size);
1325 return ERR_INVALID_SIZE;
1326 }
1327
1328 if (!IsValidAddressRange(dst_address, size)) {
1329 LOG_ERROR(Kernel_SVC,
1330 "Destination address range overflows the address space (dst_address=0x{:016X}, "
1331 "size=0x{:016X}).",
1332 dst_address, size);
1333 return ERR_INVALID_ADDRESS_STATE;
1334 }
1335
1336 if (!IsValidAddressRange(src_address, size)) {
1337 LOG_ERROR(Kernel_SVC,
1338 "Source address range overflows the address space (src_address=0x{:016X}, "
1339 "size=0x{:016X}).",
1340 src_address, size);
1341 return ERR_INVALID_ADDRESS_STATE;
1342 }
1343
1344 const auto& handle_table = system.Kernel().CurrentProcess()->GetHandleTable();
1345 auto process = handle_table.Get<Process>(process_handle);
1346 if (!process) {
1347 LOG_ERROR(Kernel_SVC, "Invalid process handle specified (handle=0x{:08X}).",
1348 process_handle);
1349 return ERR_INVALID_HANDLE;
1350 }
1351
1352 auto& page_table = process->PageTable();
1353 if (!page_table.IsInsideAddressSpace(src_address, size)) {
1354 LOG_ERROR(Kernel_SVC,
1355 "Source address range is not within the address space (src_address=0x{:016X}, "
1356 "size=0x{:016X}).",
1357 src_address, size);
1358 return ERR_INVALID_ADDRESS_STATE;
1359 }
1360
1361 if (!page_table.IsInsideASLRRegion(dst_address, size)) {
1362 LOG_ERROR(Kernel_SVC,
1363 "Destination address range is not within the ASLR region (dst_address=0x{:016X}, "
1364 "size=0x{:016X}).",
1365 dst_address, size);
1366 return ERR_INVALID_MEMORY_RANGE;
1367 }
1368
1369 return page_table.UnmapProcessCodeMemory(dst_address, src_address, size);
1370}
1371
1232/// Exits the current process 1372/// Exits the current process
1233static void ExitProcess(Core::System& system) { 1373static void ExitProcess(Core::System& system) {
1234 auto* current_process = system.Kernel().CurrentProcess(); 1374 auto* current_process = system.Kernel().CurrentProcess();
@@ -2256,8 +2396,8 @@ static const FunctionDef SVC_Table_64[] = {
2256 {0x74, nullptr, "MapProcessMemory"}, 2396 {0x74, nullptr, "MapProcessMemory"},
2257 {0x75, nullptr, "UnmapProcessMemory"}, 2397 {0x75, nullptr, "UnmapProcessMemory"},
2258 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"}, 2398 {0x76, SvcWrap64<QueryProcessMemory>, "QueryProcessMemory"},
2259 {0x77, nullptr, "MapProcessCodeMemory"}, 2399 {0x77, SvcWrap64<MapProcessCodeMemory>, "MapProcessCodeMemory"},
2260 {0x78, nullptr, "UnmapProcessCodeMemory"}, 2400 {0x78, SvcWrap64<UnmapProcessCodeMemory>, "UnmapProcessCodeMemory"},
2261 {0x79, nullptr, "CreateProcess"}, 2401 {0x79, nullptr, "CreateProcess"},
2262 {0x7A, nullptr, "StartProcess"}, 2402 {0x7A, nullptr, "StartProcess"},
2263 {0x7B, nullptr, "TerminateProcess"}, 2403 {0x7B, nullptr, "TerminateProcess"},
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 4c0451c01..db7f379ac 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -150,8 +150,7 @@ static void ResetThreadContext64(Core::ARM_Interface::ThreadContext64& context,
150 context.pc = entry_point; 150 context.pc = entry_point;
151 context.sp = stack_top; 151 context.sp = stack_top;
152 // TODO(merry): Perform a hardware test to determine the below value. 152 // TODO(merry): Perform a hardware test to determine the below value.
153 // AHP = 0, DN = 1, FTZ = 1, RMode = Round towards zero 153 context.fpcr = 0;
154 context.fpcr = 0x03C00000;
155} 154}
156 155
157ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name, 156ResultVal<std::shared_ptr<Thread>> Thread::Create(KernelCore& kernel, std::string name,
@@ -424,6 +423,8 @@ ResultCode Thread::SetCoreAndAffinityMask(s32 new_core, u64 new_affinity_mask) {
424 if (new_core == THREADPROCESSORID_DONT_UPDATE) { 423 if (new_core == THREADPROCESSORID_DONT_UPDATE) {
425 new_core = use_override ? ideal_core_override : ideal_core; 424 new_core = use_override ? ideal_core_override : ideal_core;
426 if ((new_affinity_mask & (1ULL << new_core)) == 0) { 425 if ((new_affinity_mask & (1ULL << new_core)) == 0) {
426 LOG_ERROR(Kernel, "New affinity mask is incorrect! new_core={}, new_affinity_mask={}",
427 new_core, new_affinity_mask);
427 return ERR_INVALID_COMBINATION; 428 return ERR_INVALID_COMBINATION;
428 } 429 }
429 } 430 }
diff --git a/src/core/hle/service/acc/acc.cpp b/src/core/hle/service/acc/acc.cpp
index cfac8ca9a..630a8b048 100644
--- a/src/core/hle/service/acc/acc.cpp
+++ b/src/core/hle/service/acc/acc.cpp
@@ -228,7 +228,8 @@ public:
228 228
229class IManagerForApplication final : public ServiceFramework<IManagerForApplication> { 229class IManagerForApplication final : public ServiceFramework<IManagerForApplication> {
230public: 230public:
231 IManagerForApplication() : ServiceFramework("IManagerForApplication") { 231 explicit IManagerForApplication(Common::UUID user_id)
232 : ServiceFramework("IManagerForApplication"), user_id(user_id) {
232 // clang-format off 233 // clang-format off
233 static const FunctionInfo functions[] = { 234 static const FunctionInfo functions[] = {
234 {0, &IManagerForApplication::CheckAvailability, "CheckAvailability"}, 235 {0, &IManagerForApplication::CheckAvailability, "CheckAvailability"},
@@ -254,12 +255,14 @@ private:
254 } 255 }
255 256
256 void GetAccountId(Kernel::HLERequestContext& ctx) { 257 void GetAccountId(Kernel::HLERequestContext& ctx) {
257 LOG_WARNING(Service_ACC, "(STUBBED) called"); 258 LOG_DEBUG(Service_ACC, "called");
258 // Should return a nintendo account ID 259
259 IPC::ResponseBuilder rb{ctx, 4}; 260 IPC::ResponseBuilder rb{ctx, 4};
260 rb.Push(RESULT_SUCCESS); 261 rb.Push(RESULT_SUCCESS);
261 rb.PushRaw<u64>(1); 262 rb.PushRaw<u64>(user_id.GetNintendoID());
262 } 263 }
264
265 Common::UUID user_id;
263}; 266};
264 267
265void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) { 268void Module::Interface::GetUserCount(Kernel::HLERequestContext& ctx) {
@@ -319,46 +322,37 @@ void Module::Interface::IsUserRegistrationRequestPermitted(Kernel::HLERequestCon
319 322
320void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) { 323void Module::Interface::InitializeApplicationInfo(Kernel::HLERequestContext& ctx) {
321 IPC::RequestParser rp{ctx}; 324 IPC::RequestParser rp{ctx};
322 auto pid = rp.Pop<u64>();
323 325
324 LOG_DEBUG(Service_ACC, "called, process_id={}", pid); 326 LOG_DEBUG(Service_ACC, "called");
325 IPC::ResponseBuilder rb{ctx, 2}; 327 IPC::ResponseBuilder rb{ctx, 2};
326 rb.Push(InitializeApplicationInfoBase(pid)); 328 rb.Push(InitializeApplicationInfoBase());
327} 329}
328 330
329void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) { 331void Module::Interface::InitializeApplicationInfoRestricted(Kernel::HLERequestContext& ctx) {
330 IPC::RequestParser rp{ctx}; 332 IPC::RequestParser rp{ctx};
331 auto pid = rp.Pop<u64>();
332 333
333 LOG_WARNING(Service_ACC, "(Partial implementation) called, process_id={}", pid); 334 LOG_WARNING(Service_ACC, "(Partial implementation) called");
334 335
335 // TODO(ogniK): We require checking if the user actually owns the title and what not. As of 336 // TODO(ogniK): We require checking if the user actually owns the title and what not. As of
336 // currently, we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called 337 // currently, we assume the user owns the title. InitializeApplicationInfoBase SHOULD be called
337 // first then we do extra checks if the game is a digital copy. 338 // first then we do extra checks if the game is a digital copy.
338 339
339 IPC::ResponseBuilder rb{ctx, 2}; 340 IPC::ResponseBuilder rb{ctx, 2};
340 rb.Push(InitializeApplicationInfoBase(pid)); 341 rb.Push(InitializeApplicationInfoBase());
341} 342}
342 343
343ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) { 344ResultCode Module::Interface::InitializeApplicationInfoBase() {
344 if (application_info) { 345 if (application_info) {
345 LOG_ERROR(Service_ACC, "Application already initialized"); 346 LOG_ERROR(Service_ACC, "Application already initialized");
346 return ERR_ACCOUNTINFO_ALREADY_INITIALIZED; 347 return ERR_ACCOUNTINFO_ALREADY_INITIALIZED;
347 } 348 }
348 349
349 const auto& list = system.Kernel().GetProcessList(); 350 // TODO(ogniK): This should be changed to reflect the target process for when we have multiple
350 const auto iter = std::find_if(list.begin(), list.end(), [&process_id](const auto& process) { 351 // processes emulated. As we don't actually have pid support we should assume we're just using
351 return process->GetProcessID() == process_id; 352 // our own process
352 }); 353 const auto& current_process = system.Kernel().CurrentProcess();
353 354 const auto launch_property =
354 if (iter == list.end()) { 355 system.GetARPManager().GetLaunchProperty(current_process->GetTitleID());
355 LOG_ERROR(Service_ACC, "Failed to find process ID");
356 application_info.application_type = ApplicationType::Unknown;
357
358 return ERR_ACCOUNTINFO_BAD_APPLICATION;
359 }
360
361 const auto launch_property = system.GetARPManager().GetLaunchProperty((*iter)->GetTitleID());
362 356
363 if (launch_property.Failed()) { 357 if (launch_property.Failed()) {
364 LOG_ERROR(Service_ACC, "Failed to get launch property"); 358 LOG_ERROR(Service_ACC, "Failed to get launch property");
@@ -372,10 +366,12 @@ ResultCode Module::Interface::InitializeApplicationInfoBase(u64 process_id) {
372 case FileSys::StorageId::Host: 366 case FileSys::StorageId::Host:
373 case FileSys::StorageId::NandUser: 367 case FileSys::StorageId::NandUser:
374 case FileSys::StorageId::SdCard: 368 case FileSys::StorageId::SdCard:
369 case FileSys::StorageId::None: // Yuzu specific, differs from hardware
375 application_info.application_type = ApplicationType::Digital; 370 application_info.application_type = ApplicationType::Digital;
376 break; 371 break;
377 default: 372 default:
378 LOG_ERROR(Service_ACC, "Invalid game storage ID"); 373 LOG_ERROR(Service_ACC, "Invalid game storage ID! storage_id={}",
374 launch_property->base_game_storage_id);
379 return ERR_ACCOUNTINFO_BAD_APPLICATION; 375 return ERR_ACCOUNTINFO_BAD_APPLICATION;
380 } 376 }
381 377
@@ -389,7 +385,7 @@ void Module::Interface::GetBaasAccountManagerForApplication(Kernel::HLERequestCo
389 LOG_DEBUG(Service_ACC, "called"); 385 LOG_DEBUG(Service_ACC, "called");
390 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 386 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
391 rb.Push(RESULT_SUCCESS); 387 rb.Push(RESULT_SUCCESS);
392 rb.PushIpcInterface<IManagerForApplication>(); 388 rb.PushIpcInterface<IManagerForApplication>(profile_manager->GetLastOpenedUser());
393} 389}
394 390
395void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) { 391void Module::Interface::IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx) {
@@ -428,6 +424,17 @@ void Module::Interface::GetProfileEditor(Kernel::HLERequestContext& ctx) {
428 rb.PushIpcInterface<IProfileEditor>(user_id, *profile_manager); 424 rb.PushIpcInterface<IProfileEditor>(user_id, *profile_manager);
429} 425}
430 426
427void Module::Interface::ListQualifiedUsers(Kernel::HLERequestContext& ctx) {
428 LOG_DEBUG(Service_ACC, "called");
429
430 // All users should be qualified. We don't actually have parental control or anything to do with
431 // nintendo online currently. We're just going to assume the user running the game has access to
432 // the game regardless of parental control settings.
433 ctx.WriteBuffer(profile_manager->GetAllUsers());
434 IPC::ResponseBuilder rb{ctx, 2};
435 rb.Push(RESULT_SUCCESS);
436}
437
431void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) { 438void Module::Interface::TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx) {
432 LOG_DEBUG(Service_ACC, "called"); 439 LOG_DEBUG(Service_ACC, "called");
433 // A u8 is passed into this function which we can safely ignore. It's to determine if we have 440 // A u8 is passed into this function which we can safely ignore. It's to determine if we have
diff --git a/src/core/hle/service/acc/acc.h b/src/core/hle/service/acc/acc.h
index 7a7dc9ec6..74ca39d6e 100644
--- a/src/core/hle/service/acc/acc.h
+++ b/src/core/hle/service/acc/acc.h
@@ -33,9 +33,10 @@ public:
33 void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx); 33 void TrySelectUserWithoutInteraction(Kernel::HLERequestContext& ctx);
34 void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx); 34 void IsUserAccountSwitchLocked(Kernel::HLERequestContext& ctx);
35 void GetProfileEditor(Kernel::HLERequestContext& ctx); 35 void GetProfileEditor(Kernel::HLERequestContext& ctx);
36 void ListQualifiedUsers(Kernel::HLERequestContext& ctx);
36 37
37 private: 38 private:
38 ResultCode InitializeApplicationInfoBase(u64 process_id); 39 ResultCode InitializeApplicationInfoBase();
39 40
40 enum class ApplicationType : u32_le { 41 enum class ApplicationType : u32_le {
41 GameCard = 0, 42 GameCard = 0,
diff --git a/src/core/hle/service/acc/acc_su.cpp b/src/core/hle/service/acc/acc_su.cpp
index b941c260b..2eefc6df5 100644
--- a/src/core/hle/service/acc/acc_su.cpp
+++ b/src/core/hle/service/acc/acc_su.cpp
@@ -33,8 +33,10 @@ ACC_SU::ACC_SU(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
33 {111, nullptr, "ClearSaveDataThumbnail"}, 33 {111, nullptr, "ClearSaveDataThumbnail"},
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, 35 {113, nullptr, "GetSaveDataThumbnailExistence"},
36 {120, nullptr, "ListOpenUsersInApplication"},
36 {130, nullptr, "ActivateOpenContextRetention"}, 37 {130, nullptr, "ActivateOpenContextRetention"},
37 {140, nullptr, "ListQualifiedUsers"}, 38 {140, &ACC_SU::ListQualifiedUsers, "ListQualifiedUsers"},
39 {150, nullptr, "AuthenticateApplicationAsync"},
38 {190, nullptr, "GetUserLastOpenedApplication"}, 40 {190, nullptr, "GetUserLastOpenedApplication"},
39 {191, nullptr, "ActivateOpenContextHolder"}, 41 {191, nullptr, "ActivateOpenContextHolder"},
40 {200, nullptr, "BeginUserRegistration"}, 42 {200, nullptr, "BeginUserRegistration"},
diff --git a/src/core/hle/service/acc/acc_u0.cpp b/src/core/hle/service/acc/acc_u0.cpp
index 0ac19f4ff..fb4e7e772 100644
--- a/src/core/hle/service/acc/acc_u0.cpp
+++ b/src/core/hle/service/acc/acc_u0.cpp
@@ -32,7 +32,7 @@ ACC_U0::ACC_U0(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
32 {130, nullptr, "LoadOpenContext"}, 32 {130, nullptr, "LoadOpenContext"},
33 {131, nullptr, "ListOpenContextStoredUsers"}, 33 {131, nullptr, "ListOpenContextStoredUsers"},
34 {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"}, 34 {140, &ACC_U0::InitializeApplicationInfoRestricted, "InitializeApplicationInfoRestricted"},
35 {141, nullptr, "ListQualifiedUsers"}, 35 {141, &ACC_U0::ListQualifiedUsers, "ListQualifiedUsers"},
36 {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"}, 36 {150, &ACC_U0::IsUserAccountSwitchLocked, "IsUserAccountSwitchLocked"},
37 }; 37 };
38 // clang-format on 38 // clang-format on
diff --git a/src/core/hle/service/acc/acc_u1.cpp b/src/core/hle/service/acc/acc_u1.cpp
index 858e91dde..9f29cdc82 100644
--- a/src/core/hle/service/acc/acc_u1.cpp
+++ b/src/core/hle/service/acc/acc_u1.cpp
@@ -34,7 +34,8 @@ ACC_U1::ACC_U1(std::shared_ptr<Module> module, std::shared_ptr<ProfileManager> p
34 {112, nullptr, "LoadSaveDataThumbnail"}, 34 {112, nullptr, "LoadSaveDataThumbnail"},
35 {113, nullptr, "GetSaveDataThumbnailExistence"}, 35 {113, nullptr, "GetSaveDataThumbnailExistence"},
36 {130, nullptr, "ActivateOpenContextRetention"}, 36 {130, nullptr, "ActivateOpenContextRetention"},
37 {140, nullptr, "ListQualifiedUsers"}, 37 {140, &ACC_U1::ListQualifiedUsers, "ListQualifiedUsers"},
38 {150, nullptr, "AuthenticateApplicationAsync"},
38 {190, nullptr, "GetUserLastOpenedApplication"}, 39 {190, nullptr, "GetUserLastOpenedApplication"},
39 {191, nullptr, "ActivateOpenContextHolder"}, 40 {191, nullptr, "ActivateOpenContextHolder"},
40 {997, nullptr, "DebugInvalidateTokenCacheForUser"}, 41 {997, nullptr, "DebugInvalidateTokenCacheForUser"},
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 557608e76..a967e6ef7 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -43,20 +43,15 @@
43 43
44namespace Service::AM { 44namespace Service::AM {
45 45
46constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 0x2}; 46constexpr ResultCode ERR_NO_DATA_IN_CHANNEL{ErrorModule::AM, 2};
47constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 0x3}; 47constexpr ResultCode ERR_NO_MESSAGES{ErrorModule::AM, 3};
48constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 0x1F7}; 48constexpr ResultCode ERR_SIZE_OUT_OF_BOUNDS{ErrorModule::AM, 503};
49 49
50enum class LaunchParameterKind : u32 { 50enum class LaunchParameterKind : u32 {
51 ApplicationSpecific = 1, 51 ApplicationSpecific = 1,
52 AccountPreselectedUser = 2, 52 AccountPreselectedUser = 2,
53}; 53};
54 54
55enum class VrMode : u8 {
56 Disabled = 0,
57 Enabled = 1,
58};
59
60constexpr u32 LAUNCH_PARAMETER_ACCOUNT_PRESELECTED_USER_MAGIC = 0xC79497CA; 55constexpr u32 LAUNCH_PARAMETER_ACCOUNT_PRESELECTED_USER_MAGIC = 0xC79497CA;
61 56
62struct LaunchParameterAccountPreselectedUser { 57struct LaunchParameterAccountPreselectedUser {
@@ -235,6 +230,7 @@ IDebugFunctions::IDebugFunctions() : ServiceFramework{"IDebugFunctions"} {
235 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"}, 230 {30, nullptr, "RequestLaunchApplicationWithUserAndArgumentForDebug"},
236 {40, nullptr, "GetAppletResourceUsageInfo"}, 231 {40, nullptr, "GetAppletResourceUsageInfo"},
237 {100, nullptr, "SetCpuBoostModeForApplet"}, 232 {100, nullptr, "SetCpuBoostModeForApplet"},
233 {101, nullptr, "CancelCpuBoostModeForApplet"},
238 {110, nullptr, "PushToAppletBoundChannelForDebug"}, 234 {110, nullptr, "PushToAppletBoundChannelForDebug"},
239 {111, nullptr, "TryPopFromAppletBoundChannelForDebug"}, 235 {111, nullptr, "TryPopFromAppletBoundChannelForDebug"},
240 {120, nullptr, "AlarmSettingNotificationEnableAppEventReserve"}, 236 {120, nullptr, "AlarmSettingNotificationEnableAppEventReserve"},
@@ -277,6 +273,8 @@ ISelfController::ISelfController(Core::System& system,
277 {41, nullptr, "IsSystemBufferSharingEnabled"}, 273 {41, nullptr, "IsSystemBufferSharingEnabled"},
278 {42, nullptr, "GetSystemSharedLayerHandle"}, 274 {42, nullptr, "GetSystemSharedLayerHandle"},
279 {43, nullptr, "GetSystemSharedBufferHandle"}, 275 {43, nullptr, "GetSystemSharedBufferHandle"},
276 {44, nullptr, "CreateManagedDisplaySeparableLayer"},
277 {45, nullptr, "SetManagedDisplayLayerSeparationMode"},
280 {50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"}, 278 {50, &ISelfController::SetHandlesRequestToDisplay, "SetHandlesRequestToDisplay"},
281 {51, nullptr, "ApproveToDisplay"}, 279 {51, nullptr, "ApproveToDisplay"},
282 {60, nullptr, "OverrideAutoSleepTimeAndDimmingTime"}, 280 {60, nullptr, "OverrideAutoSleepTimeAndDimmingTime"},
@@ -623,11 +621,15 @@ ICommonStateGetter::ICommonStateGetter(Core::System& system,
623 {64, nullptr, "SetTvPowerStateMatchingMode"}, 621 {64, nullptr, "SetTvPowerStateMatchingMode"},
624 {65, nullptr, "GetApplicationIdByContentActionName"}, 622 {65, nullptr, "GetApplicationIdByContentActionName"},
625 {66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"}, 623 {66, &ICommonStateGetter::SetCpuBoostMode, "SetCpuBoostMode"},
624 {67, nullptr, "CancelCpuBoostMode"},
626 {80, nullptr, "PerformSystemButtonPressingIfInFocus"}, 625 {80, nullptr, "PerformSystemButtonPressingIfInFocus"},
627 {90, nullptr, "SetPerformanceConfigurationChangedNotification"}, 626 {90, nullptr, "SetPerformanceConfigurationChangedNotification"},
628 {91, nullptr, "GetCurrentPerformanceConfiguration"}, 627 {91, nullptr, "GetCurrentPerformanceConfiguration"},
628 {100, nullptr, "SetHandlingHomeButtonShortPressedEnabled"},
629 {200, nullptr, "GetOperationModeSystemInfo"}, 629 {200, nullptr, "GetOperationModeSystemInfo"},
630 {300, nullptr, "GetSettingsPlatformRegion"}, 630 {300, nullptr, "GetSettingsPlatformRegion"},
631 {400, nullptr, "ActivateMigrationService"},
632 {401, nullptr, "DeactivateMigrationService"},
631 }; 633 };
632 // clang-format on 634 // clang-format on
633 635
@@ -678,27 +680,21 @@ void ICommonStateGetter::GetCurrentFocusState(Kernel::HLERequestContext& ctx) {
678} 680}
679 681
680void ICommonStateGetter::IsVrModeEnabled(Kernel::HLERequestContext& ctx) { 682void ICommonStateGetter::IsVrModeEnabled(Kernel::HLERequestContext& ctx) {
681 LOG_WARNING(Service_AM, "(STUBBED) called"); 683 LOG_DEBUG(Service_AM, "called");
682 684
683 IPC::ResponseBuilder rb{ctx, 3}; 685 IPC::ResponseBuilder rb{ctx, 3};
684 rb.Push(RESULT_SUCCESS); 686 rb.Push(RESULT_SUCCESS);
685 rb.PushEnum(VrMode::Disabled); 687 rb.Push(vr_mode_state);
686} 688}
687 689
688void ICommonStateGetter::SetVrModeEnabled(Kernel::HLERequestContext& ctx) { 690void ICommonStateGetter::SetVrModeEnabled(Kernel::HLERequestContext& ctx) {
689 IPC::RequestParser rp{ctx}; 691 IPC::RequestParser rp{ctx};
690 const auto is_vr_mode_enabled = rp.Pop<bool>(); 692 vr_mode_state = rp.Pop<bool>();
691 693
692 LOG_WARNING(Service_AM, "(STUBBED) called. is_vr_mode_enabled={}", is_vr_mode_enabled); 694 LOG_WARNING(Service_AM, "VR Mode is {}", vr_mode_state ? "on" : "off");
693 695
694 IPC::ResponseBuilder rb{ctx, 2}; 696 IPC::ResponseBuilder rb{ctx, 2};
695 if (!is_vr_mode_enabled) { 697 rb.Push(RESULT_SUCCESS);
696 rb.Push(RESULT_SUCCESS);
697 } else {
698 // TODO: Find better error code for this
699 UNIMPLEMENTED_MSG("is_vr_mode_enabled={}", is_vr_mode_enabled);
700 rb.Push(RESULT_UNKNOWN);
701 }
702} 698}
703 699
704void ICommonStateGetter::SetLcdBacklighOffEnabled(Kernel::HLERequestContext& ctx) { 700void ICommonStateGetter::SetLcdBacklighOffEnabled(Kernel::HLERequestContext& ctx) {
@@ -835,6 +831,7 @@ public:
835 {25, nullptr, "Terminate"}, 831 {25, nullptr, "Terminate"},
836 {30, &ILibraryAppletAccessor::GetResult, "GetResult"}, 832 {30, &ILibraryAppletAccessor::GetResult, "GetResult"},
837 {50, nullptr, "SetOutOfFocusApplicationSuspendingEnabled"}, 833 {50, nullptr, "SetOutOfFocusApplicationSuspendingEnabled"},
834 {60, nullptr, "PresetLibraryAppletGpuTimeSliceZero"},
838 {100, &ILibraryAppletAccessor::PushInData, "PushInData"}, 835 {100, &ILibraryAppletAccessor::PushInData, "PushInData"},
839 {101, &ILibraryAppletAccessor::PopOutData, "PopOutData"}, 836 {101, &ILibraryAppletAccessor::PopOutData, "PopOutData"},
840 {102, nullptr, "PushExtraStorage"}, 837 {102, nullptr, "PushExtraStorage"},
@@ -903,7 +900,7 @@ private:
903 void PopOutData(Kernel::HLERequestContext& ctx) { 900 void PopOutData(Kernel::HLERequestContext& ctx) {
904 LOG_DEBUG(Service_AM, "called"); 901 LOG_DEBUG(Service_AM, "called");
905 902
906 const auto storage = applet->GetBroker().PopNormalDataToGame(); 903 auto storage = applet->GetBroker().PopNormalDataToGame();
907 if (storage == nullptr) { 904 if (storage == nullptr) {
908 LOG_ERROR(Service_AM, 905 LOG_ERROR(Service_AM,
909 "storage is a nullptr. There is no data in the current normal channel"); 906 "storage is a nullptr. There is no data in the current normal channel");
@@ -934,7 +931,7 @@ private:
934 void PopInteractiveOutData(Kernel::HLERequestContext& ctx) { 931 void PopInteractiveOutData(Kernel::HLERequestContext& ctx) {
935 LOG_DEBUG(Service_AM, "called"); 932 LOG_DEBUG(Service_AM, "called");
936 933
937 const auto storage = applet->GetBroker().PopInteractiveDataToGame(); 934 auto storage = applet->GetBroker().PopInteractiveDataToGame();
938 if (storage == nullptr) { 935 if (storage == nullptr) {
939 LOG_ERROR(Service_AM, 936 LOG_ERROR(Service_AM,
940 "storage is a nullptr. There is no data in the current interactive channel"); 937 "storage is a nullptr. There is no data in the current interactive channel");
@@ -1139,6 +1136,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1139 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"}, 1136 {31, &IApplicationFunctions::EndBlockingHomeButtonShortAndLongPressed, "EndBlockingHomeButtonShortAndLongPressed"},
1140 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"}, 1137 {32, &IApplicationFunctions::BeginBlockingHomeButton, "BeginBlockingHomeButton"},
1141 {33, &IApplicationFunctions::EndBlockingHomeButton, "EndBlockingHomeButton"}, 1138 {33, &IApplicationFunctions::EndBlockingHomeButton, "EndBlockingHomeButton"},
1139 {34, nullptr, "SelectApplicationLicense"},
1142 {40, &IApplicationFunctions::NotifyRunning, "NotifyRunning"}, 1140 {40, &IApplicationFunctions::NotifyRunning, "NotifyRunning"},
1143 {50, &IApplicationFunctions::GetPseudoDeviceId, "GetPseudoDeviceId"}, 1141 {50, &IApplicationFunctions::GetPseudoDeviceId, "GetPseudoDeviceId"},
1144 {60, nullptr, "SetMediaPlaybackStateForApplication"}, 1142 {60, nullptr, "SetMediaPlaybackStateForApplication"},
@@ -1148,6 +1146,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1148 {68, nullptr, "RequestFlushGamePlayingMovieForDebug"}, 1146 {68, nullptr, "RequestFlushGamePlayingMovieForDebug"},
1149 {70, nullptr, "RequestToShutdown"}, 1147 {70, nullptr, "RequestToShutdown"},
1150 {71, nullptr, "RequestToReboot"}, 1148 {71, nullptr, "RequestToReboot"},
1149 {72, nullptr, "RequestToSleep"},
1151 {80, nullptr, "ExitAndRequestToShowThanksMessage"}, 1150 {80, nullptr, "ExitAndRequestToShowThanksMessage"},
1152 {90, &IApplicationFunctions::EnableApplicationCrashReport, "EnableApplicationCrashReport"}, 1151 {90, &IApplicationFunctions::EnableApplicationCrashReport, "EnableApplicationCrashReport"},
1153 {100, &IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer, "InitializeApplicationCopyrightFrameBuffer"}, 1152 {100, &IApplicationFunctions::InitializeApplicationCopyrightFrameBuffer, "InitializeApplicationCopyrightFrameBuffer"},
@@ -1159,7 +1158,7 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1159 {121, nullptr, "ClearUserChannel"}, 1158 {121, nullptr, "ClearUserChannel"},
1160 {122, nullptr, "UnpopToUserChannel"}, 1159 {122, nullptr, "UnpopToUserChannel"},
1161 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"}, 1160 {130, &IApplicationFunctions::GetGpuErrorDetectedSystemEvent, "GetGpuErrorDetectedSystemEvent"},
1162 {140, nullptr, "GetFriendInvitationStorageChannelEvent"}, 1161 {140, &IApplicationFunctions::GetFriendInvitationStorageChannelEvent, "GetFriendInvitationStorageChannelEvent"},
1163 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"}, 1162 {141, nullptr, "TryPopFromFriendInvitationStorageChannel"},
1164 {150, nullptr, "GetNotificationStorageChannelEvent"}, 1163 {150, nullptr, "GetNotificationStorageChannelEvent"},
1165 {151, nullptr, "TryPopFromNotificationStorageChannel"}, 1164 {151, nullptr, "TryPopFromNotificationStorageChannel"},
@@ -1176,6 +1175,9 @@ IApplicationFunctions::IApplicationFunctions(Core::System& system_)
1176 auto& kernel = system.Kernel(); 1175 auto& kernel = system.Kernel();
1177 gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair( 1176 gpu_error_detected_event = Kernel::WritableEvent::CreateEventPair(
1178 kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent"); 1177 kernel, "IApplicationFunctions:GpuErrorDetectedSystemEvent");
1178
1179 friend_invitation_storage_channel_event = Kernel::WritableEvent::CreateEventPair(
1180 kernel, "IApplicationFunctions:FriendInvitationStorageChannelEvent");
1179} 1181}
1180 1182
1181IApplicationFunctions::~IApplicationFunctions() = default; 1183IApplicationFunctions::~IApplicationFunctions() = default;
@@ -1333,12 +1335,23 @@ void IApplicationFunctions::SetTerminateResult(Kernel::HLERequestContext& ctx) {
1333} 1335}
1334 1336
1335void IApplicationFunctions::GetDisplayVersion(Kernel::HLERequestContext& ctx) { 1337void IApplicationFunctions::GetDisplayVersion(Kernel::HLERequestContext& ctx) {
1336 LOG_WARNING(Service_AM, "(STUBBED) called"); 1338 LOG_DEBUG(Service_AM, "called");
1339
1340 std::array<u8, 0x10> version_string{};
1341
1342 FileSys::PatchManager pm{system.CurrentProcess()->GetTitleID()};
1343 const auto res = pm.GetControlMetadata();
1344 if (res.first != nullptr) {
1345 const auto& version = res.first->GetVersionString();
1346 std::copy(version.begin(), version.end(), version_string.begin());
1347 } else {
1348 constexpr u128 default_version = {1, 0};
1349 std::memcpy(version_string.data(), default_version.data(), sizeof(u128));
1350 }
1337 1351
1338 IPC::ResponseBuilder rb{ctx, 6}; 1352 IPC::ResponseBuilder rb{ctx, 6};
1339 rb.Push(RESULT_SUCCESS); 1353 rb.Push(RESULT_SUCCESS);
1340 rb.Push<u64>(1); 1354 rb.PushRaw(version_string);
1341 rb.Push<u64>(0);
1342} 1355}
1343 1356
1344void IApplicationFunctions::GetDesiredLanguage(Kernel::HLERequestContext& ctx) { 1357void IApplicationFunctions::GetDesiredLanguage(Kernel::HLERequestContext& ctx) {
@@ -1490,6 +1503,14 @@ void IApplicationFunctions::GetGpuErrorDetectedSystemEvent(Kernel::HLERequestCon
1490 rb.PushCopyObjects(gpu_error_detected_event.readable); 1503 rb.PushCopyObjects(gpu_error_detected_event.readable);
1491} 1504}
1492 1505
1506void IApplicationFunctions::GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx) {
1507 LOG_DEBUG(Service_AM, "called");
1508
1509 IPC::ResponseBuilder rb{ctx, 2, 1};
1510 rb.Push(RESULT_SUCCESS);
1511 rb.PushCopyObjects(friend_invitation_storage_channel_event.readable);
1512}
1513
1493void InstallInterfaces(SM::ServiceManager& service_manager, 1514void InstallInterfaces(SM::ServiceManager& service_manager,
1494 std::shared_ptr<NVFlinger::NVFlinger> nvflinger, Core::System& system) { 1515 std::shared_ptr<NVFlinger::NVFlinger> nvflinger, Core::System& system) {
1495 auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel()); 1516 auto message_queue = std::make_shared<AppletMessageQueue>(system.Kernel());
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 53cfce10f..dfa701d73 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -191,6 +191,7 @@ private:
191 191
192 Core::System& system; 192 Core::System& system;
193 std::shared_ptr<AppletMessageQueue> msg_queue; 193 std::shared_ptr<AppletMessageQueue> msg_queue;
194 bool vr_mode_state{};
194}; 195};
195 196
196class IStorageImpl { 197class IStorageImpl {
@@ -280,10 +281,12 @@ private:
280 void QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx); 281 void QueryApplicationPlayStatistics(Kernel::HLERequestContext& ctx);
281 void QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx); 282 void QueryApplicationPlayStatisticsByUid(Kernel::HLERequestContext& ctx);
282 void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx); 283 void GetGpuErrorDetectedSystemEvent(Kernel::HLERequestContext& ctx);
284 void GetFriendInvitationStorageChannelEvent(Kernel::HLERequestContext& ctx);
283 285
284 bool launch_popped_application_specific = false; 286 bool launch_popped_application_specific = false;
285 bool launch_popped_account_preselect = false; 287 bool launch_popped_account_preselect = false;
286 Kernel::EventPair gpu_error_detected_event; 288 Kernel::EventPair gpu_error_detected_event;
289 Kernel::EventPair friend_invitation_storage_channel_event;
287 Core::System& system; 290 Core::System& system;
288}; 291};
289 292
diff --git a/src/core/hle/service/audio/audctl.cpp b/src/core/hle/service/audio/audctl.cpp
index 9e08e5346..6ddb547fb 100644
--- a/src/core/hle/service/audio/audctl.cpp
+++ b/src/core/hle/service/audio/audctl.cpp
@@ -39,6 +39,8 @@ AudCtl::AudCtl() : ServiceFramework{"audctl"} {
39 {25, nullptr, "GetAudioVolumeDataForPlayReport"}, 39 {25, nullptr, "GetAudioVolumeDataForPlayReport"},
40 {26, nullptr, "UpdateHeadphoneSettings"}, 40 {26, nullptr, "UpdateHeadphoneSettings"},
41 {27, nullptr, "SetVolumeMappingTableForDev"}, 41 {27, nullptr, "SetVolumeMappingTableForDev"},
42 {28, nullptr, "GetAudioOutputChannelCountForPlayReport"},
43 {29, nullptr, "BindAudioOutputChannelCountUpdateEventForPlayReport"},
42 }; 44 };
43 // clang-format on 45 // clang-format on
44 46
diff --git a/src/core/hle/service/audio/audin_u.cpp b/src/core/hle/service/audio/audin_u.cpp
index d7f1d348d..3e2299426 100644
--- a/src/core/hle/service/audio/audin_u.cpp
+++ b/src/core/hle/service/audio/audin_u.cpp
@@ -2,6 +2,9 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/logging/log.h"
6#include "core/hle/ipc_helpers.h"
7#include "core/hle/kernel/hle_ipc.h"
5#include "core/hle/service/audio/audin_u.h" 8#include "core/hle/service/audio/audin_u.h"
6 9
7namespace Service::Audio { 10namespace Service::Audio {
@@ -36,11 +39,12 @@ public:
36AudInU::AudInU() : ServiceFramework("audin:u") { 39AudInU::AudInU() : ServiceFramework("audin:u") {
37 // clang-format off 40 // clang-format off
38 static const FunctionInfo functions[] = { 41 static const FunctionInfo functions[] = {
39 {0, nullptr, "ListAudioIns"}, 42 {0, &AudInU::ListAudioIns, "ListAudioIns"},
40 {1, nullptr, "OpenAudioIn"}, 43 {1, &AudInU::OpenAudioIn, "OpenAudioIn"},
41 {2, nullptr, "Unknown"}, 44 {2, &AudInU::ListAudioIns, "ListAudioInsAuto"},
42 {3, nullptr, "OpenAudioInAuto"}, 45 {3, &AudInU::OpenAudioIn, "OpenAudioInAuto"},
43 {4, nullptr, "ListAudioInsAuto"}, 46 {4, &AudInU::ListAudioInsAutoFiltered, "ListAudioInsAutoFiltered"},
47 {5, &AudInU::OpenAudioInProtocolSpecified, "OpenAudioInProtocolSpecified"},
44 }; 48 };
45 // clang-format on 49 // clang-format on
46 50
@@ -49,4 +53,60 @@ AudInU::AudInU() : ServiceFramework("audin:u") {
49 53
50AudInU::~AudInU() = default; 54AudInU::~AudInU() = default;
51 55
56void AudInU::ListAudioIns(Kernel::HLERequestContext& ctx) {
57 LOG_DEBUG(Service_Audio, "called");
58 const std::size_t count = ctx.GetWriteBufferSize() / sizeof(AudioInDeviceName);
59
60 const std::size_t device_count = std::min(count, audio_device_names.size());
61 std::vector<AudioInDeviceName> device_names;
62 device_names.reserve(device_count);
63
64 for (std::size_t i = 0; i < device_count; i++) {
65 const auto& device_name = audio_device_names[i];
66 auto& entry = device_names.emplace_back();
67 device_name.copy(entry.data(), device_name.size());
68 }
69
70 ctx.WriteBuffer(device_names);
71
72 IPC::ResponseBuilder rb{ctx, 3};
73 rb.Push(RESULT_SUCCESS);
74 rb.Push(static_cast<u32>(device_names.size()));
75}
76
77void AudInU::ListAudioInsAutoFiltered(Kernel::HLERequestContext& ctx) {
78 LOG_DEBUG(Service_Audio, "called");
79 constexpr u32 device_count = 0;
80
81 // Since we don't actually use any other audio input devices, we return 0 devices. Filtered
82 // device listing just omits the default input device
83
84 IPC::ResponseBuilder rb{ctx, 3};
85 rb.Push(RESULT_SUCCESS);
86 rb.Push(static_cast<u32>(device_count));
87}
88
89void AudInU::OpenInOutImpl(Kernel::HLERequestContext& ctx) {
90 AudInOutParams params{};
91 params.channel_count = 2;
92 params.sample_format = SampleFormat::PCM16;
93 params.sample_rate = 48000;
94 params.state = State::Started;
95
96 IPC::ResponseBuilder rb{ctx, 6, 0, 1};
97 rb.Push(RESULT_SUCCESS);
98 rb.PushRaw<AudInOutParams>(params);
99 rb.PushIpcInterface<IAudioIn>();
100}
101
102void AudInU::OpenAudioIn(Kernel::HLERequestContext& ctx) {
103 LOG_WARNING(Service_Audio, "(STUBBED) called");
104 OpenInOutImpl(ctx);
105}
106
107void AudInU::OpenAudioInProtocolSpecified(Kernel::HLERequestContext& ctx) {
108 LOG_WARNING(Service_Audio, "(STUBBED) called");
109 OpenInOutImpl(ctx);
110}
111
52} // namespace Service::Audio 112} // namespace Service::Audio
diff --git a/src/core/hle/service/audio/audin_u.h b/src/core/hle/service/audio/audin_u.h
index 0538b9560..a599f4a64 100644
--- a/src/core/hle/service/audio/audin_u.h
+++ b/src/core/hle/service/audio/audin_u.h
@@ -16,6 +16,35 @@ class AudInU final : public ServiceFramework<AudInU> {
16public: 16public:
17 explicit AudInU(); 17 explicit AudInU();
18 ~AudInU() override; 18 ~AudInU() override;
19
20private:
21 enum class SampleFormat : u32_le {
22 PCM16 = 2,
23 };
24
25 enum class State : u32_le {
26 Started = 0,
27 Stopped = 1,
28 };
29
30 struct AudInOutParams {
31 u32_le sample_rate{};
32 u32_le channel_count{};
33 SampleFormat sample_format{};
34 State state{};
35 };
36 static_assert(sizeof(AudInOutParams) == 0x10, "AudInOutParams is an invalid size");
37
38 using AudioInDeviceName = std::array<char, 256>;
39 static constexpr std::array<std::string_view, 1> audio_device_names{{
40 "BuiltInHeadset",
41 }};
42
43 void ListAudioIns(Kernel::HLERequestContext& ctx);
44 void ListAudioInsAutoFiltered(Kernel::HLERequestContext& ctx);
45 void OpenInOutImpl(Kernel::HLERequestContext& ctx);
46 void OpenAudioIn(Kernel::HLERequestContext& ctx);
47 void OpenAudioInProtocolSpecified(Kernel::HLERequestContext& ctx);
19}; 48};
20 49
21} // namespace Service::Audio 50} // namespace Service::Audio
diff --git a/src/core/hle/service/audio/audren_u.cpp b/src/core/hle/service/audio/audren_u.cpp
index 175cabf45..d8359abaa 100644
--- a/src/core/hle/service/audio/audren_u.cpp
+++ b/src/core/hle/service/audio/audren_u.cpp
@@ -92,11 +92,16 @@ private:
92 } 92 }
93 93
94 void RequestUpdateImpl(Kernel::HLERequestContext& ctx) { 94 void RequestUpdateImpl(Kernel::HLERequestContext& ctx) {
95 LOG_WARNING(Service_Audio, "(STUBBED) called"); 95 LOG_DEBUG(Service_Audio, "(STUBBED) called");
96
97 auto result = renderer->UpdateAudioRenderer(ctx.ReadBuffer());
98
99 if (result.Succeeded()) {
100 ctx.WriteBuffer(result.Unwrap());
101 }
96 102
97 ctx.WriteBuffer(renderer->UpdateAudioRenderer(ctx.ReadBuffer()));
98 IPC::ResponseBuilder rb{ctx, 2}; 103 IPC::ResponseBuilder rb{ctx, 2};
99 rb.Push(RESULT_SUCCESS); 104 rb.Push(result.Code());
100 } 105 }
101 106
102 void Start(Kernel::HLERequestContext& ctx) { 107 void Start(Kernel::HLERequestContext& ctx) {
@@ -252,8 +257,6 @@ private:
252 } 257 }
253 258
254 void GetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) { 259 void GetAudioDeviceOutputVolume(Kernel::HLERequestContext& ctx) {
255 IPC::RequestParser rp{ctx};
256
257 const auto device_name_buffer = ctx.ReadBuffer(); 260 const auto device_name_buffer = ctx.ReadBuffer();
258 const std::string name = Common::StringFromBuffer(device_name_buffer); 261 const std::string name = Common::StringFromBuffer(device_name_buffer);
259 262
diff --git a/src/core/hle/service/bcat/backend/boxcat.cpp b/src/core/hle/service/bcat/backend/boxcat.cpp
index f589864ee..5febe8fc1 100644
--- a/src/core/hle/service/bcat/backend/boxcat.cpp
+++ b/src/core/hle/service/bcat/backend/boxcat.cpp
@@ -18,6 +18,7 @@
18#include "core/hle/service/bcat/backend/boxcat.h" 18#include "core/hle/service/bcat/backend/boxcat.h"
19#include "core/settings.h" 19#include "core/settings.h"
20 20
21namespace Service::BCAT {
21namespace { 22namespace {
22 23
23// Prevents conflicts with windows macro called CreateFile 24// Prevents conflicts with windows macro called CreateFile
@@ -30,10 +31,6 @@ bool VfsDeleteFileWrap(FileSys::VirtualDir dir, std::string_view name) {
30 return dir->DeleteFile(name); 31 return dir->DeleteFile(name);
31} 32}
32 33
33} // Anonymous namespace
34
35namespace Service::BCAT {
36
37constexpr ResultCode ERROR_GENERAL_BCAT_FAILURE{ErrorModule::BCAT, 1}; 34constexpr ResultCode ERROR_GENERAL_BCAT_FAILURE{ErrorModule::BCAT, 1};
38 35
39constexpr char BOXCAT_HOSTNAME[] = "api.yuzu-emu.org"; 36constexpr char BOXCAT_HOSTNAME[] = "api.yuzu-emu.org";
@@ -90,8 +87,6 @@ constexpr u32 PORT = 443;
90constexpr u32 TIMEOUT_SECONDS = 30; 87constexpr u32 TIMEOUT_SECONDS = 30;
91[[maybe_unused]] constexpr u64 VFS_COPY_BLOCK_SIZE = 1ULL << 24; // 4MB 88[[maybe_unused]] constexpr u64 VFS_COPY_BLOCK_SIZE = 1ULL << 24; // 4MB
92 89
93namespace {
94
95std::string GetBINFilePath(u64 title_id) { 90std::string GetBINFilePath(u64 title_id) {
96 return fmt::format("{}bcat/{:016X}/launchparam.bin", 91 return fmt::format("{}bcat/{:016X}/launchparam.bin",
97 FileUtil::GetUserPath(FileUtil::UserPath::CacheDir), title_id); 92 FileUtil::GetUserPath(FileUtil::UserPath::CacheDir), title_id);
diff --git a/src/core/hle/service/bcat/module.cpp b/src/core/hle/service/bcat/module.cpp
index 7ada67130..34aba7a27 100644
--- a/src/core/hle/service/bcat/module.cpp
+++ b/src/core/hle/service/bcat/module.cpp
@@ -141,6 +141,7 @@ public:
141 {20301, nullptr, "RequestSuspendDeliveryTask"}, 141 {20301, nullptr, "RequestSuspendDeliveryTask"},
142 {20400, nullptr, "RegisterSystemApplicationDeliveryTask"}, 142 {20400, nullptr, "RegisterSystemApplicationDeliveryTask"},
143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"}, 143 {20401, nullptr, "UnregisterSystemApplicationDeliveryTask"},
144 {20410, nullptr, "SetSystemApplicationDeliveryTaskTimer"},
144 {30100, &IBcatService::SetPassphrase, "SetPassphrase"}, 145 {30100, &IBcatService::SetPassphrase, "SetPassphrase"},
145 {30200, nullptr, "RegisterBackgroundDeliveryTask"}, 146 {30200, nullptr, "RegisterBackgroundDeliveryTask"},
146 {30201, nullptr, "UnregisterBackgroundDeliveryTask"}, 147 {30201, nullptr, "UnregisterBackgroundDeliveryTask"},
diff --git a/src/core/hle/service/caps/caps_su.cpp b/src/core/hle/service/caps/caps_su.cpp
index 2b4c2d808..e8b0698e8 100644
--- a/src/core/hle/service/caps/caps_su.cpp
+++ b/src/core/hle/service/caps/caps_su.cpp
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include "common/logging/log.h"
6#include "core/hle/ipc_helpers.h"
5#include "core/hle/service/caps/caps_su.h" 7#include "core/hle/service/caps/caps_su.h"
6 8
7namespace Service::Capture { 9namespace Service::Capture {
@@ -9,8 +11,11 @@ namespace Service::Capture {
9CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") { 11CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
10 // clang-format off 12 // clang-format off
11 static const FunctionInfo functions[] = { 13 static const FunctionInfo functions[] = {
14 {32, &CAPS_SU::SetShimLibraryVersion, "SetShimLibraryVersion"},
12 {201, nullptr, "SaveScreenShot"}, 15 {201, nullptr, "SaveScreenShot"},
13 {203, nullptr, "SaveScreenShotEx0"}, 16 {203, nullptr, "SaveScreenShotEx0"},
17 {205, nullptr, "SaveScreenShotEx1"},
18 {210, nullptr, "SaveScreenShotEx2"},
14 }; 19 };
15 // clang-format on 20 // clang-format on
16 21
@@ -19,4 +24,11 @@ CAPS_SU::CAPS_SU() : ServiceFramework("caps:su") {
19 24
20CAPS_SU::~CAPS_SU() = default; 25CAPS_SU::~CAPS_SU() = default;
21 26
27void CAPS_SU::SetShimLibraryVersion(Kernel::HLERequestContext& ctx) {
28 LOG_WARNING(Service_Capture, "(STUBBED) called");
29
30 IPC::ResponseBuilder rb{ctx, 2};
31 rb.Push(RESULT_SUCCESS);
32}
33
22} // namespace Service::Capture 34} // namespace Service::Capture
diff --git a/src/core/hle/service/caps/caps_su.h b/src/core/hle/service/caps/caps_su.h
index cb11f7c9a..c494d7c84 100644
--- a/src/core/hle/service/caps/caps_su.h
+++ b/src/core/hle/service/caps/caps_su.h
@@ -16,6 +16,9 @@ class CAPS_SU final : public ServiceFramework<CAPS_SU> {
16public: 16public:
17 explicit CAPS_SU(); 17 explicit CAPS_SU();
18 ~CAPS_SU() override; 18 ~CAPS_SU() override;
19
20private:
21 void SetShimLibraryVersion(Kernel::HLERequestContext& ctx);
19}; 22};
20 23
21} // namespace Service::Capture 24} // namespace Service::Capture
diff --git a/src/core/hle/service/es/es.cpp b/src/core/hle/service/es/es.cpp
index df00ae625..f8e9df4b1 100644
--- a/src/core/hle/service/es/es.cpp
+++ b/src/core/hle/service/es/es.cpp
@@ -4,6 +4,7 @@
4 4
5#include "core/crypto/key_manager.h" 5#include "core/crypto/key_manager.h"
6#include "core/hle/ipc_helpers.h" 6#include "core/hle/ipc_helpers.h"
7#include "core/hle/service/es/es.h"
7#include "core/hle/service/service.h" 8#include "core/hle/service/service.h"
8 9
9namespace Service::ES { 10namespace Service::ES {
@@ -76,7 +77,6 @@ private:
76 } 77 }
77 78
78 void ImportTicket(Kernel::HLERequestContext& ctx) { 79 void ImportTicket(Kernel::HLERequestContext& ctx) {
79 IPC::RequestParser rp{ctx};
80 const auto ticket = ctx.ReadBuffer(); 80 const auto ticket = ctx.ReadBuffer();
81 const auto cert = ctx.ReadBuffer(1); 81 const auto cert = ctx.ReadBuffer(1);
82 82
diff --git a/src/core/hle/service/filesystem/fsp_srv.cpp b/src/core/hle/service/filesystem/fsp_srv.cpp
index 61045c75c..f6503fe2f 100644
--- a/src/core/hle/service/filesystem/fsp_srv.cpp
+++ b/src/core/hle/service/filesystem/fsp_srv.cpp
@@ -316,8 +316,8 @@ public:
316 {8, &IFileSystem::OpenFile, "OpenFile"}, 316 {8, &IFileSystem::OpenFile, "OpenFile"},
317 {9, &IFileSystem::OpenDirectory, "OpenDirectory"}, 317 {9, &IFileSystem::OpenDirectory, "OpenDirectory"},
318 {10, &IFileSystem::Commit, "Commit"}, 318 {10, &IFileSystem::Commit, "Commit"},
319 {11, nullptr, "GetFreeSpaceSize"}, 319 {11, &IFileSystem::GetFreeSpaceSize, "GetFreeSpaceSize"},
320 {12, nullptr, "GetTotalSpaceSize"}, 320 {12, &IFileSystem::GetTotalSpaceSize, "GetTotalSpaceSize"},
321 {13, &IFileSystem::CleanDirectoryRecursively, "CleanDirectoryRecursively"}, 321 {13, &IFileSystem::CleanDirectoryRecursively, "CleanDirectoryRecursively"},
322 {14, nullptr, "GetFileTimeStampRaw"}, 322 {14, nullptr, "GetFileTimeStampRaw"},
323 {15, nullptr, "QueryEntry"}, 323 {15, nullptr, "QueryEntry"},
@@ -697,12 +697,14 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
697 {68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"}, 697 {68, nullptr, "OpenSaveDataInfoReaderBySaveDataFilter"},
698 {69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"}, 698 {69, nullptr, "ReadSaveDataFileSystemExtraDataBySaveDataAttribute"},
699 {70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"}, 699 {70, nullptr, "WriteSaveDataFileSystemExtraDataBySaveDataAttribute"},
700 {71, nullptr, "ReadSaveDataFileSystemExtraDataWithMaskBySaveDataAttribute"},
700 {80, nullptr, "OpenSaveDataMetaFile"}, 701 {80, nullptr, "OpenSaveDataMetaFile"},
701 {81, nullptr, "OpenSaveDataTransferManager"}, 702 {81, nullptr, "OpenSaveDataTransferManager"},
702 {82, nullptr, "OpenSaveDataTransferManagerVersion2"}, 703 {82, nullptr, "OpenSaveDataTransferManagerVersion2"},
703 {83, nullptr, "OpenSaveDataTransferProhibiterForCloudBackUp"}, 704 {83, nullptr, "OpenSaveDataTransferProhibiterForCloudBackUp"},
704 {84, nullptr, "ListApplicationAccessibleSaveDataOwnerId"}, 705 {84, nullptr, "ListApplicationAccessibleSaveDataOwnerId"},
705 {85, nullptr, "OpenSaveDataTransferManagerForSaveDataRepair"}, 706 {85, nullptr, "OpenSaveDataTransferManagerForSaveDataRepair"},
707 {86, nullptr, "OpenSaveDataMover"},
706 {100, nullptr, "OpenImageDirectoryFileSystem"}, 708 {100, nullptr, "OpenImageDirectoryFileSystem"},
707 {110, nullptr, "OpenContentStorageFileSystem"}, 709 {110, nullptr, "OpenContentStorageFileSystem"},
708 {120, nullptr, "OpenCloudBackupWorkStorageFileSystem"}, 710 {120, nullptr, "OpenCloudBackupWorkStorageFileSystem"},
@@ -762,9 +764,11 @@ FSP_SRV::FSP_SRV(FileSystemController& fsc, const Core::Reporter& reporter)
762 {1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"}, 764 {1011, &FSP_SRV::GetAccessLogVersionInfo, "GetAccessLogVersionInfo"},
763 {1012, nullptr, "GetFsStackUsage"}, 765 {1012, nullptr, "GetFsStackUsage"},
764 {1013, nullptr, "UnsetSaveDataRootPath"}, 766 {1013, nullptr, "UnsetSaveDataRootPath"},
767 {1014, nullptr, "OutputMultiProgramTagAccessLog"},
765 {1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"}, 768 {1100, nullptr, "OverrideSaveDataTransferTokenSignVerificationKey"},
766 {1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"}, 769 {1110, nullptr, "CorruptSaveDataFileSystemBySaveDataSpaceId2"},
767 {1200, nullptr, "OpenMultiCommitManager"}, 770 {1200, nullptr, "OpenMultiCommitManager"},
771 {1300, nullptr, "OpenBisWiper"},
768 }; 772 };
769 // clang-format on 773 // clang-format on
770 RegisterHandlers(functions); 774 RegisterHandlers(functions);
diff --git a/src/core/hle/service/friend/friend.cpp b/src/core/hle/service/friend/friend.cpp
index 7938b4b80..68f259b70 100644
--- a/src/core/hle/service/friend/friend.cpp
+++ b/src/core/hle/service/friend/friend.cpp
@@ -96,6 +96,7 @@ public:
96 {30830, nullptr, "ClearPlayLog"}, 96 {30830, nullptr, "ClearPlayLog"},
97 {30900, nullptr, "SendFriendInvitation"}, 97 {30900, nullptr, "SendFriendInvitation"},
98 {30910, nullptr, "ReadFriendInvitation"}, 98 {30910, nullptr, "ReadFriendInvitation"},
99 {30911, nullptr, "ReadAllFriendInvitations"},
99 {49900, nullptr, "DeleteNetworkServiceAccountCache"}, 100 {49900, nullptr, "DeleteNetworkServiceAccountCache"},
100 }; 101 };
101 // clang-format on 102 // clang-format on
diff --git a/src/core/hle/service/glue/errors.h b/src/core/hle/service/glue/errors.h
index c2874c585..f6647f724 100644
--- a/src/core/hle/service/glue/errors.h
+++ b/src/core/hle/service/glue/errors.h
@@ -8,9 +8,9 @@
8 8
9namespace Service::Glue { 9namespace Service::Glue {
10 10
11constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 0x1E}; 11constexpr ResultCode ERR_INVALID_RESOURCE{ErrorModule::ARP, 30};
12constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 0x1F}; 12constexpr ResultCode ERR_INVALID_PROCESS_ID{ErrorModule::ARP, 31};
13constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 0x2A}; 13constexpr ResultCode ERR_INVALID_ACCESS{ErrorModule::ARP, 42};
14constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 0x66}; 14constexpr ResultCode ERR_NOT_REGISTERED{ErrorModule::ARP, 102};
15 15
16} // namespace Service::Glue 16} // namespace Service::Glue
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index 2ccfffc19..c55d900e2 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -502,7 +502,7 @@ void Controller_NPad::SetNpadMode(u32 npad_id, NPadAssignments assignment_mode)
502 502
503void Controller_NPad::VibrateController(const std::vector<u32>& controller_ids, 503void Controller_NPad::VibrateController(const std::vector<u32>& controller_ids,
504 const std::vector<Vibration>& vibrations) { 504 const std::vector<Vibration>& vibrations) {
505 LOG_WARNING(Service_HID, "(STUBBED) called"); 505 LOG_DEBUG(Service_HID, "(STUBBED) called");
506 506
507 if (!can_controllers_vibrate) { 507 if (!can_controllers_vibrate) {
508 return; 508 return;
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index d6031a987..5559587e3 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -233,7 +233,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
233 {302, nullptr, "StopConsoleSixAxisSensor"}, 233 {302, nullptr, "StopConsoleSixAxisSensor"},
234 {303, nullptr, "ActivateSevenSixAxisSensor"}, 234 {303, nullptr, "ActivateSevenSixAxisSensor"},
235 {304, nullptr, "StartSevenSixAxisSensor"}, 235 {304, nullptr, "StartSevenSixAxisSensor"},
236 {305, nullptr, "StopSevenSixAxisSensor"}, 236 {305, &Hid::StopSevenSixAxisSensor, "StopSevenSixAxisSensor"},
237 {306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"}, 237 {306, &Hid::InitializeSevenSixAxisSensor, "InitializeSevenSixAxisSensor"},
238 {307, nullptr, "FinalizeSevenSixAxisSensor"}, 238 {307, nullptr, "FinalizeSevenSixAxisSensor"},
239 {308, nullptr, "SetSevenSixAxisSensorFusionStrength"}, 239 {308, nullptr, "SetSevenSixAxisSensorFusionStrength"},
@@ -282,6 +282,7 @@ Hid::Hid(Core::System& system) : ServiceFramework("hid"), system(system) {
282 {1001, nullptr, "GetNpadCommunicationMode"}, 282 {1001, nullptr, "GetNpadCommunicationMode"},
283 {1002, nullptr, "SetTouchScreenConfiguration"}, 283 {1002, nullptr, "SetTouchScreenConfiguration"},
284 {1003, nullptr, "IsFirmwareUpdateNeededForNotification"}, 284 {1003, nullptr, "IsFirmwareUpdateNeededForNotification"},
285 {2000, nullptr, "ActivateDigitizer"},
285 }; 286 };
286 // clang-format on 287 // clang-format on
287 288
@@ -852,6 +853,17 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
852 rb.Push(RESULT_SUCCESS); 853 rb.Push(RESULT_SUCCESS);
853} 854}
854 855
856void Hid::StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
857 IPC::RequestParser rp{ctx};
858 const auto applet_resource_user_id{rp.Pop<u64>()};
859
860 LOG_WARNING(Service_HID, "(STUBBED) called, applet_resource_user_id={}",
861 applet_resource_user_id);
862
863 IPC::ResponseBuilder rb{ctx, 2};
864 rb.Push(RESULT_SUCCESS);
865}
866
855void Hid::InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx) { 867void Hid::InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx) {
856 LOG_WARNING(Service_HID, "(STUBBED) called"); 868 LOG_WARNING(Service_HID, "(STUBBED) called");
857 869
@@ -870,6 +882,7 @@ public:
870 {10, nullptr, "DeactivateTouchScreen"}, 882 {10, nullptr, "DeactivateTouchScreen"},
871 {11, nullptr, "SetTouchScreenAutoPilotState"}, 883 {11, nullptr, "SetTouchScreenAutoPilotState"},
872 {12, nullptr, "UnsetTouchScreenAutoPilotState"}, 884 {12, nullptr, "UnsetTouchScreenAutoPilotState"},
885 {13, nullptr, "GetTouchScreenConfiguration"},
873 {20, nullptr, "DeactivateMouse"}, 886 {20, nullptr, "DeactivateMouse"},
874 {21, nullptr, "SetMouseAutoPilotState"}, 887 {21, nullptr, "SetMouseAutoPilotState"},
875 {22, nullptr, "UnsetMouseAutoPilotState"}, 888 {22, nullptr, "UnsetMouseAutoPilotState"},
@@ -879,7 +892,9 @@ public:
879 {50, nullptr, "DeactivateXpad"}, 892 {50, nullptr, "DeactivateXpad"},
880 {51, nullptr, "SetXpadAutoPilotState"}, 893 {51, nullptr, "SetXpadAutoPilotState"},
881 {52, nullptr, "UnsetXpadAutoPilotState"}, 894 {52, nullptr, "UnsetXpadAutoPilotState"},
882 {60, nullptr, "DeactivateJoyXpad"}, 895 {60, nullptr, "ClearNpadSystemCommonPolicy"},
896 {61, nullptr, "DeactivateNpad"},
897 {62, nullptr, "ForceDisconnectNpad"},
883 {91, nullptr, "DeactivateGesture"}, 898 {91, nullptr, "DeactivateGesture"},
884 {110, nullptr, "DeactivateHomeButton"}, 899 {110, nullptr, "DeactivateHomeButton"},
885 {111, nullptr, "SetHomeButtonAutoPilotState"}, 900 {111, nullptr, "SetHomeButtonAutoPilotState"},
@@ -899,6 +914,15 @@ public:
899 {141, nullptr, "GetConsoleSixAxisSensorSamplingFrequency"}, 914 {141, nullptr, "GetConsoleSixAxisSensorSamplingFrequency"},
900 {142, nullptr, "DeactivateSevenSixAxisSensor"}, 915 {142, nullptr, "DeactivateSevenSixAxisSensor"},
901 {143, nullptr, "GetConsoleSixAxisSensorCountStates"}, 916 {143, nullptr, "GetConsoleSixAxisSensorCountStates"},
917 {144, nullptr, "GetAccelerometerFsr"},
918 {145, nullptr, "SetAccelerometerFsr"},
919 {146, nullptr, "GetAccelerometerOdr"},
920 {147, nullptr, "SetAccelerometerOdr"},
921 {148, nullptr, "GetGyroscopeFsr"},
922 {149, nullptr, "SetGyroscopeFsr"},
923 {150, nullptr, "GetGyroscopeOdr"},
924 {151, nullptr, "SetGyroscopeOdr"},
925 {152, nullptr, "GetWhoAmI"},
902 {201, nullptr, "ActivateFirmwareUpdate"}, 926 {201, nullptr, "ActivateFirmwareUpdate"},
903 {202, nullptr, "DeactivateFirmwareUpdate"}, 927 {202, nullptr, "DeactivateFirmwareUpdate"},
904 {203, nullptr, "StartFirmwareUpdate"}, 928 {203, nullptr, "StartFirmwareUpdate"},
@@ -927,6 +951,17 @@ public:
927 {233, nullptr, "ClearPairingInfo"}, 951 {233, nullptr, "ClearPairingInfo"},
928 {234, nullptr, "GetUniquePadDeviceTypeSetInternal"}, 952 {234, nullptr, "GetUniquePadDeviceTypeSetInternal"},
929 {235, nullptr, "EnableAnalogStickPower"}, 953 {235, nullptr, "EnableAnalogStickPower"},
954 {236, nullptr, "RequestKuinaUartClockCal"},
955 {237, nullptr, "GetKuinaUartClockCal"},
956 {238, nullptr, "SetKuinaUartClockTrim"},
957 {239, nullptr, "KuinaLoopbackTest"},
958 {240, nullptr, "RequestBatteryVoltage"},
959 {241, nullptr, "GetBatteryVoltage"},
960 {242, nullptr, "GetUniquePadPowerInfo"},
961 {243, nullptr, "RebootUniquePad"},
962 {244, nullptr, "RequestKuinaFirmwareVersion"},
963 {245, nullptr, "GetKuinaFirmwareVersion"},
964 {246, nullptr, "GetVidPid"},
930 {301, nullptr, "GetAbstractedPadHandles"}, 965 {301, nullptr, "GetAbstractedPadHandles"},
931 {302, nullptr, "GetAbstractedPadState"}, 966 {302, nullptr, "GetAbstractedPadState"},
932 {303, nullptr, "GetAbstractedPadsState"}, 967 {303, nullptr, "GetAbstractedPadsState"},
@@ -945,6 +980,17 @@ public:
945 {350, nullptr, "AddRegisteredDevice"}, 980 {350, nullptr, "AddRegisteredDevice"},
946 {400, nullptr, "DisableExternalMcuOnNxDevice"}, 981 {400, nullptr, "DisableExternalMcuOnNxDevice"},
947 {401, nullptr, "DisableRailDeviceFiltering"}, 982 {401, nullptr, "DisableRailDeviceFiltering"},
983 {402, nullptr, "EnableWiredPairing"},
984 {403, nullptr, "EnableShipmentModeAutoClear"},
985 {500, nullptr, "SetFactoryInt"},
986 {501, nullptr, "IsFactoryBootEnabled"},
987 {550, nullptr, "SetAnalogStickModelDataTemporarily"},
988 {551, nullptr, "GetAnalogStickModelData"},
989 {552, nullptr, "ResetAnalogStickModelData"},
990 {600, nullptr, "ConvertPadState"},
991 {2000, nullptr, "DeactivateDigitizer"},
992 {2001, nullptr, "SetDigitizerAutoPilotState"},
993 {2002, nullptr, "UnsetDigitizerAutoPilotState"},
948 }; 994 };
949 // clang-format on 995 // clang-format on
950 996
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index 039c38b58..23552efb1 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -128,6 +128,7 @@ private:
128 void StopSixAxisSensor(Kernel::HLERequestContext& ctx); 128 void StopSixAxisSensor(Kernel::HLERequestContext& ctx);
129 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); 129 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
130 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); 130 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
131 void StopSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
131 void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); 132 void InitializeSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
132 133
133 std::shared_ptr<IAppletResource> applet_resource; 134 std::shared_ptr<IAppletResource> applet_resource;
diff --git a/src/core/hle/service/ldr/ldr.cpp b/src/core/hle/service/ldr/ldr.cpp
index 0cde7a557..6ad3be1b3 100644
--- a/src/core/hle/service/ldr/ldr.cpp
+++ b/src/core/hle/service/ldr/ldr.cpp
@@ -116,6 +116,7 @@ public:
116 {1, nullptr, "GetProgramInfo"}, 116 {1, nullptr, "GetProgramInfo"},
117 {2, nullptr, "RegisterTitle"}, 117 {2, nullptr, "RegisterTitle"},
118 {3, nullptr, "UnregisterTitle"}, 118 {3, nullptr, "UnregisterTitle"},
119 {4, nullptr, "SetEnabledProgramVerification"},
119 }; 120 };
120 // clang-format on 121 // clang-format on
121 122
diff --git a/src/core/hle/service/ncm/ncm.cpp b/src/core/hle/service/ncm/ncm.cpp
index 89e283ca5..ec9aae04a 100644
--- a/src/core/hle/service/ncm/ncm.cpp
+++ b/src/core/hle/service/ncm/ncm.cpp
@@ -122,6 +122,7 @@ public:
122 {11, nullptr, "ActivateContentMetaDatabase"}, 122 {11, nullptr, "ActivateContentMetaDatabase"},
123 {12, nullptr, "InactivateContentMetaDatabase"}, 123 {12, nullptr, "InactivateContentMetaDatabase"},
124 {13, nullptr, "InvalidateRightsIdCache"}, 124 {13, nullptr, "InvalidateRightsIdCache"},
125 {14, nullptr, "GetMemoryReport"},
125 }; 126 };
126 // clang-format on 127 // clang-format on
127 128
diff --git a/src/core/hle/service/nim/nim.cpp b/src/core/hle/service/nim/nim.cpp
index e85f123e2..f19affce7 100644
--- a/src/core/hle/service/nim/nim.cpp
+++ b/src/core/hle/service/nim/nim.cpp
@@ -15,6 +15,66 @@
15 15
16namespace Service::NIM { 16namespace Service::NIM {
17 17
18class IShopServiceAsync final : public ServiceFramework<IShopServiceAsync> {
19public:
20 IShopServiceAsync() : ServiceFramework("IShopServiceAsync") {
21 // clang-format off
22 static const FunctionInfo functions[] = {
23 {0, nullptr, "Cancel"},
24 {1, nullptr, "GetSize"},
25 {2, nullptr, "Read"},
26 {3, nullptr, "GetErrorCode"},
27 {4, nullptr, "Request"},
28 {5, nullptr, "Prepare"},
29 };
30 // clang-format on
31
32 RegisterHandlers(functions);
33 }
34};
35
36class IShopServiceAccessor final : public ServiceFramework<IShopServiceAccessor> {
37public:
38 IShopServiceAccessor() : ServiceFramework("IShopServiceAccessor") {
39 // clang-format off
40 static const FunctionInfo functions[] = {
41 {0, &IShopServiceAccessor::CreateAsyncInterface, "CreateAsyncInterface"},
42 };
43 // clang-format on
44
45 RegisterHandlers(functions);
46 }
47
48private:
49 void CreateAsyncInterface(Kernel::HLERequestContext& ctx) {
50 LOG_WARNING(Service_NIM, "(STUBBED) called");
51 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
52 rb.Push(RESULT_SUCCESS);
53 rb.PushIpcInterface<IShopServiceAsync>();
54 }
55};
56
57class IShopServiceAccessServer final : public ServiceFramework<IShopServiceAccessServer> {
58public:
59 IShopServiceAccessServer() : ServiceFramework("IShopServiceAccessServer") {
60 // clang-format off
61 static const FunctionInfo functions[] = {
62 {0, &IShopServiceAccessServer::CreateAccessorInterface, "CreateAccessorInterface"},
63 };
64 // clang-format on
65
66 RegisterHandlers(functions);
67 }
68
69private:
70 void CreateAccessorInterface(Kernel::HLERequestContext& ctx) {
71 LOG_WARNING(Service_NIM, "(STUBBED) called");
72 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
73 rb.Push(RESULT_SUCCESS);
74 rb.PushIpcInterface<IShopServiceAccessor>();
75 }
76};
77
18class NIM final : public ServiceFramework<NIM> { 78class NIM final : public ServiceFramework<NIM> {
19public: 79public:
20 explicit NIM() : ServiceFramework{"nim"} { 80 explicit NIM() : ServiceFramework{"nim"} {
@@ -78,7 +138,7 @@ public:
78 explicit NIM_ECA() : ServiceFramework{"nim:eca"} { 138 explicit NIM_ECA() : ServiceFramework{"nim:eca"} {
79 // clang-format off 139 // clang-format off
80 static const FunctionInfo functions[] = { 140 static const FunctionInfo functions[] = {
81 {0, nullptr, "CreateServerInterface"}, 141 {0, &NIM_ECA::CreateServerInterface, "CreateServerInterface"},
82 {1, nullptr, "RefreshDebugAvailability"}, 142 {1, nullptr, "RefreshDebugAvailability"},
83 {2, nullptr, "ClearDebugResponse"}, 143 {2, nullptr, "ClearDebugResponse"},
84 {3, nullptr, "RegisterDebugResponse"}, 144 {3, nullptr, "RegisterDebugResponse"},
@@ -87,6 +147,14 @@ public:
87 147
88 RegisterHandlers(functions); 148 RegisterHandlers(functions);
89 } 149 }
150
151private:
152 void CreateServerInterface(Kernel::HLERequestContext& ctx) {
153 LOG_WARNING(Service_NIM, "(STUBBED) called");
154 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
155 rb.Push(RESULT_SUCCESS);
156 rb.PushIpcInterface<IShopServiceAccessServer>();
157 }
90}; 158};
91 159
92class NIM_SHP final : public ServiceFramework<NIM_SHP> { 160class NIM_SHP final : public ServiceFramework<NIM_SHP> {
diff --git a/src/core/hle/service/npns/npns.cpp b/src/core/hle/service/npns/npns.cpp
index aa171473b..f38d01084 100644
--- a/src/core/hle/service/npns/npns.cpp
+++ b/src/core/hle/service/npns/npns.cpp
@@ -48,6 +48,8 @@ public:
48 {151, nullptr, "GetStateWithHandover"}, 48 {151, nullptr, "GetStateWithHandover"},
49 {152, nullptr, "GetStateChangeEventWithHandover"}, 49 {152, nullptr, "GetStateChangeEventWithHandover"},
50 {153, nullptr, "GetDropEventWithHandover"}, 50 {153, nullptr, "GetDropEventWithHandover"},
51 {161, nullptr, "GetRequestChangeStateCancelEvent"},
52 {162, nullptr, "RequestChangeStateForceTimedWithCancelEvent"},
51 {201, nullptr, "RequestChangeStateForceTimed"}, 53 {201, nullptr, "RequestChangeStateForceTimed"},
52 {202, nullptr, "RequestChangeStateForceAsync"}, 54 {202, nullptr, "RequestChangeStateForceAsync"},
53 }; 55 };
diff --git a/src/core/hle/service/ns/ns.cpp b/src/core/hle/service/ns/ns.cpp
index fdab3cf78..7e5ceccdb 100644
--- a/src/core/hle/service/ns/ns.cpp
+++ b/src/core/hle/service/ns/ns.cpp
@@ -110,6 +110,10 @@ IApplicationManagerInterface::IApplicationManagerInterface()
110 {100, nullptr, "ResetToFactorySettings"}, 110 {100, nullptr, "ResetToFactorySettings"},
111 {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"}, 111 {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"},
112 {102, nullptr, "ResetToFactorySettingsForRefurbishment"}, 112 {102, nullptr, "ResetToFactorySettingsForRefurbishment"},
113 {103, nullptr, "ResetToFactorySettingsWithPlatformRegion"},
114 {104, nullptr, "ResetToFactorySettingsWithPlatformRegionAuthentication"},
115 {105, nullptr, "RequestResetToFactorySettingsSecurely"},
116 {106, nullptr, "RequestResetToFactorySettingsWithPlatformRegionAuthenticationSecurely"},
113 {200, nullptr, "CalculateUserSaveDataStatistics"}, 117 {200, nullptr, "CalculateUserSaveDataStatistics"},
114 {201, nullptr, "DeleteUserSaveDataAll"}, 118 {201, nullptr, "DeleteUserSaveDataAll"},
115 {210, nullptr, "DeleteUserSystemSaveData"}, 119 {210, nullptr, "DeleteUserSystemSaveData"},
@@ -191,6 +195,9 @@ IApplicationManagerInterface::IApplicationManagerInterface()
191 {1307, nullptr, "TryDeleteRunningApplicationContentEntities"}, 195 {1307, nullptr, "TryDeleteRunningApplicationContentEntities"},
192 {1308, nullptr, "DeleteApplicationCompletelyForDebug"}, 196 {1308, nullptr, "DeleteApplicationCompletelyForDebug"},
193 {1309, nullptr, "CleanupUnavailableAddOnContents"}, 197 {1309, nullptr, "CleanupUnavailableAddOnContents"},
198 {1310, nullptr, "RequestMoveApplicationEntity"},
199 {1311, nullptr, "EstimateSizeToMove"},
200 {1312, nullptr, "HasMovableEntity"},
194 {1400, nullptr, "PrepareShutdown"}, 201 {1400, nullptr, "PrepareShutdown"},
195 {1500, nullptr, "FormatSdCard"}, 202 {1500, nullptr, "FormatSdCard"},
196 {1501, nullptr, "NeedsSystemUpdateToFormatSdCard"}, 203 {1501, nullptr, "NeedsSystemUpdateToFormatSdCard"},
@@ -241,7 +248,7 @@ IApplicationManagerInterface::IApplicationManagerInterface()
241 {2153, nullptr, "DeactivateRightsEnvironment"}, 248 {2153, nullptr, "DeactivateRightsEnvironment"},
242 {2154, nullptr, "ForceActivateRightsContextForExit"}, 249 {2154, nullptr, "ForceActivateRightsContextForExit"},
243 {2155, nullptr, "UpdateRightsEnvironmentStatus"}, 250 {2155, nullptr, "UpdateRightsEnvironmentStatus"},
244 {2156, nullptr, "CreateRightsEnvironmentForPreomia"}, 251 {2156, nullptr, "CreateRightsEnvironmentForMicroApplication"},
245 {2160, nullptr, "AddTargetApplicationToRightsEnvironment"}, 252 {2160, nullptr, "AddTargetApplicationToRightsEnvironment"},
246 {2161, nullptr, "SetUsersToRightsEnvironment"}, 253 {2161, nullptr, "SetUsersToRightsEnvironment"},
247 {2170, nullptr, "GetRightsEnvironmentStatus"}, 254 {2170, nullptr, "GetRightsEnvironmentStatus"},
@@ -258,6 +265,7 @@ IApplicationManagerInterface::IApplicationManagerInterface()
258 {2350, nullptr, "PerformAutoUpdateByApplicationId"}, 265 {2350, nullptr, "PerformAutoUpdateByApplicationId"},
259 {2351, nullptr, "RequestNoDownloadRightsErrorResolution"}, 266 {2351, nullptr, "RequestNoDownloadRightsErrorResolution"},
260 {2352, nullptr, "RequestResolveNoDownloadRightsError"}, 267 {2352, nullptr, "RequestResolveNoDownloadRightsError"},
268 {2353, nullptr, "GetApplicationDownloadTaskInfo"},
261 {2400, nullptr, "GetPromotionInfo"}, 269 {2400, nullptr, "GetPromotionInfo"},
262 {2401, nullptr, "CountPromotionInfo"}, 270 {2401, nullptr, "CountPromotionInfo"},
263 {2402, nullptr, "ListPromotionInfo"}, 271 {2402, nullptr, "ListPromotionInfo"},
@@ -266,9 +274,12 @@ IApplicationManagerInterface::IApplicationManagerInterface()
266 {2500, nullptr, "ConfirmAvailableTime"}, 274 {2500, nullptr, "ConfirmAvailableTime"},
267 {2510, nullptr, "CreateApplicationResource"}, 275 {2510, nullptr, "CreateApplicationResource"},
268 {2511, nullptr, "GetApplicationResource"}, 276 {2511, nullptr, "GetApplicationResource"},
269 {2513, nullptr, "LaunchPreomia"}, 277 {2513, nullptr, "LaunchMicroApplication"},
270 {2514, nullptr, "ClearTaskOfAsyncTaskManager"}, 278 {2514, nullptr, "ClearTaskOfAsyncTaskManager"},
279 {2515, nullptr, "CleanupAllPlaceHolderAndFragmentsIfNoTask"},
280 {2516, nullptr, "EnsureApplicationCertificate"},
271 {2800, nullptr, "GetApplicationIdOfPreomia"}, 281 {2800, nullptr, "GetApplicationIdOfPreomia"},
282 {9999, nullptr, "GetApplicationCertificate"},
272 }; 283 };
273 // clang-format on 284 // clang-format on
274 285
@@ -360,10 +371,15 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
360 // Convert to application language, get priority list 371 // Convert to application language, get priority list
361 const auto application_language = ConvertToApplicationLanguage(language_code); 372 const auto application_language = ConvertToApplicationLanguage(language_code);
362 if (application_language == std::nullopt) { 373 if (application_language == std::nullopt) {
374 LOG_ERROR(Service_NS, "Could not convert application language! language_code={}",
375 language_code);
363 return ERR_APPLICATION_LANGUAGE_NOT_FOUND; 376 return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
364 } 377 }
365 const auto priority_list = GetApplicationLanguagePriorityList(*application_language); 378 const auto priority_list = GetApplicationLanguagePriorityList(*application_language);
366 if (!priority_list) { 379 if (!priority_list) {
380 LOG_ERROR(Service_NS,
381 "Could not find application language priorities! application_language={}",
382 *application_language);
367 return ERR_APPLICATION_LANGUAGE_NOT_FOUND; 383 return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
368 } 384 }
369 385
@@ -375,6 +391,8 @@ ResultVal<u8> IApplicationManagerInterface::GetApplicationDesiredLanguage(
375 } 391 }
376 } 392 }
377 393
394 LOG_ERROR(Service_NS, "Could not find a valid language! supported_languages={:08X}",
395 supported_languages);
378 return ERR_APPLICATION_LANGUAGE_NOT_FOUND; 396 return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
379} 397}
380 398
@@ -399,6 +417,7 @@ ResultVal<u64> IApplicationManagerInterface::ConvertApplicationLanguageToLanguag
399 const auto language_code = 417 const auto language_code =
400 ConvertToLanguageCode(static_cast<ApplicationLanguage>(application_language)); 418 ConvertToLanguageCode(static_cast<ApplicationLanguage>(application_language));
401 if (language_code == std::nullopt) { 419 if (language_code == std::nullopt) {
420 LOG_ERROR(Service_NS, "Language not found! application_language={}", application_language);
402 return ERR_APPLICATION_LANGUAGE_NOT_FOUND; 421 return ERR_APPLICATION_LANGUAGE_NOT_FOUND;
403 } 422 }
404 423
@@ -505,6 +524,10 @@ IFactoryResetInterface::IFactoryResetInterface::IFactoryResetInterface()
505 {100, nullptr, "ResetToFactorySettings"}, 524 {100, nullptr, "ResetToFactorySettings"},
506 {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"}, 525 {101, nullptr, "ResetToFactorySettingsWithoutUserSaveData"},
507 {102, nullptr, "ResetToFactorySettingsForRefurbishment"}, 526 {102, nullptr, "ResetToFactorySettingsForRefurbishment"},
527 {103, nullptr, "ResetToFactorySettingsWithPlatformRegion"},
528 {104, nullptr, "ResetToFactorySettingsWithPlatformRegionAuthentication"},
529 {105, nullptr, "RequestResetToFactorySettingsSecurely"},
530 {106, nullptr, "RequestResetToFactorySettingsWithPlatformRegionAuthenticationSecurely"},
508 }; 531 };
509 // clang-format on 532 // clang-format on
510 533
@@ -553,6 +576,9 @@ public:
553 {10, nullptr, "TerminateApplication2"}, 576 {10, nullptr, "TerminateApplication2"},
554 {11, nullptr, "GetRunningApplicationProcessId"}, 577 {11, nullptr, "GetRunningApplicationProcessId"},
555 {12, nullptr, "SetCurrentApplicationRightsEnvironmentCanBeActive"}, 578 {12, nullptr, "SetCurrentApplicationRightsEnvironmentCanBeActive"},
579 {13, nullptr, "CreateApplicationResourceForDevelop"},
580 {14, nullptr, "IsPreomiaForDevelop"},
581 {15, nullptr, "GetApplicationProgramIdFromHost"},
556 }; 582 };
557 // clang-format on 583 // clang-format on
558 584
diff --git a/src/core/hle/service/ns/pl_u.cpp b/src/core/hle/service/ns/pl_u.cpp
index ab1746d28..6efdf1606 100644
--- a/src/core/hle/service/ns/pl_u.cpp
+++ b/src/core/hle/service/ns/pl_u.cpp
@@ -164,6 +164,7 @@ PL_U::PL_U(Core::System& system)
164 {6, nullptr, "GetSharedFontInOrderOfPriorityForSystem"}, 164 {6, nullptr, "GetSharedFontInOrderOfPriorityForSystem"},
165 {100, nullptr, "RequestApplicationFunctionAuthorization"}, 165 {100, nullptr, "RequestApplicationFunctionAuthorization"},
166 {101, nullptr, "RequestApplicationFunctionAuthorizationForSystem"}, 166 {101, nullptr, "RequestApplicationFunctionAuthorizationForSystem"},
167 {102, nullptr, "RequestApplicationFunctionAuthorizationByApplicationId"},
167 {1000, nullptr, "LoadNgWordDataForPlatformRegionChina"}, 168 {1000, nullptr, "LoadNgWordDataForPlatformRegionChina"},
168 {1001, nullptr, "GetNgWordDataSizeForPlatformRegionChina"}, 169 {1001, nullptr, "GetNgWordDataSizeForPlatformRegionChina"},
169 }; 170 };
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 642b0a2cb..07b644ec5 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -159,9 +159,10 @@ private:
159 static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size"); 159 static_assert(sizeof(IoctlFlushL2) == 8, "IoctlFlushL2 is incorrect size");
160 160
161 struct IoctlGetGpuTime { 161 struct IoctlGetGpuTime {
162 u64_le gpu_time; 162 u64_le gpu_time{};
163 INSERT_PADDING_WORDS(2);
163 }; 164 };
164 static_assert(sizeof(IoctlGetGpuTime) == 8, "IoctlGetGpuTime is incorrect size"); 165 static_assert(sizeof(IoctlGetGpuTime) == 0x10, "IoctlGetGpuTime is incorrect size");
165 166
166 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output, 167 u32 GetCharacteristics(const std::vector<u8>& input, std::vector<u8>& output,
167 std::vector<u8>& output2, IoctlVersion version); 168 std::vector<u8>& output2, IoctlVersion version);
diff --git a/src/core/hle/service/pctl/module.cpp b/src/core/hle/service/pctl/module.cpp
index c75b4ee34..caf14ed61 100644
--- a/src/core/hle/service/pctl/module.cpp
+++ b/src/core/hle/service/pctl/module.cpp
@@ -31,6 +31,8 @@ public:
31 {1014, nullptr, "ConfirmPlayableApplicationVideoOld"}, 31 {1014, nullptr, "ConfirmPlayableApplicationVideoOld"},
32 {1015, nullptr, "ConfirmPlayableApplicationVideo"}, 32 {1015, nullptr, "ConfirmPlayableApplicationVideo"},
33 {1016, nullptr, "ConfirmShowNewsPermission"}, 33 {1016, nullptr, "ConfirmShowNewsPermission"},
34 {1017, nullptr, "EndFreeCommunication"},
35 {1018, nullptr, "IsFreeCommunicationAvailable"},
34 {1031, nullptr, "IsRestrictionEnabled"}, 36 {1031, nullptr, "IsRestrictionEnabled"},
35 {1032, nullptr, "GetSafetyLevel"}, 37 {1032, nullptr, "GetSafetyLevel"},
36 {1033, nullptr, "SetSafetyLevel"}, 38 {1033, nullptr, "SetSafetyLevel"},
diff --git a/src/core/hle/service/prepo/prepo.cpp b/src/core/hle/service/prepo/prepo.cpp
index 8f1be0e48..14309c679 100644
--- a/src/core/hle/service/prepo/prepo.cpp
+++ b/src/core/hle/service/prepo/prepo.cpp
@@ -21,8 +21,10 @@ public:
21 static const FunctionInfo functions[] = { 21 static const FunctionInfo functions[] = {
22 {10100, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old>, "SaveReportOld"}, 22 {10100, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old>, "SaveReportOld"},
23 {10101, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old>, "SaveReportWithUserOld"}, 23 {10101, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old>, "SaveReportWithUserOld"},
24 {10102, &PlayReport::SaveReport<Core::Reporter::PlayReportType::New>, "SaveReport"}, 24 {10102, &PlayReport::SaveReport<Core::Reporter::PlayReportType::Old2>, "SaveReportOld2"},
25 {10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::New>, "SaveReportWithUser"}, 25 {10103, &PlayReport::SaveReportWithUser<Core::Reporter::PlayReportType::Old2>, "SaveReportWithUserOld2"},
26 {10104, nullptr, "SaveReport"},
27 {10105, nullptr, "SaveReportWithUser"},
26 {10200, nullptr, "RequestImmediateTransmission"}, 28 {10200, nullptr, "RequestImmediateTransmission"},
27 {10300, nullptr, "GetTransmissionStatus"}, 29 {10300, nullptr, "GetTransmissionStatus"},
28 {10400, nullptr, "GetSystemSessionId"}, 30 {10400, nullptr, "GetSystemSessionId"},
@@ -35,8 +37,10 @@ public:
35 {30400, nullptr, "GetStatistics"}, 37 {30400, nullptr, "GetStatistics"},
36 {30401, nullptr, "GetThroughputHistory"}, 38 {30401, nullptr, "GetThroughputHistory"},
37 {30500, nullptr, "GetLastUploadError"}, 39 {30500, nullptr, "GetLastUploadError"},
40 {30600, nullptr, "GetApplicationUploadSummary"},
38 {40100, nullptr, "IsUserAgreementCheckEnabled"}, 41 {40100, nullptr, "IsUserAgreementCheckEnabled"},
39 {40101, nullptr, "SetUserAgreementCheckEnabled"}, 42 {40101, nullptr, "SetUserAgreementCheckEnabled"},
43 {50100, nullptr, "ReadAllApplicationReportFiles"},
40 {90100, nullptr, "ReadAllReportFiles"}, 44 {90100, nullptr, "ReadAllReportFiles"},
41 }; 45 };
42 // clang-format on 46 // clang-format on
@@ -51,7 +55,7 @@ private:
51 const auto process_id = rp.PopRaw<u64>(); 55 const auto process_id = rp.PopRaw<u64>();
52 56
53 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)}; 57 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)};
54 if (Type == Core::Reporter::PlayReportType::New) { 58 if constexpr (Type == Core::Reporter::PlayReportType::Old2) {
55 data.emplace_back(ctx.ReadBuffer(1)); 59 data.emplace_back(ctx.ReadBuffer(1));
56 } 60 }
57 61
@@ -71,7 +75,7 @@ private:
71 const auto user_id = rp.PopRaw<u128>(); 75 const auto user_id = rp.PopRaw<u128>();
72 const auto process_id = rp.PopRaw<u64>(); 76 const auto process_id = rp.PopRaw<u64>();
73 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)}; 77 std::vector<std::vector<u8>> data{ctx.ReadBuffer(0)};
74 if (Type == Core::Reporter::PlayReportType::New) { 78 if constexpr (Type == Core::Reporter::PlayReportType::Old2) {
75 data.emplace_back(ctx.ReadBuffer(1)); 79 data.emplace_back(ctx.ReadBuffer(1));
76 } 80 }
77 81
diff --git a/src/core/hle/service/ptm/psm.cpp b/src/core/hle/service/ptm/psm.cpp
index c2d5fda94..12d154ecf 100644
--- a/src/core/hle/service/ptm/psm.cpp
+++ b/src/core/hle/service/ptm/psm.cpp
@@ -12,9 +12,6 @@
12 12
13namespace Service::PSM { 13namespace Service::PSM {
14 14
15constexpr u32 BATTERY_FULLY_CHARGED = 100; // 100% Full
16constexpr u32 BATTERY_CURRENTLY_CHARGING = 1; // Plugged into an official dock
17
18class PSM final : public ServiceFramework<PSM> { 15class PSM final : public ServiceFramework<PSM> {
19public: 16public:
20 explicit PSM() : ServiceFramework{"psm"} { 17 explicit PSM() : ServiceFramework{"psm"} {
@@ -48,20 +45,30 @@ public:
48 45
49private: 46private:
50 void GetBatteryChargePercentage(Kernel::HLERequestContext& ctx) { 47 void GetBatteryChargePercentage(Kernel::HLERequestContext& ctx) {
51 LOG_WARNING(Service_PSM, "(STUBBED) called"); 48 LOG_DEBUG(Service_PSM, "called");
52 49
53 IPC::ResponseBuilder rb{ctx, 3}; 50 IPC::ResponseBuilder rb{ctx, 3};
54 rb.Push(RESULT_SUCCESS); 51 rb.Push(RESULT_SUCCESS);
55 rb.Push<u32>(BATTERY_FULLY_CHARGED); 52 rb.Push<u32>(battery_charge_percentage);
56 } 53 }
57 54
58 void GetChargerType(Kernel::HLERequestContext& ctx) { 55 void GetChargerType(Kernel::HLERequestContext& ctx) {
59 LOG_WARNING(Service_PSM, "(STUBBED) called"); 56 LOG_DEBUG(Service_PSM, "called");
60 57
61 IPC::ResponseBuilder rb{ctx, 3}; 58 IPC::ResponseBuilder rb{ctx, 3};
62 rb.Push(RESULT_SUCCESS); 59 rb.Push(RESULT_SUCCESS);
63 rb.Push<u32>(BATTERY_CURRENTLY_CHARGING); 60 rb.PushEnum(charger_type);
64 } 61 }
62
63 enum class ChargerType : u32 {
64 Unplugged = 0,
65 RegularCharger = 1,
66 LowPowerCharger = 2,
67 Unknown = 3,
68 };
69
70 u32 battery_charge_percentage{100}; // 100%
71 ChargerType charger_type{ChargerType::RegularCharger};
65}; 72};
66 73
67void InstallInterfaces(SM::ServiceManager& sm) { 74void InstallInterfaces(SM::ServiceManager& sm) {
diff --git a/src/core/hle/service/set/set.cpp b/src/core/hle/service/set/set.cpp
index 9e12c76fc..f3b4b286c 100644
--- a/src/core/hle/service/set/set.cpp
+++ b/src/core/hle/service/set/set.cpp
@@ -67,6 +67,7 @@ void SET::MakeLanguageCode(Kernel::HLERequestContext& ctx) {
67 const auto index = rp.Pop<u32>(); 67 const auto index = rp.Pop<u32>();
68 68
69 if (index >= available_language_codes.size()) { 69 if (index >= available_language_codes.size()) {
70 LOG_ERROR(Service_SET, "Invalid language code index! index={}", index);
70 IPC::ResponseBuilder rb{ctx, 2}; 71 IPC::ResponseBuilder rb{ctx, 2};
71 rb.Push(ERR_INVALID_LANGUAGE); 72 rb.Push(ERR_INVALID_LANGUAGE);
72 return; 73 return;
diff --git a/src/core/hle/service/set/set_cal.cpp b/src/core/hle/service/set/set_cal.cpp
index 1398a4a48..3fbfecc9e 100644
--- a/src/core/hle/service/set/set_cal.cpp
+++ b/src/core/hle/service/set/set_cal.cpp
@@ -50,6 +50,8 @@ SET_CAL::SET_CAL() : ServiceFramework("set:cal") {
50 {39, nullptr, "GetConsoleSixAxisSensorModuleType"}, 50 {39, nullptr, "GetConsoleSixAxisSensorModuleType"},
51 {40, nullptr, "GetConsoleSixAxisSensorHorizontalOffset"}, 51 {40, nullptr, "GetConsoleSixAxisSensorHorizontalOffset"},
52 {41, nullptr, "GetBatteryVersion"}, 52 {41, nullptr, "GetBatteryVersion"},
53 {42, nullptr, "GetDeviceId"},
54 {43, nullptr, "GetConsoleSixAxisSensorMountType"},
53 }; 55 };
54 // clang-format on 56 // clang-format on
55 57
diff --git a/src/core/hle/service/set/set_sys.cpp b/src/core/hle/service/set/set_sys.cpp
index b7c9ea74b..8bd4c7e79 100644
--- a/src/core/hle/service/set/set_sys.cpp
+++ b/src/core/hle/service/set/set_sys.cpp
@@ -288,6 +288,18 @@ SET_SYS::SET_SYS() : ServiceFramework("set:sys") {
288 {186, nullptr, "GetMemoryUsageRateFlag"}, 288 {186, nullptr, "GetMemoryUsageRateFlag"},
289 {187, nullptr, "GetTouchScreenMode"}, 289 {187, nullptr, "GetTouchScreenMode"},
290 {188, nullptr, "SetTouchScreenMode"}, 290 {188, nullptr, "SetTouchScreenMode"},
291 {189, nullptr, "GetButtonConfigSettingsFull"},
292 {190, nullptr, "SetButtonConfigSettingsFull"},
293 {191, nullptr, "GetButtonConfigSettingsEmbedded"},
294 {192, nullptr, "SetButtonConfigSettingsEmbedded"},
295 {193, nullptr, "GetButtonConfigSettingsLeft"},
296 {194, nullptr, "SetButtonConfigSettingsLeft"},
297 {195, nullptr, "GetButtonConfigSettingsRight"},
298 {196, nullptr, "SetButtonConfigSettingsRight"},
299 {197, nullptr, "GetButtonConfigRegisteredSettingsEmbedded"},
300 {198, nullptr, "SetButtonConfigRegisteredSettingsEmbedded"},
301 {199, nullptr, "GetButtonConfigRegisteredSettings"},
302 {200, nullptr, "SetButtonConfigRegisteredSettings"},
291 }; 303 };
292 // clang-format on 304 // clang-format on
293 305
diff --git a/src/core/hle/service/sm/sm.cpp b/src/core/hle/service/sm/sm.cpp
index 88909504d..6ada13be4 100644
--- a/src/core/hle/service/sm/sm.cpp
+++ b/src/core/hle/service/sm/sm.cpp
@@ -28,9 +28,11 @@ void ServiceManager::InvokeControlRequest(Kernel::HLERequestContext& context) {
28 28
29static ResultCode ValidateServiceName(const std::string& name) { 29static ResultCode ValidateServiceName(const std::string& name) {
30 if (name.size() <= 0 || name.size() > 8) { 30 if (name.size() <= 0 || name.size() > 8) {
31 LOG_ERROR(Service_SM, "Invalid service name! service={}", name);
31 return ERR_INVALID_NAME; 32 return ERR_INVALID_NAME;
32 } 33 }
33 if (name.find('\0') != std::string::npos) { 34 if (name.find('\0') != std::string::npos) {
35 LOG_ERROR(Service_SM, "A non null terminated service was passed");
34 return ERR_INVALID_NAME; 36 return ERR_INVALID_NAME;
35 } 37 }
36 return RESULT_SUCCESS; 38 return RESULT_SUCCESS;
@@ -51,8 +53,10 @@ ResultVal<std::shared_ptr<Kernel::ServerPort>> ServiceManager::RegisterService(
51 53
52 CASCADE_CODE(ValidateServiceName(name)); 54 CASCADE_CODE(ValidateServiceName(name));
53 55
54 if (registered_services.find(name) != registered_services.end()) 56 if (registered_services.find(name) != registered_services.end()) {
57 LOG_ERROR(Service_SM, "Service is already registered! service={}", name);
55 return ERR_ALREADY_REGISTERED; 58 return ERR_ALREADY_REGISTERED;
59 }
56 60
57 auto& kernel = Core::System::GetInstance().Kernel(); 61 auto& kernel = Core::System::GetInstance().Kernel();
58 auto [server_port, client_port] = 62 auto [server_port, client_port] =
@@ -66,9 +70,10 @@ ResultCode ServiceManager::UnregisterService(const std::string& name) {
66 CASCADE_CODE(ValidateServiceName(name)); 70 CASCADE_CODE(ValidateServiceName(name));
67 71
68 const auto iter = registered_services.find(name); 72 const auto iter = registered_services.find(name);
69 if (iter == registered_services.end()) 73 if (iter == registered_services.end()) {
74 LOG_ERROR(Service_SM, "Server is not registered! service={}", name);
70 return ERR_SERVICE_NOT_REGISTERED; 75 return ERR_SERVICE_NOT_REGISTERED;
71 76 }
72 registered_services.erase(iter); 77 registered_services.erase(iter);
73 return RESULT_SUCCESS; 78 return RESULT_SUCCESS;
74} 79}
@@ -79,6 +84,7 @@ ResultVal<std::shared_ptr<Kernel::ClientPort>> ServiceManager::GetServicePort(
79 CASCADE_CODE(ValidateServiceName(name)); 84 CASCADE_CODE(ValidateServiceName(name));
80 auto it = registered_services.find(name); 85 auto it = registered_services.find(name);
81 if (it == registered_services.end()) { 86 if (it == registered_services.end()) {
87 LOG_ERROR(Service_SM, "Server is not registered! service={}", name);
82 return ERR_SERVICE_NOT_REGISTERED; 88 return ERR_SERVICE_NOT_REGISTERED;
83 } 89 }
84 90
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index f67fab2f9..8d4952c0e 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -148,6 +148,7 @@ BSD::BSD(const char* name) : ServiceFramework(name) {
148 {30, nullptr, "SendMMsg"}, 148 {30, nullptr, "SendMMsg"},
149 {31, nullptr, "EventFd"}, 149 {31, nullptr, "EventFd"},
150 {32, nullptr, "RegisterResourceStatisticsName"}, 150 {32, nullptr, "RegisterResourceStatisticsName"},
151 {33, nullptr, "Initialize2"},
151 }; 152 };
152 // clang-format on 153 // clang-format on
153 154
diff --git a/src/core/hle/service/time/time.cpp b/src/core/hle/service/time/time.cpp
index e722886de..67f1bbcf3 100644
--- a/src/core/hle/service/time/time.cpp
+++ b/src/core/hle/service/time/time.cpp
@@ -20,8 +20,8 @@ namespace Service::Time {
20 20
21class ISystemClock final : public ServiceFramework<ISystemClock> { 21class ISystemClock final : public ServiceFramework<ISystemClock> {
22public: 22public:
23 ISystemClock(Clock::SystemClockCore& clock_core) 23 explicit ISystemClock(Clock::SystemClockCore& clock_core, Core::System& system)
24 : ServiceFramework("ISystemClock"), clock_core{clock_core} { 24 : ServiceFramework("ISystemClock"), clock_core{clock_core}, system{system} {
25 // clang-format off 25 // clang-format off
26 static const FunctionInfo functions[] = { 26 static const FunctionInfo functions[] = {
27 {0, &ISystemClock::GetCurrentTime, "GetCurrentTime"}, 27 {0, &ISystemClock::GetCurrentTime, "GetCurrentTime"},
@@ -46,9 +46,8 @@ private:
46 } 46 }
47 47
48 s64 posix_time{}; 48 s64 posix_time{};
49 if (const ResultCode result{ 49 if (const ResultCode result{clock_core.GetCurrentTime(system, posix_time)};
50 clock_core.GetCurrentTime(Core::System::GetInstance(), posix_time)}; 50 result.IsError()) {
51 result != RESULT_SUCCESS) {
52 IPC::ResponseBuilder rb{ctx, 2}; 51 IPC::ResponseBuilder rb{ctx, 2};
53 rb.Push(result); 52 rb.Push(result);
54 return; 53 return;
@@ -69,9 +68,8 @@ private:
69 } 68 }
70 69
71 Clock::SystemClockContext system_clock_context{}; 70 Clock::SystemClockContext system_clock_context{};
72 if (const ResultCode result{ 71 if (const ResultCode result{clock_core.GetClockContext(system, system_clock_context)};
73 clock_core.GetClockContext(Core::System::GetInstance(), system_clock_context)}; 72 result.IsError()) {
74 result != RESULT_SUCCESS) {
75 IPC::ResponseBuilder rb{ctx, 2}; 73 IPC::ResponseBuilder rb{ctx, 2};
76 rb.Push(result); 74 rb.Push(result);
77 return; 75 return;
@@ -83,12 +81,13 @@ private:
83 } 81 }
84 82
85 Clock::SystemClockCore& clock_core; 83 Clock::SystemClockCore& clock_core;
84 Core::System& system;
86}; 85};
87 86
88class ISteadyClock final : public ServiceFramework<ISteadyClock> { 87class ISteadyClock final : public ServiceFramework<ISteadyClock> {
89public: 88public:
90 ISteadyClock(Clock::SteadyClockCore& clock_core) 89 explicit ISteadyClock(Clock::SteadyClockCore& clock_core, Core::System& system)
91 : ServiceFramework("ISteadyClock"), clock_core{clock_core} { 90 : ServiceFramework("ISteadyClock"), clock_core{clock_core}, system{system} {
92 static const FunctionInfo functions[] = { 91 static const FunctionInfo functions[] = {
93 {0, &ISteadyClock::GetCurrentTimePoint, "GetCurrentTimePoint"}, 92 {0, &ISteadyClock::GetCurrentTimePoint, "GetCurrentTimePoint"},
94 }; 93 };
@@ -105,14 +104,14 @@ private:
105 return; 104 return;
106 } 105 }
107 106
108 const Clock::SteadyClockTimePoint time_point{ 107 const Clock::SteadyClockTimePoint time_point{clock_core.GetCurrentTimePoint(system)};
109 clock_core.GetCurrentTimePoint(Core::System::GetInstance())};
110 IPC::ResponseBuilder rb{ctx, (sizeof(Clock::SteadyClockTimePoint) / 4) + 2}; 108 IPC::ResponseBuilder rb{ctx, (sizeof(Clock::SteadyClockTimePoint) / 4) + 2};
111 rb.Push(RESULT_SUCCESS); 109 rb.Push(RESULT_SUCCESS);
112 rb.PushRaw(time_point); 110 rb.PushRaw(time_point);
113 } 111 }
114 112
115 Clock::SteadyClockCore& clock_core; 113 Clock::SteadyClockCore& clock_core;
114 Core::System& system;
116}; 115};
117 116
118ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal( 117ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
@@ -134,7 +133,7 @@ ResultCode Module::Interface::GetClockSnapshotFromSystemClockContextInternal(
134 } 133 }
135 134
136 const auto current_time_point{ 135 const auto current_time_point{
137 time_manager.GetStandardSteadyClockCore().GetCurrentTimePoint(Core::System::GetInstance())}; 136 time_manager.GetStandardSteadyClockCore().GetCurrentTimePoint(system)};
138 if (const ResultCode result{Clock::ClockSnapshot::GetCurrentTime( 137 if (const ResultCode result{Clock::ClockSnapshot::GetCurrentTime(
139 clock_snapshot.user_time, current_time_point, clock_snapshot.user_context)}; 138 clock_snapshot.user_time, current_time_point, clock_snapshot.user_context)};
140 result != RESULT_SUCCESS) { 139 result != RESULT_SUCCESS) {
@@ -176,21 +175,24 @@ void Module::Interface::GetStandardUserSystemClock(Kernel::HLERequestContext& ct
176 LOG_DEBUG(Service_Time, "called"); 175 LOG_DEBUG(Service_Time, "called");
177 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 176 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
178 rb.Push(RESULT_SUCCESS); 177 rb.Push(RESULT_SUCCESS);
179 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardUserSystemClockCore()); 178 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardUserSystemClockCore(),
179 system);
180} 180}
181 181
182void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext& ctx) { 182void Module::Interface::GetStandardNetworkSystemClock(Kernel::HLERequestContext& ctx) {
183 LOG_DEBUG(Service_Time, "called"); 183 LOG_DEBUG(Service_Time, "called");
184 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 184 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
185 rb.Push(RESULT_SUCCESS); 185 rb.Push(RESULT_SUCCESS);
186 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardNetworkSystemClockCore()); 186 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardNetworkSystemClockCore(),
187 system);
187} 188}
188 189
189void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) { 190void Module::Interface::GetStandardSteadyClock(Kernel::HLERequestContext& ctx) {
190 LOG_DEBUG(Service_Time, "called"); 191 LOG_DEBUG(Service_Time, "called");
191 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 192 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
192 rb.Push(RESULT_SUCCESS); 193 rb.Push(RESULT_SUCCESS);
193 rb.PushIpcInterface<ISteadyClock>(module->GetTimeManager().GetStandardSteadyClockCore()); 194 rb.PushIpcInterface<ISteadyClock>(module->GetTimeManager().GetStandardSteadyClockCore(),
195 system);
194} 196}
195 197
196void Module::Interface::GetTimeZoneService(Kernel::HLERequestContext& ctx) { 198void Module::Interface::GetTimeZoneService(Kernel::HLERequestContext& ctx) {
@@ -204,7 +206,8 @@ void Module::Interface::GetStandardLocalSystemClock(Kernel::HLERequestContext& c
204 LOG_DEBUG(Service_Time, "called"); 206 LOG_DEBUG(Service_Time, "called");
205 IPC::ResponseBuilder rb{ctx, 2, 0, 1}; 207 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
206 rb.Push(RESULT_SUCCESS); 208 rb.Push(RESULT_SUCCESS);
207 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardLocalSystemClockCore()); 209 rb.PushIpcInterface<ISystemClock>(module->GetTimeManager().GetStandardLocalSystemClockCore(),
210 system);
208} 211}
209 212
210void Module::Interface::IsStandardNetworkSystemClockAccuracySufficient( 213void Module::Interface::IsStandardNetworkSystemClockAccuracySufficient(
@@ -228,8 +231,7 @@ void Module::Interface::CalculateMonotonicSystemClockBaseTimePoint(Kernel::HLERe
228 231
229 IPC::RequestParser rp{ctx}; 232 IPC::RequestParser rp{ctx};
230 const auto context{rp.PopRaw<Clock::SystemClockContext>()}; 233 const auto context{rp.PopRaw<Clock::SystemClockContext>()};
231 const auto current_time_point{ 234 const auto current_time_point{steady_clock_core.GetCurrentTimePoint(system)};
232 steady_clock_core.GetCurrentTimePoint(Core::System::GetInstance())};
233 235
234 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) { 236 if (current_time_point.clock_source_id == context.steady_time_point.clock_source_id) {
235 const auto ticks{Clock::TimeSpanType::FromTicks( 237 const auto ticks{Clock::TimeSpanType::FromTicks(
@@ -255,8 +257,8 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
255 Clock::SystemClockContext user_context{}; 257 Clock::SystemClockContext user_context{};
256 if (const ResultCode result{ 258 if (const ResultCode result{
257 module->GetTimeManager().GetStandardUserSystemClockCore().GetClockContext( 259 module->GetTimeManager().GetStandardUserSystemClockCore().GetClockContext(
258 Core::System::GetInstance(), user_context)}; 260 system, user_context)};
259 result != RESULT_SUCCESS) { 261 result.IsError()) {
260 IPC::ResponseBuilder rb{ctx, 2}; 262 IPC::ResponseBuilder rb{ctx, 2};
261 rb.Push(result); 263 rb.Push(result);
262 return; 264 return;
@@ -264,8 +266,8 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
264 Clock::SystemClockContext network_context{}; 266 Clock::SystemClockContext network_context{};
265 if (const ResultCode result{ 267 if (const ResultCode result{
266 module->GetTimeManager().GetStandardNetworkSystemClockCore().GetClockContext( 268 module->GetTimeManager().GetStandardNetworkSystemClockCore().GetClockContext(
267 Core::System::GetInstance(), network_context)}; 269 system, network_context)};
268 result != RESULT_SUCCESS) { 270 result.IsError()) {
269 IPC::ResponseBuilder rb{ctx, 2}; 271 IPC::ResponseBuilder rb{ctx, 2};
270 rb.Push(result); 272 rb.Push(result);
271 return; 273 return;
@@ -274,7 +276,7 @@ void Module::Interface::GetClockSnapshot(Kernel::HLERequestContext& ctx) {
274 Clock::ClockSnapshot clock_snapshot{}; 276 Clock::ClockSnapshot clock_snapshot{};
275 if (const ResultCode result{GetClockSnapshotFromSystemClockContextInternal( 277 if (const ResultCode result{GetClockSnapshotFromSystemClockContextInternal(
276 &ctx.GetThread(), user_context, network_context, type, clock_snapshot)}; 278 &ctx.GetThread(), user_context, network_context, type, clock_snapshot)};
277 result != RESULT_SUCCESS) { 279 result.IsError()) {
278 IPC::ResponseBuilder rb{ctx, 2}; 280 IPC::ResponseBuilder rb{ctx, 2};
279 rb.Push(result); 281 rb.Push(result);
280 return; 282 return;
diff --git a/src/core/hle/service/time/time_zone_manager.cpp b/src/core/hle/service/time/time_zone_manager.cpp
index c8159bcd5..69152d0ac 100644
--- a/src/core/hle/service/time/time_zone_manager.cpp
+++ b/src/core/hle/service/time/time_zone_manager.cpp
@@ -518,8 +518,8 @@ static bool ParseTimeZoneBinary(TimeZoneRule& time_zone_rule, FileSys::VirtualFi
518 constexpr s32 time_zone_max_leaps{50}; 518 constexpr s32 time_zone_max_leaps{50};
519 constexpr s32 time_zone_max_chars{50}; 519 constexpr s32 time_zone_max_chars{50};
520 if (!(0 <= header.leap_count && header.leap_count < time_zone_max_leaps && 520 if (!(0 <= header.leap_count && header.leap_count < time_zone_max_leaps &&
521 0 < header.type_count && header.type_count < time_zone_rule.ttis.size() && 521 0 < header.type_count && header.type_count < s32(time_zone_rule.ttis.size()) &&
522 0 <= header.time_count && header.time_count < time_zone_rule.ats.size() && 522 0 <= header.time_count && header.time_count < s32(time_zone_rule.ats.size()) &&
523 0 <= header.char_count && header.char_count < time_zone_max_chars && 523 0 <= header.char_count && header.char_count < time_zone_max_chars &&
524 (header.ttis_std_count == header.type_count || header.ttis_std_count == 0) && 524 (header.ttis_std_count == header.type_count || header.ttis_std_count == 0) &&
525 (header.ttis_gmt_count == header.type_count || header.ttis_gmt_count == 0))) { 525 (header.ttis_gmt_count == header.type_count || header.ttis_gmt_count == 0))) {
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 7f109f4eb..46e14c2a3 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -267,7 +267,7 @@ protected:
267 267
268private: 268private:
269 struct Data { 269 struct Data {
270 u32_le unk_0; 270 u32_le unk_0{};
271 }; 271 };
272 272
273 Data data{}; 273 Data data{};
@@ -614,6 +614,14 @@ private:
614 ctx.WriteBuffer(response.Serialize()); 614 ctx.WriteBuffer(response.Serialize());
615 break; 615 break;
616 } 616 }
617 case TransactionId::SetBufferCount: {
618 LOG_WARNING(Service_VI, "(STUBBED) called, transaction=SetBufferCount");
619 [[maybe_unused]] const auto buffer = ctx.ReadBuffer();
620
621 IGBPEmptyResponseParcel response{};
622 ctx.WriteBuffer(response.Serialize());
623 break;
624 }
617 default: 625 default:
618 ASSERT_MSG(false, "Unimplemented"); 626 ASSERT_MSG(false, "Unimplemented");
619 } 627 }
@@ -859,6 +867,7 @@ private:
859 867
860 const auto layer_id = nv_flinger->CreateLayer(display); 868 const auto layer_id = nv_flinger->CreateLayer(display);
861 if (!layer_id) { 869 if (!layer_id) {
870 LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display);
862 IPC::ResponseBuilder rb{ctx, 2}; 871 IPC::ResponseBuilder rb{ctx, 2};
863 rb.Push(ERR_NOT_FOUND); 872 rb.Push(ERR_NOT_FOUND);
864 return; 873 return;
@@ -975,6 +984,7 @@ private:
975 984
976 const auto display_id = nv_flinger->OpenDisplay(name); 985 const auto display_id = nv_flinger->OpenDisplay(name);
977 if (!display_id) { 986 if (!display_id) {
987 LOG_ERROR(Service_VI, "Display not found! display_name={}", name);
978 IPC::ResponseBuilder rb{ctx, 2}; 988 IPC::ResponseBuilder rb{ctx, 2};
979 rb.Push(ERR_NOT_FOUND); 989 rb.Push(ERR_NOT_FOUND);
980 return; 990 return;
@@ -1074,6 +1084,7 @@ private:
1074 1084
1075 const auto display_id = nv_flinger->OpenDisplay(display_name); 1085 const auto display_id = nv_flinger->OpenDisplay(display_name);
1076 if (!display_id) { 1086 if (!display_id) {
1087 LOG_ERROR(Service_VI, "Layer not found! layer_id={}", layer_id);
1077 IPC::ResponseBuilder rb{ctx, 2}; 1088 IPC::ResponseBuilder rb{ctx, 2};
1078 rb.Push(ERR_NOT_FOUND); 1089 rb.Push(ERR_NOT_FOUND);
1079 return; 1090 return;
@@ -1081,6 +1092,7 @@ private:
1081 1092
1082 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id); 1093 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(*display_id, layer_id);
1083 if (!buffer_queue_id) { 1094 if (!buffer_queue_id) {
1095 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id);
1084 IPC::ResponseBuilder rb{ctx, 2}; 1096 IPC::ResponseBuilder rb{ctx, 2};
1085 rb.Push(ERR_NOT_FOUND); 1097 rb.Push(ERR_NOT_FOUND);
1086 return; 1098 return;
@@ -1116,6 +1128,7 @@ private:
1116 1128
1117 const auto layer_id = nv_flinger->CreateLayer(display_id); 1129 const auto layer_id = nv_flinger->CreateLayer(display_id);
1118 if (!layer_id) { 1130 if (!layer_id) {
1131 LOG_ERROR(Service_VI, "Layer not found! layer_id={}", *layer_id);
1119 IPC::ResponseBuilder rb{ctx, 2}; 1132 IPC::ResponseBuilder rb{ctx, 2};
1120 rb.Push(ERR_NOT_FOUND); 1133 rb.Push(ERR_NOT_FOUND);
1121 return; 1134 return;
@@ -1123,6 +1136,7 @@ private:
1123 1136
1124 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id); 1137 const auto buffer_queue_id = nv_flinger->FindBufferQueueId(display_id, *layer_id);
1125 if (!buffer_queue_id) { 1138 if (!buffer_queue_id) {
1139 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id);
1126 IPC::ResponseBuilder rb{ctx, 2}; 1140 IPC::ResponseBuilder rb{ctx, 2};
1127 rb.Push(ERR_NOT_FOUND); 1141 rb.Push(ERR_NOT_FOUND);
1128 return; 1142 return;
@@ -1153,6 +1167,7 @@ private:
1153 1167
1154 const auto vsync_event = nv_flinger->FindVsyncEvent(display_id); 1168 const auto vsync_event = nv_flinger->FindVsyncEvent(display_id);
1155 if (!vsync_event) { 1169 if (!vsync_event) {
1170 LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id);
1156 IPC::ResponseBuilder rb{ctx, 2}; 1171 IPC::ResponseBuilder rb{ctx, 2};
1157 rb.Push(ERR_NOT_FOUND); 1172 rb.Push(ERR_NOT_FOUND);
1158 return; 1173 return;
@@ -1193,6 +1208,7 @@ private:
1193 case NintendoScaleMode::PreserveAspectRatio: 1208 case NintendoScaleMode::PreserveAspectRatio:
1194 return MakeResult(ConvertedScaleMode::PreserveAspectRatio); 1209 return MakeResult(ConvertedScaleMode::PreserveAspectRatio);
1195 default: 1210 default:
1211 LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode);
1196 return ERR_OPERATION_FAILED; 1212 return ERR_OPERATION_FAILED;
1197 } 1213 }
1198 } 1214 }
@@ -1249,6 +1265,7 @@ void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx,
1249 const auto policy = rp.PopEnum<Policy>(); 1265 const auto policy = rp.PopEnum<Policy>();
1250 1266
1251 if (!IsValidServiceAccess(permission, policy)) { 1267 if (!IsValidServiceAccess(permission, policy)) {
1268 LOG_ERROR(Service_VI, "Permission denied for policy {}", static_cast<u32>(policy));
1252 IPC::ResponseBuilder rb{ctx, 2}; 1269 IPC::ResponseBuilder rb{ctx, 2};
1253 rb.Push(ERR_PERMISSION_DENIED); 1270 rb.Push(ERR_PERMISSION_DENIED);
1254 return; 1271 return;
diff --git a/src/core/loader/elf.cpp b/src/core/loader/elf.cpp
index 1e9ed2837..8f7615115 100644
--- a/src/core/loader/elf.cpp
+++ b/src/core/loader/elf.cpp
@@ -398,6 +398,11 @@ AppLoader_ELF::LoadResult AppLoader_ELF::Load(Kernel::Process& process) {
398 Kernel::CodeSet codeset = elf_reader.LoadInto(base_address); 398 Kernel::CodeSet codeset = elf_reader.LoadInto(base_address);
399 const VAddr entry_point = codeset.entrypoint; 399 const VAddr entry_point = codeset.entrypoint;
400 400
401 // Setup the process code layout
402 if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), buffer.size()).IsError()) {
403 return {ResultStatus::ErrorNotInitialized, {}};
404 }
405
401 process.LoadModule(std::move(codeset), entry_point); 406 process.LoadModule(std::move(codeset), entry_point);
402 407
403 is_loaded = true; 408 is_loaded = true;
diff --git a/src/core/loader/nro.cpp b/src/core/loader/nro.cpp
index 5d7e8136e..906544bc9 100644
--- a/src/core/loader/nro.cpp
+++ b/src/core/loader/nro.cpp
@@ -131,7 +131,7 @@ static constexpr u32 PageAlignSize(u32 size) {
131} 131}
132 132
133static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data, 133static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
134 const std::string& name, VAddr load_base) { 134 const std::string& name) {
135 if (data.size() < sizeof(NroHeader)) { 135 if (data.size() < sizeof(NroHeader)) {
136 return {}; 136 return {};
137 } 137 }
@@ -187,19 +187,25 @@ static bool LoadNroImpl(Kernel::Process& process, const std::vector<u8>& data,
187 codeset.DataSegment().size += bss_size; 187 codeset.DataSegment().size += bss_size;
188 program_image.resize(static_cast<u32>(program_image.size()) + bss_size); 188 program_image.resize(static_cast<u32>(program_image.size()) + bss_size);
189 189
190 // Setup the process code layout
191 if (process.LoadFromMetadata(FileSys::ProgramMetadata::GetDefault(), program_image.size())
192 .IsError()) {
193 return false;
194 }
195
190 // Load codeset for current process 196 // Load codeset for current process
191 codeset.memory = std::move(program_image); 197 codeset.memory = std::move(program_image);
192 process.LoadModule(std::move(codeset), load_base); 198 process.LoadModule(std::move(codeset), process.PageTable().GetCodeRegionStart());
193 199
194 // Register module with GDBStub 200 // Register module with GDBStub
195 GDBStub::RegisterModule(name, load_base, load_base); 201 GDBStub::RegisterModule(name, process.PageTable().GetCodeRegionStart(),
202 process.PageTable().GetCodeRegionEnd());
196 203
197 return true; 204 return true;
198} 205}
199 206
200bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file, 207bool AppLoader_NRO::LoadNro(Kernel::Process& process, const FileSys::VfsFile& file) {
201 VAddr load_base) { 208 return LoadNroImpl(process, file.ReadAllBytes(), file.GetName());
202 return LoadNroImpl(process, file.ReadAllBytes(), file.GetName(), load_base);
203} 209}
204 210
205AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) { 211AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
@@ -207,10 +213,7 @@ AppLoader_NRO::LoadResult AppLoader_NRO::Load(Kernel::Process& process) {
207 return {ResultStatus::ErrorAlreadyLoaded, {}}; 213 return {ResultStatus::ErrorAlreadyLoaded, {}};
208 } 214 }
209 215
210 // Load NRO 216 if (!LoadNro(process, *file)) {
211 const VAddr base_address = process.PageTable().GetCodeRegionStart();
212
213 if (!LoadNro(process, *file, base_address)) {
214 return {ResultStatus::ErrorLoadingNRO, {}}; 217 return {ResultStatus::ErrorLoadingNRO, {}};
215 } 218 }
216 219
diff --git a/src/core/loader/nro.h b/src/core/loader/nro.h
index 71811bc29..4593d48fb 100644
--- a/src/core/loader/nro.h
+++ b/src/core/loader/nro.h
@@ -47,7 +47,7 @@ public:
47 bool IsRomFSUpdatable() const override; 47 bool IsRomFSUpdatable() const override;
48 48
49private: 49private:
50 bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file, VAddr load_base); 50 bool LoadNro(Kernel::Process& process, const FileSys::VfsFile& file);
51 51
52 std::vector<u8> icon_data; 52 std::vector<u8> icon_data;
53 std::unique_ptr<FileSys::NACP> nacp; 53 std::unique_ptr<FileSys::NACP> nacp;
diff --git a/src/core/reporter.h b/src/core/reporter.h
index 380941b1b..86d760cf0 100644
--- a/src/core/reporter.h
+++ b/src/core/reporter.h
@@ -56,6 +56,7 @@ public:
56 56
57 enum class PlayReportType { 57 enum class PlayReportType {
58 Old, 58 Old,
59 Old2,
59 New, 60 New,
60 System, 61 System,
61 }; 62 };
diff --git a/src/core/settings.cpp b/src/core/settings.cpp
index c1282cb80..2b0bdc4d3 100644
--- a/src/core/settings.cpp
+++ b/src/core/settings.cpp
@@ -92,10 +92,11 @@ void LogSettings() {
92 LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit); 92 LogSetting("Renderer_UseFrameLimit", Settings::values.use_frame_limit);
93 LogSetting("Renderer_FrameLimit", Settings::values.frame_limit); 93 LogSetting("Renderer_FrameLimit", Settings::values.frame_limit);
94 LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); 94 LogSetting("Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache);
95 LogSetting("Renderer_UseAccurateGpuEmulation", Settings::values.use_accurate_gpu_emulation); 95 LogSetting("Renderer_GPUAccuracyLevel", Settings::values.gpu_accuracy);
96 LogSetting("Renderer_UseAsynchronousGpuEmulation", 96 LogSetting("Renderer_UseAsynchronousGpuEmulation",
97 Settings::values.use_asynchronous_gpu_emulation); 97 Settings::values.use_asynchronous_gpu_emulation);
98 LogSetting("Renderer_UseVsync", Settings::values.use_vsync); 98 LogSetting("Renderer_UseVsync", Settings::values.use_vsync);
99 LogSetting("Renderer_AnisotropicFilteringLevel", Settings::values.max_anisotropy);
99 LogSetting("Audio_OutputEngine", Settings::values.sink_id); 100 LogSetting("Audio_OutputEngine", Settings::values.sink_id);
100 LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching); 101 LogSetting("Audio_EnableAudioStretching", Settings::values.enable_audio_stretching);
101 LogSetting("Audio_OutputDevice", Settings::values.audio_device_id); 102 LogSetting("Audio_OutputDevice", Settings::values.audio_device_id);
@@ -109,4 +110,12 @@ void LogSettings() {
109 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local); 110 LogSetting("Services_BCATBoxcatLocal", Settings::values.bcat_boxcat_local);
110} 111}
111 112
113bool IsGPULevelExtreme() {
114 return values.gpu_accuracy == GPUAccuracy::Extreme;
115}
116
117bool IsGPULevelHigh() {
118 return values.gpu_accuracy == GPUAccuracy::Extreme || values.gpu_accuracy == GPUAccuracy::High;
119}
120
112} // namespace Settings 121} // namespace Settings
diff --git a/src/core/settings.h b/src/core/settings.h
index 79ec01731..163900f0b 100644
--- a/src/core/settings.h
+++ b/src/core/settings.h
@@ -376,6 +376,12 @@ enum class RendererBackend {
376 Vulkan = 1, 376 Vulkan = 1,
377}; 377};
378 378
379enum class GPUAccuracy : u32 {
380 Normal = 0,
381 High = 1,
382 Extreme = 2,
383};
384
379struct Values { 385struct Values {
380 // System 386 // System
381 bool use_docked_mode; 387 bool use_docked_mode;
@@ -436,10 +442,11 @@ struct Values {
436 bool use_frame_limit; 442 bool use_frame_limit;
437 u16 frame_limit; 443 u16 frame_limit;
438 bool use_disk_shader_cache; 444 bool use_disk_shader_cache;
439 bool use_accurate_gpu_emulation; 445 GPUAccuracy gpu_accuracy;
440 bool use_asynchronous_gpu_emulation; 446 bool use_asynchronous_gpu_emulation;
441 bool use_vsync; 447 bool use_vsync;
442 bool force_30fps_mode; 448 bool force_30fps_mode;
449 bool use_fast_gpu_time;
443 450
444 float bg_red; 451 float bg_red;
445 float bg_green; 452 float bg_green;
@@ -464,6 +471,7 @@ struct Values {
464 bool dump_nso; 471 bool dump_nso;
465 bool reporting_services; 472 bool reporting_services;
466 bool quest_flag; 473 bool quest_flag;
474 bool disable_cpu_opt;
467 475
468 // BCAT 476 // BCAT
469 std::string bcat_backend; 477 std::string bcat_backend;
@@ -479,6 +487,9 @@ struct Values {
479 std::map<u64, std::vector<std::string>> disabled_addons; 487 std::map<u64, std::vector<std::string>> disabled_addons;
480} extern values; 488} extern values;
481 489
490bool IsGPULevelExtreme();
491bool IsGPULevelHigh();
492
482void Apply(); 493void Apply();
483void LogSettings(); 494void LogSettings();
484} // namespace Settings 495} // namespace Settings
diff --git a/src/core/telemetry_session.cpp b/src/core/telemetry_session.cpp
index fd5a3ee9f..1c3b03a1c 100644
--- a/src/core/telemetry_session.cpp
+++ b/src/core/telemetry_session.cpp
@@ -56,6 +56,18 @@ static const char* TranslateRenderer(Settings::RendererBackend backend) {
56 return "Unknown"; 56 return "Unknown";
57} 57}
58 58
59static const char* TranslateGPUAccuracyLevel(Settings::GPUAccuracy backend) {
60 switch (backend) {
61 case Settings::GPUAccuracy::Normal:
62 return "Normal";
63 case Settings::GPUAccuracy::High:
64 return "High";
65 case Settings::GPUAccuracy::Extreme:
66 return "Extreme";
67 }
68 return "Unknown";
69}
70
59u64 GetTelemetryId() { 71u64 GetTelemetryId() {
60 u64 telemetry_id{}; 72 u64 telemetry_id{};
61 const std::string filename{FileUtil::GetUserPath(FileUtil::UserPath::ConfigDir) + 73 const std::string filename{FileUtil::GetUserPath(FileUtil::UserPath::ConfigDir) +
@@ -184,8 +196,8 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader) {
184 AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit); 196 AddField(field_type, "Renderer_UseFrameLimit", Settings::values.use_frame_limit);
185 AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit); 197 AddField(field_type, "Renderer_FrameLimit", Settings::values.frame_limit);
186 AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache); 198 AddField(field_type, "Renderer_UseDiskShaderCache", Settings::values.use_disk_shader_cache);
187 AddField(field_type, "Renderer_UseAccurateGpuEmulation", 199 AddField(field_type, "Renderer_GPUAccuracyLevel",
188 Settings::values.use_accurate_gpu_emulation); 200 TranslateGPUAccuracyLevel(Settings::values.gpu_accuracy));
189 AddField(field_type, "Renderer_UseAsynchronousGpuEmulation", 201 AddField(field_type, "Renderer_UseAsynchronousGpuEmulation",
190 Settings::values.use_asynchronous_gpu_emulation); 202 Settings::values.use_asynchronous_gpu_emulation);
191 AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync); 203 AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync);
diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp
index c98c848cf..95e351e24 100644
--- a/src/input_common/main.cpp
+++ b/src/input_common/main.cpp
@@ -18,7 +18,9 @@ namespace InputCommon {
18 18
19static std::shared_ptr<Keyboard> keyboard; 19static std::shared_ptr<Keyboard> keyboard;
20static std::shared_ptr<MotionEmu> motion_emu; 20static std::shared_ptr<MotionEmu> motion_emu;
21#ifdef HAVE_SDL2
21static std::unique_ptr<SDL::State> sdl; 22static std::unique_ptr<SDL::State> sdl;
23#endif
22static std::unique_ptr<CemuhookUDP::State> udp; 24static std::unique_ptr<CemuhookUDP::State> udp;
23 25
24void Init() { 26void Init() {
@@ -29,7 +31,9 @@ void Init() {
29 motion_emu = std::make_shared<MotionEmu>(); 31 motion_emu = std::make_shared<MotionEmu>();
30 Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu); 32 Input::RegisterFactory<Input::MotionDevice>("motion_emu", motion_emu);
31 33
34#ifdef HAVE_SDL2
32 sdl = SDL::Init(); 35 sdl = SDL::Init();
36#endif
33 37
34 udp = CemuhookUDP::Init(); 38 udp = CemuhookUDP::Init();
35} 39}
@@ -40,7 +44,9 @@ void Shutdown() {
40 Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button"); 44 Input::UnregisterFactory<Input::AnalogDevice>("analog_from_button");
41 Input::UnregisterFactory<Input::MotionDevice>("motion_emu"); 45 Input::UnregisterFactory<Input::MotionDevice>("motion_emu");
42 motion_emu.reset(); 46 motion_emu.reset();
47#ifdef HAVE_SDL2
43 sdl.reset(); 48 sdl.reset();
49#endif
44 udp.reset(); 50 udp.reset();
45} 51}
46 52
diff --git a/src/tests/core/core_timing.cpp b/src/tests/core/core_timing.cpp
index 1e3940801..ff2d11cc8 100644
--- a/src/tests/core/core_timing.cpp
+++ b/src/tests/core/core_timing.cpp
@@ -14,13 +14,14 @@
14#include "core/core.h" 14#include "core/core.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16 16
17namespace {
17// Numbers are chosen randomly to make sure the correct one is given. 18// Numbers are chosen randomly to make sure the correct one is given.
18static constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}}; 19constexpr std::array<u64, 5> CB_IDS{{42, 144, 93, 1026, UINT64_C(0xFFFF7FFFF7FFFF)}};
19static constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals 20constexpr int MAX_SLICE_LENGTH = 10000; // Copied from CoreTiming internals
20 21
21static std::bitset<CB_IDS.size()> callbacks_ran_flags; 22std::bitset<CB_IDS.size()> callbacks_ran_flags;
22static u64 expected_callback = 0; 23u64 expected_callback = 0;
23static s64 lateness = 0; 24s64 lateness = 0;
24 25
25template <unsigned int IDX> 26template <unsigned int IDX>
26void CallbackTemplate(u64 userdata, s64 cycles_late) { 27void CallbackTemplate(u64 userdata, s64 cycles_late) {
@@ -31,7 +32,7 @@ void CallbackTemplate(u64 userdata, s64 cycles_late) {
31 REQUIRE(lateness == cycles_late); 32 REQUIRE(lateness == cycles_late);
32} 33}
33 34
34static u64 callbacks_done = 0; 35u64 callbacks_done = 0;
35 36
36void EmptyCallback(u64 userdata, s64 cycles_late) { 37void EmptyCallback(u64 userdata, s64 cycles_late) {
37 ++callbacks_done; 38 ++callbacks_done;
@@ -48,8 +49,8 @@ struct ScopeInit final {
48 Core::Timing::CoreTiming core_timing; 49 Core::Timing::CoreTiming core_timing;
49}; 50};
50 51
51static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0, 52void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32 context = 0,
52 int expected_lateness = 0, int cpu_downcount = 0) { 53 int expected_lateness = 0, int cpu_downcount = 0) {
53 callbacks_ran_flags = 0; 54 callbacks_ran_flags = 0;
54 expected_callback = CB_IDS[idx]; 55 expected_callback = CB_IDS[idx];
55 lateness = expected_lateness; 56 lateness = expected_lateness;
@@ -62,6 +63,7 @@ static void AdvanceAndCheck(Core::Timing::CoreTiming& core_timing, u32 idx, u32
62 63
63 REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags); 64 REQUIRE(decltype(callbacks_ran_flags)().set(idx) == callbacks_ran_flags);
64} 65}
66} // Anonymous namespace
65 67
66TEST_CASE("CoreTiming[BasicOrder]", "[core]") { 68TEST_CASE("CoreTiming[BasicOrder]", "[core]") {
67 ScopeInit guard; 69 ScopeInit guard;
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 258d58eba..ff53282c9 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -23,6 +23,7 @@ add_library(video_core STATIC
23 engines/shader_bytecode.h 23 engines/shader_bytecode.h
24 engines/shader_header.h 24 engines/shader_header.h
25 engines/shader_type.h 25 engines/shader_type.h
26 fence_manager.h
26 gpu.cpp 27 gpu.cpp
27 gpu.h 28 gpu.h
28 gpu_asynch.cpp 29 gpu_asynch.cpp
@@ -51,6 +52,8 @@ add_library(video_core STATIC
51 renderer_opengl/gl_buffer_cache.h 52 renderer_opengl/gl_buffer_cache.h
52 renderer_opengl/gl_device.cpp 53 renderer_opengl/gl_device.cpp
53 renderer_opengl/gl_device.h 54 renderer_opengl/gl_device.h
55 renderer_opengl/gl_fence_manager.cpp
56 renderer_opengl/gl_fence_manager.h
54 renderer_opengl/gl_framebuffer_cache.cpp 57 renderer_opengl/gl_framebuffer_cache.cpp
55 renderer_opengl/gl_framebuffer_cache.h 58 renderer_opengl/gl_framebuffer_cache.h
56 renderer_opengl/gl_rasterizer.cpp 59 renderer_opengl/gl_rasterizer.cpp
@@ -121,6 +124,8 @@ add_library(video_core STATIC
121 shader/decode.cpp 124 shader/decode.cpp
122 shader/expr.cpp 125 shader/expr.cpp
123 shader/expr.h 126 shader/expr.h
127 shader/memory_util.cpp
128 shader/memory_util.h
124 shader/node_helper.cpp 129 shader/node_helper.cpp
125 shader/node_helper.h 130 shader/node_helper.h
126 shader/node.h 131 shader/node.h
@@ -160,6 +165,8 @@ if (ENABLE_VULKAN)
160 renderer_vulkan/fixed_pipeline_state.h 165 renderer_vulkan/fixed_pipeline_state.h
161 renderer_vulkan/maxwell_to_vk.cpp 166 renderer_vulkan/maxwell_to_vk.cpp
162 renderer_vulkan/maxwell_to_vk.h 167 renderer_vulkan/maxwell_to_vk.h
168 renderer_vulkan/nsight_aftermath_tracker.cpp
169 renderer_vulkan/nsight_aftermath_tracker.h
163 renderer_vulkan/renderer_vulkan.h 170 renderer_vulkan/renderer_vulkan.h
164 renderer_vulkan/renderer_vulkan.cpp 171 renderer_vulkan/renderer_vulkan.cpp
165 renderer_vulkan/vk_blit_screen.cpp 172 renderer_vulkan/vk_blit_screen.cpp
@@ -174,6 +181,8 @@ if (ENABLE_VULKAN)
174 renderer_vulkan/vk_descriptor_pool.h 181 renderer_vulkan/vk_descriptor_pool.h
175 renderer_vulkan/vk_device.cpp 182 renderer_vulkan/vk_device.cpp
176 renderer_vulkan/vk_device.h 183 renderer_vulkan/vk_device.h
184 renderer_vulkan/vk_fence_manager.cpp
185 renderer_vulkan/vk_fence_manager.h
177 renderer_vulkan/vk_graphics_pipeline.cpp 186 renderer_vulkan/vk_graphics_pipeline.cpp
178 renderer_vulkan/vk_graphics_pipeline.h 187 renderer_vulkan/vk_graphics_pipeline.h
179 renderer_vulkan/vk_image.cpp 188 renderer_vulkan/vk_image.cpp
@@ -213,19 +222,30 @@ if (ENABLE_VULKAN)
213 renderer_vulkan/wrapper.cpp 222 renderer_vulkan/wrapper.cpp
214 renderer_vulkan/wrapper.h 223 renderer_vulkan/wrapper.h
215 ) 224 )
216
217 target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
218 target_compile_definitions(video_core PRIVATE HAS_VULKAN)
219endif() 225endif()
220 226
221create_target_directory_groups(video_core) 227create_target_directory_groups(video_core)
222 228
223target_link_libraries(video_core PUBLIC common core) 229target_link_libraries(video_core PUBLIC common core)
224target_link_libraries(video_core PRIVATE glad) 230target_link_libraries(video_core PRIVATE glad)
231
225if (ENABLE_VULKAN) 232if (ENABLE_VULKAN)
233 target_include_directories(video_core PRIVATE sirit ../../externals/Vulkan-Headers/include)
234 target_compile_definitions(video_core PRIVATE HAS_VULKAN)
226 target_link_libraries(video_core PRIVATE sirit) 235 target_link_libraries(video_core PRIVATE sirit)
227endif() 236endif()
228 237
238if (ENABLE_NSIGHT_AFTERMATH)
239 if (NOT DEFINED ENV{NSIGHT_AFTERMATH_SDK})
240 message(ERROR "Environment variable NSIGHT_AFTERMATH_SDK has to be provided")
241 endif()
242 if (NOT WIN32)
243 message(ERROR "Nsight Aftermath doesn't support non-Windows platforms")
244 endif()
245 target_compile_definitions(video_core PRIVATE HAS_NSIGHT_AFTERMATH)
246 target_include_directories(video_core PRIVATE "$ENV{NSIGHT_AFTERMATH_SDK}/include")
247endif()
248
229if (MSVC) 249if (MSVC)
230 target_compile_options(video_core PRIVATE /we4267) 250 target_compile_options(video_core PRIVATE /we4267)
231else() 251else()
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 83e7a1cde..56e570994 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -4,7 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <list>
8#include <memory> 8#include <memory>
9#include <mutex> 9#include <mutex>
10#include <unordered_map> 10#include <unordered_map>
@@ -18,8 +18,10 @@
18 18
19#include "common/alignment.h" 19#include "common/alignment.h"
20#include "common/common_types.h" 20#include "common/common_types.h"
21#include "common/logging/log.h"
21#include "core/core.h" 22#include "core/core.h"
22#include "core/memory.h" 23#include "core/memory.h"
24#include "core/settings.h"
23#include "video_core/buffer_cache/buffer_block.h" 25#include "video_core/buffer_cache/buffer_block.h"
24#include "video_core/buffer_cache/map_interval.h" 26#include "video_core/buffer_cache/map_interval.h"
25#include "video_core/memory_manager.h" 27#include "video_core/memory_manager.h"
@@ -79,14 +81,13 @@ public:
79 auto map = MapAddress(block, gpu_addr, cpu_addr, size); 81 auto map = MapAddress(block, gpu_addr, cpu_addr, size);
80 if (is_written) { 82 if (is_written) {
81 map->MarkAsModified(true, GetModifiedTicks()); 83 map->MarkAsModified(true, GetModifiedTicks());
84 if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
85 MarkForAsyncFlush(map);
86 }
82 if (!map->IsWritten()) { 87 if (!map->IsWritten()) {
83 map->MarkAsWritten(true); 88 map->MarkAsWritten(true);
84 MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); 89 MarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
85 } 90 }
86 } else {
87 if (map->IsWritten()) {
88 WriteBarrier();
89 }
90 } 91 }
91 92
92 return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))}; 93 return {ToHandle(block), static_cast<u64>(block->GetOffset(cpu_addr))};
@@ -137,11 +138,22 @@ public:
137 }); 138 });
138 for (auto& object : objects) { 139 for (auto& object : objects) {
139 if (object->IsModified() && object->IsRegistered()) { 140 if (object->IsModified() && object->IsRegistered()) {
141 mutex.unlock();
140 FlushMap(object); 142 FlushMap(object);
143 mutex.lock();
141 } 144 }
142 } 145 }
143 } 146 }
144 147
148 bool MustFlushRegion(VAddr addr, std::size_t size) {
149 std::lock_guard lock{mutex};
150
151 const std::vector<MapInterval> objects = GetMapsInRange(addr, size);
152 return std::any_of(objects.cbegin(), objects.cend(), [](const MapInterval& map) {
153 return map->IsModified() && map->IsRegistered();
154 });
155 }
156
145 /// Mark the specified region as being invalidated 157 /// Mark the specified region as being invalidated
146 void InvalidateRegion(VAddr addr, u64 size) { 158 void InvalidateRegion(VAddr addr, u64 size) {
147 std::lock_guard lock{mutex}; 159 std::lock_guard lock{mutex};
@@ -154,6 +166,77 @@ public:
154 } 166 }
155 } 167 }
156 168
169 void OnCPUWrite(VAddr addr, std::size_t size) {
170 std::lock_guard lock{mutex};
171
172 for (const auto& object : GetMapsInRange(addr, size)) {
173 if (object->IsMemoryMarked() && object->IsRegistered()) {
174 UnmarkMemory(object);
175 object->SetSyncPending(true);
176 marked_for_unregister.emplace_back(object);
177 }
178 }
179 }
180
181 void SyncGuestHost() {
182 std::lock_guard lock{mutex};
183
184 for (const auto& object : marked_for_unregister) {
185 if (object->IsRegistered()) {
186 object->SetSyncPending(false);
187 Unregister(object);
188 }
189 }
190 marked_for_unregister.clear();
191 }
192
193 void CommitAsyncFlushes() {
194 if (uncommitted_flushes) {
195 auto commit_list = std::make_shared<std::list<MapInterval>>();
196 for (auto& map : *uncommitted_flushes) {
197 if (map->IsRegistered() && map->IsModified()) {
198 // TODO(Blinkhawk): Implement backend asynchronous flushing
199 // AsyncFlushMap(map)
200 commit_list->push_back(map);
201 }
202 }
203 if (!commit_list->empty()) {
204 committed_flushes.push_back(commit_list);
205 } else {
206 committed_flushes.emplace_back();
207 }
208 } else {
209 committed_flushes.emplace_back();
210 }
211 uncommitted_flushes.reset();
212 }
213
214 bool ShouldWaitAsyncFlushes() const {
215 return !committed_flushes.empty() && committed_flushes.front() != nullptr;
216 }
217
218 bool HasUncommittedFlushes() const {
219 return uncommitted_flushes != nullptr;
220 }
221
222 void PopAsyncFlushes() {
223 if (committed_flushes.empty()) {
224 return;
225 }
226 auto& flush_list = committed_flushes.front();
227 if (!flush_list) {
228 committed_flushes.pop_front();
229 return;
230 }
231 for (MapInterval& map : *flush_list) {
232 if (map->IsRegistered()) {
233 // TODO(Blinkhawk): Replace this for reading the asynchronous flush
234 FlushMap(map);
235 }
236 }
237 committed_flushes.pop_front();
238 }
239
157 virtual BufferType GetEmptyBuffer(std::size_t size) = 0; 240 virtual BufferType GetEmptyBuffer(std::size_t size) = 0;
158 241
159protected: 242protected:
@@ -166,8 +249,6 @@ protected:
166 249
167 virtual BufferType ToHandle(const OwnerBuffer& storage) = 0; 250 virtual BufferType ToHandle(const OwnerBuffer& storage) = 0;
168 251
169 virtual void WriteBarrier() = 0;
170
171 virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0; 252 virtual OwnerBuffer CreateBlock(VAddr cpu_addr, std::size_t size) = 0;
172 253
173 virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size, 254 virtual void UploadBlockData(const OwnerBuffer& buffer, std::size_t offset, std::size_t size,
@@ -196,17 +277,30 @@ protected:
196 const IntervalType interval{new_map->GetStart(), new_map->GetEnd()}; 277 const IntervalType interval{new_map->GetStart(), new_map->GetEnd()};
197 mapped_addresses.insert({interval, new_map}); 278 mapped_addresses.insert({interval, new_map});
198 rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1); 279 rasterizer.UpdatePagesCachedCount(cpu_addr, size, 1);
280 new_map->SetMemoryMarked(true);
199 if (inherit_written) { 281 if (inherit_written) {
200 MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1); 282 MarkRegionAsWritten(new_map->GetStart(), new_map->GetEnd() - 1);
201 new_map->MarkAsWritten(true); 283 new_map->MarkAsWritten(true);
202 } 284 }
203 } 285 }
204 286
205 /// Unregisters an object from the cache 287 void UnmarkMemory(const MapInterval& map) {
206 void Unregister(MapInterval& map) { 288 if (!map->IsMemoryMarked()) {
289 return;
290 }
207 const std::size_t size = map->GetEnd() - map->GetStart(); 291 const std::size_t size = map->GetEnd() - map->GetStart();
208 rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1); 292 rasterizer.UpdatePagesCachedCount(map->GetStart(), size, -1);
293 map->SetMemoryMarked(false);
294 }
295
296 /// Unregisters an object from the cache
297 void Unregister(const MapInterval& map) {
298 UnmarkMemory(map);
209 map->MarkAsRegistered(false); 299 map->MarkAsRegistered(false);
300 if (map->IsSyncPending()) {
301 marked_for_unregister.remove(map);
302 map->SetSyncPending(false);
303 }
210 if (map->IsWritten()) { 304 if (map->IsWritten()) {
211 UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1); 305 UnmarkRegionAsWritten(map->GetStart(), map->GetEnd() - 1);
212 } 306 }
@@ -264,6 +358,9 @@ private:
264 MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr); 358 MapInterval new_map = CreateMap(new_start, new_end, new_gpu_addr);
265 if (modified_inheritance) { 359 if (modified_inheritance) {
266 new_map->MarkAsModified(true, GetModifiedTicks()); 360 new_map->MarkAsModified(true, GetModifiedTicks());
361 if (Settings::IsGPULevelHigh() && Settings::values.use_asynchronous_gpu_emulation) {
362 MarkForAsyncFlush(new_map);
363 }
267 } 364 }
268 Register(new_map, write_inheritance); 365 Register(new_map, write_inheritance);
269 return new_map; 366 return new_map;
@@ -450,6 +547,13 @@ private:
450 return false; 547 return false;
451 } 548 }
452 549
550 void MarkForAsyncFlush(MapInterval& map) {
551 if (!uncommitted_flushes) {
552 uncommitted_flushes = std::make_shared<std::unordered_set<MapInterval>>();
553 }
554 uncommitted_flushes->insert(map);
555 }
556
453 VideoCore::RasterizerInterface& rasterizer; 557 VideoCore::RasterizerInterface& rasterizer;
454 Core::System& system; 558 Core::System& system;
455 559
@@ -479,6 +583,10 @@ private:
479 u64 modified_ticks = 0; 583 u64 modified_ticks = 0;
480 584
481 std::vector<u8> staging_buffer; 585 std::vector<u8> staging_buffer;
586 std::list<MapInterval> marked_for_unregister;
587
588 std::shared_ptr<std::unordered_set<MapInterval>> uncommitted_flushes{};
589 std::list<std::shared_ptr<std::list<MapInterval>>> committed_flushes;
482 590
483 std::recursive_mutex mutex; 591 std::recursive_mutex mutex;
484}; 592};
diff --git a/src/video_core/buffer_cache/map_interval.h b/src/video_core/buffer_cache/map_interval.h
index b0956029d..29d8b26f3 100644
--- a/src/video_core/buffer_cache/map_interval.h
+++ b/src/video_core/buffer_cache/map_interval.h
@@ -46,6 +46,22 @@ public:
46 return is_registered; 46 return is_registered;
47 } 47 }
48 48
49 void SetMemoryMarked(bool is_memory_marked_) {
50 is_memory_marked = is_memory_marked_;
51 }
52
53 bool IsMemoryMarked() const {
54 return is_memory_marked;
55 }
56
57 void SetSyncPending(bool is_sync_pending_) {
58 is_sync_pending = is_sync_pending_;
59 }
60
61 bool IsSyncPending() const {
62 return is_sync_pending;
63 }
64
49 VAddr GetStart() const { 65 VAddr GetStart() const {
50 return start; 66 return start;
51 } 67 }
@@ -83,6 +99,8 @@ private:
83 bool is_written{}; 99 bool is_written{};
84 bool is_modified{}; 100 bool is_modified{};
85 bool is_registered{}; 101 bool is_registered{};
102 bool is_memory_marked{};
103 bool is_sync_pending{};
86 u64 ticks{}; 104 u64 ticks{};
87}; 105};
88 106
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 0b77afc71..16311f05e 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -21,6 +21,7 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
21void DmaPusher::DispatchCalls() { 21void DmaPusher::DispatchCalls() {
22 MICROPROFILE_SCOPE(DispatchCalls); 22 MICROPROFILE_SCOPE(DispatchCalls);
23 23
24 gpu.SyncGuestHost();
24 // On entering GPU code, assume all memory may be touched by the ARM core. 25 // On entering GPU code, assume all memory may be touched by the ARM core.
25 gpu.Maxwell3D().OnMemoryWrite(); 26 gpu.Maxwell3D().OnMemoryWrite();
26 27
@@ -32,6 +33,8 @@ void DmaPusher::DispatchCalls() {
32 } 33 }
33 } 34 }
34 gpu.FlushCommands(); 35 gpu.FlushCommands();
36 gpu.SyncGuestHost();
37 gpu.OnCommandListEnd();
35} 38}
36 39
37bool DmaPusher::Step() { 40bool DmaPusher::Step() {
@@ -68,16 +71,22 @@ bool DmaPusher::Step() {
68 gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(), 71 gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
69 command_list_header.size * sizeof(u32)); 72 command_list_header.size * sizeof(u32));
70 73
71 for (const CommandHeader& command_header : command_headers) { 74 for (std::size_t index = 0; index < command_headers.size();) {
75 const CommandHeader& command_header = command_headers[index];
72 76
73 // now, see if we're in the middle of a command 77 if (dma_state.method_count) {
74 if (dma_state.length_pending) {
75 // Second word of long non-inc methods command - method count
76 dma_state.length_pending = 0;
77 dma_state.method_count = command_header.method_count_;
78 } else if (dma_state.method_count) {
79 // Data word of methods command 78 // Data word of methods command
80 CallMethod(command_header.argument); 79 if (dma_state.non_incrementing) {
80 const u32 max_write = static_cast<u32>(
81 std::min<std::size_t>(index + dma_state.method_count, command_headers.size()) -
82 index);
83 CallMultiMethod(&command_header.argument, max_write);
84 dma_state.method_count -= max_write;
85 index += max_write;
86 continue;
87 } else {
88 CallMethod(command_header.argument);
89 }
81 90
82 if (!dma_state.non_incrementing) { 91 if (!dma_state.non_incrementing) {
83 dma_state.method++; 92 dma_state.method++;
@@ -117,6 +126,7 @@ bool DmaPusher::Step() {
117 break; 126 break;
118 } 127 }
119 } 128 }
129 index++;
120 } 130 }
121 131
122 if (!non_main) { 132 if (!non_main) {
@@ -137,4 +147,9 @@ void DmaPusher::CallMethod(u32 argument) const {
137 gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count}); 147 gpu.CallMethod({dma_state.method, argument, dma_state.subchannel, dma_state.method_count});
138} 148}
139 149
150void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
151 gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
152 dma_state.method_count);
153}
154
140} // namespace Tegra 155} // namespace Tegra
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index d6188614a..6cef71306 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -75,6 +75,7 @@ private:
75 void SetState(const CommandHeader& command_header); 75 void SetState(const CommandHeader& command_header);
76 76
77 void CallMethod(u32 argument) const; 77 void CallMethod(u32 argument) const;
78 void CallMultiMethod(const u32* base_start, u32 num_methods) const;
78 79
79 std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once 80 std::vector<CommandHeader> command_headers; ///< Buffer for list of commands fetched at once
80 81
diff --git a/src/video_core/engines/fermi_2d.cpp b/src/video_core/engines/fermi_2d.cpp
index 85d308e26..8a47614d2 100644
--- a/src/video_core/engines/fermi_2d.cpp
+++ b/src/video_core/engines/fermi_2d.cpp
@@ -28,7 +28,13 @@ void Fermi2D::CallMethod(const GPU::MethodCall& method_call) {
28 } 28 }
29} 29}
30 30
31std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) { 31void Fermi2D::CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending) {
32 for (std::size_t i = 0; i < amount; i++) {
33 CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
34 }
35}
36
37static std::pair<u32, u32> DelimitLine(u32 src_1, u32 src_2, u32 dst_1, u32 dst_2, u32 src_line) {
32 const u32 line_a = src_2 - src_1; 38 const u32 line_a = src_2 - src_1;
33 const u32 line_b = dst_2 - dst_1; 39 const u32 line_b = dst_2 - dst_1;
34 const u32 excess = std::max<s32>(0, line_a - src_line + src_1); 40 const u32 excess = std::max<s32>(0, line_a - src_line + src_1);
diff --git a/src/video_core/engines/fermi_2d.h b/src/video_core/engines/fermi_2d.h
index dba342c70..939a5966d 100644
--- a/src/video_core/engines/fermi_2d.h
+++ b/src/video_core/engines/fermi_2d.h
@@ -39,6 +39,9 @@ public:
39 /// Write the value to the register identified by method. 39 /// Write the value to the register identified by method.
40 void CallMethod(const GPU::MethodCall& method_call); 40 void CallMethod(const GPU::MethodCall& method_call);
41 41
42 /// Write multiple values to the register identified by method.
43 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
44
42 enum class Origin : u32 { 45 enum class Origin : u32 {
43 Center = 0, 46 Center = 0,
44 Corner = 1, 47 Corner = 1,
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 368c75a66..00a12175f 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -51,6 +51,13 @@ void KeplerCompute::CallMethod(const GPU::MethodCall& method_call) {
51 } 51 }
52} 52}
53 53
54void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
55 u32 methods_pending) {
56 for (std::size_t i = 0; i < amount; i++) {
57 CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
58 }
59}
60
54Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const { 61Texture::FullTextureInfo KeplerCompute::GetTexture(std::size_t offset) const {
55 const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value(); 62 const std::bitset<8> cbuf_mask = launch_description.const_buffer_enable_mask.Value();
56 ASSERT(cbuf_mask[regs.tex_cb_index]); 63 ASSERT(cbuf_mask[regs.tex_cb_index]);
diff --git a/src/video_core/engines/kepler_compute.h b/src/video_core/engines/kepler_compute.h
index eeb79c56f..fe55fdfd0 100644
--- a/src/video_core/engines/kepler_compute.h
+++ b/src/video_core/engines/kepler_compute.h
@@ -202,6 +202,9 @@ public:
202 /// Write the value to the register identified by method. 202 /// Write the value to the register identified by method.
203 void CallMethod(const GPU::MethodCall& method_call); 203 void CallMethod(const GPU::MethodCall& method_call);
204 204
205 /// Write multiple values to the register identified by method.
206 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
207
205 Texture::FullTextureInfo GetTexture(std::size_t offset) const; 208 Texture::FullTextureInfo GetTexture(std::size_t offset) const;
206 209
207 /// Given a texture handle, returns the TSC and TIC entries. 210 /// Given a texture handle, returns the TSC and TIC entries.
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index 597872e43..586ff15dc 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -41,4 +41,11 @@ void KeplerMemory::CallMethod(const GPU::MethodCall& method_call) {
41 } 41 }
42} 42}
43 43
44void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
45 u32 methods_pending) {
46 for (std::size_t i = 0; i < amount; i++) {
47 CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
48 }
49}
50
44} // namespace Tegra::Engines 51} // namespace Tegra::Engines
diff --git a/src/video_core/engines/kepler_memory.h b/src/video_core/engines/kepler_memory.h
index 396fb6e86..bb26fb030 100644
--- a/src/video_core/engines/kepler_memory.h
+++ b/src/video_core/engines/kepler_memory.h
@@ -40,6 +40,9 @@ public:
40 /// Write the value to the register identified by method. 40 /// Write the value to the register identified by method.
41 void CallMethod(const GPU::MethodCall& method_call); 41 void CallMethod(const GPU::MethodCall& method_call);
42 42
43 /// Write multiple values to the register identified by method.
44 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
45
43 struct Regs { 46 struct Regs {
44 static constexpr size_t NUM_REGS = 0x7F; 47 static constexpr size_t NUM_REGS = 0x7F;
45 48
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index ba63b44b4..7db055ea0 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -92,6 +92,10 @@ void Maxwell3D::InitializeRegisterDefaults() {
92 color_mask.A.Assign(1); 92 color_mask.A.Assign(1);
93 } 93 }
94 94
95 for (auto& format : regs.vertex_attrib_format) {
96 format.constant.Assign(1);
97 }
98
95 // NVN games expect these values to be enabled at boot 99 // NVN games expect these values to be enabled at boot
96 regs.rasterize_enable = 1; 100 regs.rasterize_enable = 1;
97 regs.rt_separate_frag_data = 1; 101 regs.rt_separate_frag_data = 1;
@@ -180,6 +184,10 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
180 } 184 }
181 185
182 switch (method) { 186 switch (method) {
187 case MAXWELL3D_REG_INDEX(wait_for_idle): {
188 rasterizer.WaitForIdle();
189 break;
190 }
183 case MAXWELL3D_REG_INDEX(shadow_ram_control): { 191 case MAXWELL3D_REG_INDEX(shadow_ram_control): {
184 shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(method_call.argument); 192 shadow_state.shadow_ram_control = static_cast<Regs::ShadowRamControl>(method_call.argument);
185 break; 193 break;
@@ -276,6 +284,58 @@ void Maxwell3D::CallMethod(const GPU::MethodCall& method_call) {
276 } 284 }
277} 285}
278 286
287void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
288 u32 methods_pending) {
289 // Methods after 0xE00 are special, they're actually triggers for some microcode that was
290 // uploaded to the GPU during initialization.
291 if (method >= MacroRegistersStart) {
292 // We're trying to execute a macro
293 if (executing_macro == 0) {
294 // A macro call must begin by writing the macro method's register, not its argument.
295 ASSERT_MSG((method % 2) == 0,
296 "Can't start macro execution by writing to the ARGS register");
297 executing_macro = method;
298 }
299
300 for (std::size_t i = 0; i < amount; i++) {
301 macro_params.push_back(base_start[i]);
302 }
303
304 // Call the macro when there are no more parameters in the command buffer
305 if (amount == methods_pending) {
306 CallMacroMethod(executing_macro, macro_params.size(), macro_params.data());
307 macro_params.clear();
308 }
309 return;
310 }
311 switch (method) {
312 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]):
313 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[1]):
314 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[2]):
315 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[3]):
316 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[4]):
317 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[5]):
318 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[6]):
319 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[7]):
320 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[8]):
321 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[9]):
322 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[10]):
323 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[11]):
324 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[12]):
325 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[13]):
326 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[14]):
327 case MAXWELL3D_REG_INDEX(const_buffer.cb_data[15]): {
328 ProcessCBMultiData(method, base_start, amount);
329 break;
330 }
331 default: {
332 for (std::size_t i = 0; i < amount; i++) {
333 CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
334 }
335 }
336 }
337}
338
279void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) { 339void Maxwell3D::StepInstance(const MMEDrawMode expected_mode, const u32 count) {
280 if (mme_draw.current_mode == MMEDrawMode::Undefined) { 340 if (mme_draw.current_mode == MMEDrawMode::Undefined) {
281 if (mme_draw.gl_begin_consume) { 341 if (mme_draw.gl_begin_consume) {
@@ -400,7 +460,11 @@ void Maxwell3D::ProcessQueryGet() {
400 460
401 switch (regs.query.query_get.operation) { 461 switch (regs.query.query_get.operation) {
402 case Regs::QueryOperation::Release: 462 case Regs::QueryOperation::Release:
403 StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0); 463 if (regs.query.query_get.fence == 1) {
464 rasterizer.SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
465 } else {
466 StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
467 }
404 break; 468 break;
405 case Regs::QueryOperation::Acquire: 469 case Regs::QueryOperation::Acquire:
406 // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that 470 // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
@@ -479,7 +543,7 @@ void Maxwell3D::ProcessSyncPoint() {
479 const u32 increment = regs.sync_info.increment.Value(); 543 const u32 increment = regs.sync_info.increment.Value();
480 [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value(); 544 [[maybe_unused]] const u32 cache_flush = regs.sync_info.unknown.Value();
481 if (increment) { 545 if (increment) {
482 system.GPU().IncrementSyncPoint(sync_point); 546 rasterizer.SignalSyncPoint(sync_point);
483 } 547 }
484} 548}
485 549
@@ -562,6 +626,28 @@ void Maxwell3D::StartCBData(u32 method) {
562 ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]); 626 ProcessCBData(regs.const_buffer.cb_data[cb_data_state.id]);
563} 627}
564 628
629void Maxwell3D::ProcessCBMultiData(u32 method, const u32* start_base, u32 amount) {
630 if (cb_data_state.current != method) {
631 if (cb_data_state.current != null_cb_data) {
632 FinishCBData();
633 }
634 constexpr u32 first_cb_data = MAXWELL3D_REG_INDEX(const_buffer.cb_data[0]);
635 cb_data_state.start_pos = regs.const_buffer.cb_pos;
636 cb_data_state.id = method - first_cb_data;
637 cb_data_state.current = method;
638 cb_data_state.counter = 0;
639 }
640 const std::size_t id = cb_data_state.id;
641 const std::size_t size = amount;
642 std::size_t i = 0;
643 for (; i < size; i++) {
644 cb_data_state.buffer[id][cb_data_state.counter] = start_base[i];
645 cb_data_state.counter++;
646 }
647 // Increment the current buffer position.
648 regs.const_buffer.cb_pos = regs.const_buffer.cb_pos + 4 * amount;
649}
650
565void Maxwell3D::FinishCBData() { 651void Maxwell3D::FinishCBData() {
566 // Write the input value to the current const buffer at the current position. 652 // Write the input value to the current const buffer at the current position.
567 const GPUVAddr buffer_address = regs.const_buffer.BufferAddress(); 653 const GPUVAddr buffer_address = regs.const_buffer.BufferAddress();
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 5cf6a4cc3..864924ff3 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -709,7 +709,9 @@ public:
709 709
710 union { 710 union {
711 struct { 711 struct {
712 INSERT_UNION_PADDING_WORDS(0x45); 712 INSERT_UNION_PADDING_WORDS(0x44);
713
714 u32 wait_for_idle;
713 715
714 struct { 716 struct {
715 u32 upload_address; 717 u32 upload_address;
@@ -1149,7 +1151,7 @@ public:
1149 1151
1150 /// Returns whether the vertex array specified by index is supposed to be 1152 /// Returns whether the vertex array specified by index is supposed to be
1151 /// accessed per instance or not. 1153 /// accessed per instance or not.
1152 bool IsInstancingEnabled(u32 index) const { 1154 bool IsInstancingEnabled(std::size_t index) const {
1153 return is_instanced[index]; 1155 return is_instanced[index];
1154 } 1156 }
1155 } instanced_arrays; 1157 } instanced_arrays;
@@ -1179,6 +1181,7 @@ public:
1179 BitField<0, 1, u32> depth_range_0_1; 1181 BitField<0, 1, u32> depth_range_0_1;
1180 BitField<3, 1, u32> depth_clamp_near; 1182 BitField<3, 1, u32> depth_clamp_near;
1181 BitField<4, 1, u32> depth_clamp_far; 1183 BitField<4, 1, u32> depth_clamp_far;
1184 BitField<11, 1, u32> depth_clamp_disabled;
1182 } view_volume_clip_control; 1185 } view_volume_clip_control;
1183 1186
1184 INSERT_UNION_PADDING_WORDS(0x1F); 1187 INSERT_UNION_PADDING_WORDS(0x1F);
@@ -1259,7 +1262,8 @@ public:
1259 1262
1260 GPUVAddr LimitAddress() const { 1263 GPUVAddr LimitAddress() const {
1261 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) | 1264 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(limit_high) << 32) |
1262 limit_low); 1265 limit_low) +
1266 1;
1263 } 1267 }
1264 } vertex_array_limit[NumVertexArrays]; 1268 } vertex_array_limit[NumVertexArrays];
1265 1269
@@ -1358,6 +1362,9 @@ public:
1358 /// Write the value to the register identified by method. 1362 /// Write the value to the register identified by method.
1359 void CallMethod(const GPU::MethodCall& method_call); 1363 void CallMethod(const GPU::MethodCall& method_call);
1360 1364
1365 /// Write multiple values to the register identified by method.
1366 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
1367
1361 /// Write the value to the register identified by method. 1368 /// Write the value to the register identified by method.
1362 void CallMethodFromMME(const GPU::MethodCall& method_call); 1369 void CallMethodFromMME(const GPU::MethodCall& method_call);
1363 1370
@@ -1511,6 +1518,7 @@ private:
1511 /// Handles a write to the CB_DATA[i] register. 1518 /// Handles a write to the CB_DATA[i] register.
1512 void StartCBData(u32 method); 1519 void StartCBData(u32 method);
1513 void ProcessCBData(u32 value); 1520 void ProcessCBData(u32 value);
1521 void ProcessCBMultiData(u32 method, const u32* start_base, u32 amount);
1514 void FinishCBData(); 1522 void FinishCBData();
1515 1523
1516 /// Handles a write to the CB_BIND register. 1524 /// Handles a write to the CB_BIND register.
@@ -1530,6 +1538,7 @@ private:
1530 static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \ 1538 static_assert(offsetof(Maxwell3D::Regs, field_name) == position * 4, \
1531 "Field " #field_name " has invalid position") 1539 "Field " #field_name " has invalid position")
1532 1540
1541ASSERT_REG_POSITION(wait_for_idle, 0x44);
1533ASSERT_REG_POSITION(macros, 0x45); 1542ASSERT_REG_POSITION(macros, 0x45);
1534ASSERT_REG_POSITION(shadow_ram_control, 0x49); 1543ASSERT_REG_POSITION(shadow_ram_control, 0x49);
1535ASSERT_REG_POSITION(upload, 0x60); 1544ASSERT_REG_POSITION(upload, 0x60);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index c2610f992..6630005b0 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -36,6 +36,13 @@ void MaxwellDMA::CallMethod(const GPU::MethodCall& method_call) {
36#undef MAXWELLDMA_REG_INDEX 36#undef MAXWELLDMA_REG_INDEX
37} 37}
38 38
39void MaxwellDMA::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
40 u32 methods_pending) {
41 for (std::size_t i = 0; i < amount; i++) {
42 CallMethod({method, base_start[i], 0, methods_pending - static_cast<u32>(i)});
43 }
44}
45
39void MaxwellDMA::HandleCopy() { 46void MaxwellDMA::HandleCopy() {
40 LOG_TRACE(HW_GPU, "Requested a DMA copy"); 47 LOG_TRACE(HW_GPU, "Requested a DMA copy");
41 48
@@ -104,8 +111,13 @@ void MaxwellDMA::HandleCopy() {
104 write_buffer.resize(dst_size); 111 write_buffer.resize(dst_size);
105 } 112 }
106 113
107 memory_manager.ReadBlock(source, read_buffer.data(), src_size); 114 if (Settings::IsGPULevelExtreme()) {
108 memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); 115 memory_manager.ReadBlock(source, read_buffer.data(), src_size);
116 memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
117 } else {
118 memory_manager.ReadBlockUnsafe(source, read_buffer.data(), src_size);
119 memory_manager.ReadBlockUnsafe(dest, write_buffer.data(), dst_size);
120 }
109 121
110 Texture::UnswizzleSubrect( 122 Texture::UnswizzleSubrect(
111 regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel, 123 regs.x_count, regs.y_count, regs.dst_pitch, regs.src_params.size_x, bytes_per_pixel,
@@ -136,7 +148,7 @@ void MaxwellDMA::HandleCopy() {
136 write_buffer.resize(dst_size); 148 write_buffer.resize(dst_size);
137 } 149 }
138 150
139 if (Settings::values.use_accurate_gpu_emulation) { 151 if (Settings::IsGPULevelExtreme()) {
140 memory_manager.ReadBlock(source, read_buffer.data(), src_size); 152 memory_manager.ReadBlock(source, read_buffer.data(), src_size);
141 memory_manager.ReadBlock(dest, write_buffer.data(), dst_size); 153 memory_manager.ReadBlock(dest, write_buffer.data(), dst_size);
142 } else { 154 } else {
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 4f40d1d1f..c43ed8194 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -35,6 +35,9 @@ public:
35 /// Write the value to the register identified by method. 35 /// Write the value to the register identified by method.
36 void CallMethod(const GPU::MethodCall& method_call); 36 void CallMethod(const GPU::MethodCall& method_call);
37 37
38 /// Write multiple values to the register identified by method.
39 void CallMultiMethod(u32 method, const u32* base_start, u32 amount, u32 methods_pending);
40
38 struct Regs { 41 struct Regs {
39 static constexpr std::size_t NUM_REGS = 0x1D6; 42 static constexpr std::size_t NUM_REGS = 0x1D6;
40 43
diff --git a/src/video_core/engines/shader_bytecode.h b/src/video_core/engines/shader_bytecode.h
index 7231597d4..8dae754d4 100644
--- a/src/video_core/engines/shader_bytecode.h
+++ b/src/video_core/engines/shader_bytecode.h
@@ -655,6 +655,7 @@ union Instruction {
655 } 655 }
656 656
657 constexpr Instruction(u64 value) : value{value} {} 657 constexpr Instruction(u64 value) : value{value} {}
658 constexpr Instruction(const Instruction& instr) : value(instr.value) {}
658 659
659 BitField<0, 8, Register> gpr0; 660 BitField<0, 8, Register> gpr0;
660 BitField<8, 8, Register> gpr8; 661 BitField<8, 8, Register> gpr8;
@@ -813,15 +814,17 @@ union Instruction {
813 } alu_integer; 814 } alu_integer;
814 815
815 union { 816 union {
817 BitField<43, 1, u64> x;
818 } iadd;
819
820 union {
816 BitField<39, 1, u64> ftz; 821 BitField<39, 1, u64> ftz;
817 BitField<32, 1, u64> saturate; 822 BitField<32, 1, u64> saturate;
818 BitField<49, 2, HalfMerge> merge; 823 BitField<49, 2, HalfMerge> merge;
819 824
820 BitField<43, 1, u64> negate_a;
821 BitField<44, 1, u64> abs_a; 825 BitField<44, 1, u64> abs_a;
822 BitField<47, 2, HalfType> type_a; 826 BitField<47, 2, HalfType> type_a;
823 827
824 BitField<31, 1, u64> negate_b;
825 BitField<30, 1, u64> abs_b; 828 BitField<30, 1, u64> abs_b;
826 BitField<28, 2, HalfType> type_b; 829 BitField<28, 2, HalfType> type_b;
827 830
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
new file mode 100644
index 000000000..8b2a6a42c
--- /dev/null
+++ b/src/video_core/fence_manager.h
@@ -0,0 +1,172 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <algorithm>
8#include <array>
9#include <memory>
10#include <queue>
11
12#include "common/assert.h"
13#include "common/common_types.h"
14#include "core/core.h"
15#include "core/memory.h"
16#include "core/settings.h"
17#include "video_core/gpu.h"
18#include "video_core/memory_manager.h"
19#include "video_core/rasterizer_interface.h"
20
21namespace VideoCommon {
22
23class FenceBase {
24public:
25 FenceBase(u32 payload, bool is_stubbed)
26 : address{}, payload{payload}, is_semaphore{false}, is_stubbed{is_stubbed} {}
27
28 FenceBase(GPUVAddr address, u32 payload, bool is_stubbed)
29 : address{address}, payload{payload}, is_semaphore{true}, is_stubbed{is_stubbed} {}
30
31 GPUVAddr GetAddress() const {
32 return address;
33 }
34
35 u32 GetPayload() const {
36 return payload;
37 }
38
39 bool IsSemaphore() const {
40 return is_semaphore;
41 }
42
43private:
44 GPUVAddr address;
45 u32 payload;
46 bool is_semaphore;
47
48protected:
49 bool is_stubbed;
50};
51
52template <typename TFence, typename TTextureCache, typename TTBufferCache, typename TQueryCache>
53class FenceManager {
54public:
55 void SignalSemaphore(GPUVAddr addr, u32 value) {
56 TryReleasePendingFences();
57 const bool should_flush = ShouldFlush();
58 CommitAsyncFlushes();
59 TFence new_fence = CreateFence(addr, value, !should_flush);
60 fences.push(new_fence);
61 QueueFence(new_fence);
62 if (should_flush) {
63 rasterizer.FlushCommands();
64 }
65 rasterizer.SyncGuestHost();
66 }
67
68 void SignalSyncPoint(u32 value) {
69 TryReleasePendingFences();
70 const bool should_flush = ShouldFlush();
71 CommitAsyncFlushes();
72 TFence new_fence = CreateFence(value, !should_flush);
73 fences.push(new_fence);
74 QueueFence(new_fence);
75 if (should_flush) {
76 rasterizer.FlushCommands();
77 }
78 rasterizer.SyncGuestHost();
79 }
80
81 void WaitPendingFences() {
82 auto& gpu{system.GPU()};
83 auto& memory_manager{gpu.MemoryManager()};
84 while (!fences.empty()) {
85 TFence& current_fence = fences.front();
86 if (ShouldWait()) {
87 WaitFence(current_fence);
88 }
89 PopAsyncFlushes();
90 if (current_fence->IsSemaphore()) {
91 memory_manager.template Write<u32>(current_fence->GetAddress(),
92 current_fence->GetPayload());
93 } else {
94 gpu.IncrementSyncPoint(current_fence->GetPayload());
95 }
96 fences.pop();
97 }
98 }
99
100protected:
101 FenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
102 TTextureCache& texture_cache, TTBufferCache& buffer_cache,
103 TQueryCache& query_cache)
104 : system{system}, rasterizer{rasterizer}, texture_cache{texture_cache},
105 buffer_cache{buffer_cache}, query_cache{query_cache} {}
106
107 virtual ~FenceManager() {}
108
109 /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is
110 /// true
111 virtual TFence CreateFence(u32 value, bool is_stubbed) = 0;
112 /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
113 virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
114 /// Queues a fence into the backend if the fence isn't stubbed.
115 virtual void QueueFence(TFence& fence) = 0;
116 /// Notifies that the backend fence has been signaled/reached in host GPU.
117 virtual bool IsFenceSignaled(TFence& fence) const = 0;
118 /// Waits until a fence has been signalled by the host GPU.
119 virtual void WaitFence(TFence& fence) = 0;
120
121 Core::System& system;
122 VideoCore::RasterizerInterface& rasterizer;
123 TTextureCache& texture_cache;
124 TTBufferCache& buffer_cache;
125 TQueryCache& query_cache;
126
127private:
128 void TryReleasePendingFences() {
129 auto& gpu{system.GPU()};
130 auto& memory_manager{gpu.MemoryManager()};
131 while (!fences.empty()) {
132 TFence& current_fence = fences.front();
133 if (ShouldWait() && !IsFenceSignaled(current_fence)) {
134 return;
135 }
136 PopAsyncFlushes();
137 if (current_fence->IsSemaphore()) {
138 memory_manager.template Write<u32>(current_fence->GetAddress(),
139 current_fence->GetPayload());
140 } else {
141 gpu.IncrementSyncPoint(current_fence->GetPayload());
142 }
143 fences.pop();
144 }
145 }
146
147 bool ShouldWait() const {
148 return texture_cache.ShouldWaitAsyncFlushes() || buffer_cache.ShouldWaitAsyncFlushes() ||
149 query_cache.ShouldWaitAsyncFlushes();
150 }
151
152 bool ShouldFlush() const {
153 return texture_cache.HasUncommittedFlushes() || buffer_cache.HasUncommittedFlushes() ||
154 query_cache.HasUncommittedFlushes();
155 }
156
157 void PopAsyncFlushes() {
158 texture_cache.PopAsyncFlushes();
159 buffer_cache.PopAsyncFlushes();
160 query_cache.PopAsyncFlushes();
161 }
162
163 void CommitAsyncFlushes() {
164 texture_cache.CommitAsyncFlushes();
165 buffer_cache.CommitAsyncFlushes();
166 query_cache.CommitAsyncFlushes();
167 }
168
169 std::queue<TFence> fences;
170};
171
172} // namespace VideoCommon
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index a606f4abd..b87fd873d 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -9,6 +9,7 @@
9#include "core/core_timing_util.h" 9#include "core/core_timing_util.h"
10#include "core/frontend/emu_window.h" 10#include "core/frontend/emu_window.h"
11#include "core/memory.h" 11#include "core/memory.h"
12#include "core/settings.h"
12#include "video_core/engines/fermi_2d.h" 13#include "video_core/engines/fermi_2d.h"
13#include "video_core/engines/kepler_compute.h" 14#include "video_core/engines/kepler_compute.h"
14#include "video_core/engines/kepler_memory.h" 15#include "video_core/engines/kepler_memory.h"
@@ -125,6 +126,28 @@ bool GPU::CancelSyncptInterrupt(const u32 syncpoint_id, const u32 value) {
125 return true; 126 return true;
126} 127}
127 128
129u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
130 std::unique_lock lck{flush_request_mutex};
131 const u64 fence = ++last_flush_fence;
132 flush_requests.emplace_back(fence, addr, size);
133 return fence;
134}
135
136void GPU::TickWork() {
137 std::unique_lock lck{flush_request_mutex};
138 while (!flush_requests.empty()) {
139 auto& request = flush_requests.front();
140 const u64 fence = request.fence;
141 const VAddr addr = request.addr;
142 const std::size_t size = request.size;
143 flush_requests.pop_front();
144 flush_request_mutex.unlock();
145 renderer->Rasterizer().FlushRegion(addr, size);
146 current_flush_fence.store(fence);
147 flush_request_mutex.lock();
148 }
149}
150
128u64 GPU::GetTicks() const { 151u64 GPU::GetTicks() const {
129 // This values were reversed engineered by fincs from NVN 152 // This values were reversed engineered by fincs from NVN
130 // The gpu clock is reported in units of 385/625 nanoseconds 153 // The gpu clock is reported in units of 385/625 nanoseconds
@@ -132,7 +155,10 @@ u64 GPU::GetTicks() const {
132 constexpr u64 gpu_ticks_den = 625; 155 constexpr u64 gpu_ticks_den = 625;
133 156
134 const u64 cpu_ticks = system.CoreTiming().GetTicks(); 157 const u64 cpu_ticks = system.CoreTiming().GetTicks();
135 const u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count(); 158 u64 nanoseconds = Core::Timing::CyclesToNs(cpu_ticks).count();
159 if (Settings::values.use_fast_gpu_time) {
160 nanoseconds /= 256;
161 }
136 const u64 nanoseconds_num = nanoseconds / gpu_ticks_den; 162 const u64 nanoseconds_num = nanoseconds / gpu_ticks_den;
137 const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den; 163 const u64 nanoseconds_rem = nanoseconds % gpu_ticks_den;
138 return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den; 164 return nanoseconds_num * gpu_ticks_num + (nanoseconds_rem * gpu_ticks_num) / gpu_ticks_den;
@@ -142,6 +168,13 @@ void GPU::FlushCommands() {
142 renderer->Rasterizer().FlushCommands(); 168 renderer->Rasterizer().FlushCommands();
143} 169}
144 170
171void GPU::SyncGuestHost() {
172 renderer->Rasterizer().SyncGuestHost();
173}
174
175void GPU::OnCommandListEnd() {
176 renderer->Rasterizer().ReleaseFences();
177}
145// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence 178// Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
146// their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4. 179// their numbers are written down multiplied by 4 in Docs. Here we are not multiply by 4.
147// So the values you see in docs might be multiplied by 4. 180// So the values you see in docs might be multiplied by 4.
@@ -180,16 +213,32 @@ void GPU::CallMethod(const MethodCall& method_call) {
180 213
181 ASSERT(method_call.subchannel < bound_engines.size()); 214 ASSERT(method_call.subchannel < bound_engines.size());
182 215
183 if (ExecuteMethodOnEngine(method_call)) { 216 if (ExecuteMethodOnEngine(method_call.method)) {
184 CallEngineMethod(method_call); 217 CallEngineMethod(method_call);
185 } else { 218 } else {
186 CallPullerMethod(method_call); 219 CallPullerMethod(method_call);
187 } 220 }
188} 221}
189 222
190bool GPU::ExecuteMethodOnEngine(const MethodCall& method_call) { 223void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
191 const auto method = static_cast<BufferMethods>(method_call.method); 224 u32 methods_pending) {
192 return method >= BufferMethods::NonPullerMethods; 225 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
226
227 ASSERT(subchannel < bound_engines.size());
228
229 if (ExecuteMethodOnEngine(method)) {
230 CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
231 } else {
232 for (std::size_t i = 0; i < amount; i++) {
233 CallPullerMethod(
234 {method, base_start[i], subchannel, methods_pending - static_cast<u32>(i)});
235 }
236 }
237}
238
239bool GPU::ExecuteMethodOnEngine(u32 method) {
240 const auto buffer_method = static_cast<BufferMethods>(method);
241 return buffer_method >= BufferMethods::NonPullerMethods;
193} 242}
194 243
195void GPU::CallPullerMethod(const MethodCall& method_call) { 244void GPU::CallPullerMethod(const MethodCall& method_call) {
@@ -269,6 +318,31 @@ void GPU::CallEngineMethod(const MethodCall& method_call) {
269 } 318 }
270} 319}
271 320
321void GPU::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
322 u32 methods_pending) {
323 const EngineID engine = bound_engines[subchannel];
324
325 switch (engine) {
326 case EngineID::FERMI_TWOD_A:
327 fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
328 break;
329 case EngineID::MAXWELL_B:
330 maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
331 break;
332 case EngineID::KEPLER_COMPUTE_B:
333 kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
334 break;
335 case EngineID::MAXWELL_DMA_COPY_A:
336 maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
337 break;
338 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
339 kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
340 break;
341 default:
342 UNIMPLEMENTED_MSG("Unimplemented engine");
343 }
344}
345
272void GPU::ProcessBindMethod(const MethodCall& method_call) { 346void GPU::ProcessBindMethod(const MethodCall& method_call) {
273 // Bind the current subchannel to the desired engine id. 347 // Bind the current subchannel to the desired engine id.
274 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, 348 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index 1a2d747be..dd51c95b7 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -155,7 +155,27 @@ public:
155 /// Calls a GPU method. 155 /// Calls a GPU method.
156 void CallMethod(const MethodCall& method_call); 156 void CallMethod(const MethodCall& method_call);
157 157
158 /// Calls a GPU multivalue method.
159 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
160 u32 methods_pending);
161
162 /// Flush all current written commands into the host GPU for execution.
158 void FlushCommands(); 163 void FlushCommands();
164 /// Synchronizes CPU writes with Host GPU memory.
165 void SyncGuestHost();
166 /// Signal the ending of command list.
167 virtual void OnCommandListEnd();
168
169 /// Request a host GPU memory flush from the CPU.
170 u64 RequestFlush(VAddr addr, std::size_t size);
171
172 /// Obtains current flush request fence id.
173 u64 CurrentFlushRequestFence() const {
174 return current_flush_fence.load(std::memory_order_relaxed);
175 }
176
177 /// Tick pending requests within the GPU.
178 void TickWork();
159 179
160 /// Returns a reference to the Maxwell3D GPU engine. 180 /// Returns a reference to the Maxwell3D GPU engine.
161 Engines::Maxwell3D& Maxwell3D(); 181 Engines::Maxwell3D& Maxwell3D();
@@ -293,8 +313,12 @@ private:
293 /// Calls a GPU engine method. 313 /// Calls a GPU engine method.
294 void CallEngineMethod(const MethodCall& method_call); 314 void CallEngineMethod(const MethodCall& method_call);
295 315
316 /// Calls a GPU engine multivalue method.
317 void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
318 u32 methods_pending);
319
296 /// Determines where the method should be executed. 320 /// Determines where the method should be executed.
297 bool ExecuteMethodOnEngine(const MethodCall& method_call); 321 bool ExecuteMethodOnEngine(u32 method);
298 322
299protected: 323protected:
300 std::unique_ptr<Tegra::DmaPusher> dma_pusher; 324 std::unique_ptr<Tegra::DmaPusher> dma_pusher;
@@ -325,6 +349,19 @@ private:
325 349
326 std::condition_variable sync_cv; 350 std::condition_variable sync_cv;
327 351
352 struct FlushRequest {
353 FlushRequest(u64 fence, VAddr addr, std::size_t size)
354 : fence{fence}, addr{addr}, size{size} {}
355 u64 fence;
356 VAddr addr;
357 std::size_t size;
358 };
359
360 std::list<FlushRequest> flush_requests;
361 std::atomic<u64> current_flush_fence{};
362 u64 last_flush_fence{};
363 std::mutex flush_request_mutex;
364
328 const bool is_async; 365 const bool is_async;
329}; 366};
330 367
diff --git a/src/video_core/gpu_asynch.cpp b/src/video_core/gpu_asynch.cpp
index 20e73a37e..53305ab43 100644
--- a/src/video_core/gpu_asynch.cpp
+++ b/src/video_core/gpu_asynch.cpp
@@ -52,4 +52,8 @@ void GPUAsynch::WaitIdle() const {
52 gpu_thread.WaitIdle(); 52 gpu_thread.WaitIdle();
53} 53}
54 54
55void GPUAsynch::OnCommandListEnd() {
56 gpu_thread.OnCommandListEnd();
57}
58
55} // namespace VideoCommon 59} // namespace VideoCommon
diff --git a/src/video_core/gpu_asynch.h b/src/video_core/gpu_asynch.h
index 03fd0eef0..517658612 100644
--- a/src/video_core/gpu_asynch.h
+++ b/src/video_core/gpu_asynch.h
@@ -32,6 +32,8 @@ public:
32 void FlushAndInvalidateRegion(VAddr addr, u64 size) override; 32 void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
33 void WaitIdle() const override; 33 void WaitIdle() const override;
34 34
35 void OnCommandListEnd() override;
36
35protected: 37protected:
36 void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override; 38 void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const override;
37 39
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index 10cda686b..c3bb4fe06 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -6,6 +6,7 @@
6#include "common/microprofile.h" 6#include "common/microprofile.h"
7#include "core/core.h" 7#include "core/core.h"
8#include "core/frontend/emu_window.h" 8#include "core/frontend/emu_window.h"
9#include "core/settings.h"
9#include "video_core/dma_pusher.h" 10#include "video_core/dma_pusher.h"
10#include "video_core/gpu.h" 11#include "video_core/gpu.h"
11#include "video_core/gpu_thread.h" 12#include "video_core/gpu_thread.h"
@@ -14,8 +15,9 @@
14namespace VideoCommon::GPUThread { 15namespace VideoCommon::GPUThread {
15 16
16/// Runs the GPU thread 17/// Runs the GPU thread
17static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context, 18static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
18 Tegra::DmaPusher& dma_pusher, SynchState& state) { 19 Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher,
20 SynchState& state) {
19 MicroProfileOnThreadCreate("GpuThread"); 21 MicroProfileOnThreadCreate("GpuThread");
20 22
21 // Wait for first GPU command before acquiring the window context 23 // Wait for first GPU command before acquiring the window context
@@ -37,10 +39,14 @@ static void RunThread(VideoCore::RendererBase& renderer, Core::Frontend::Graphic
37 dma_pusher.DispatchCalls(); 39 dma_pusher.DispatchCalls();
38 } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) { 40 } else if (const auto data = std::get_if<SwapBuffersCommand>(&next.data)) {
39 renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr); 41 renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
42 } else if (const auto data = std::get_if<OnCommandListEndCommand>(&next.data)) {
43 renderer.Rasterizer().ReleaseFences();
44 } else if (const auto data = std::get_if<GPUTickCommand>(&next.data)) {
45 system.GPU().TickWork();
40 } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) { 46 } else if (const auto data = std::get_if<FlushRegionCommand>(&next.data)) {
41 renderer.Rasterizer().FlushRegion(data->addr, data->size); 47 renderer.Rasterizer().FlushRegion(data->addr, data->size);
42 } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) { 48 } else if (const auto data = std::get_if<InvalidateRegionCommand>(&next.data)) {
43 renderer.Rasterizer().InvalidateRegion(data->addr, data->size); 49 renderer.Rasterizer().OnCPUWrite(data->addr, data->size);
44 } else if (std::holds_alternative<EndProcessingCommand>(next.data)) { 50 } else if (std::holds_alternative<EndProcessingCommand>(next.data)) {
45 return; 51 return;
46 } else { 52 } else {
@@ -65,8 +71,8 @@ ThreadManager::~ThreadManager() {
65void ThreadManager::StartThread(VideoCore::RendererBase& renderer, 71void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
66 Core::Frontend::GraphicsContext& context, 72 Core::Frontend::GraphicsContext& context,
67 Tegra::DmaPusher& dma_pusher) { 73 Tegra::DmaPusher& dma_pusher) {
68 thread = std::thread{RunThread, std::ref(renderer), std::ref(context), std::ref(dma_pusher), 74 thread = std::thread{RunThread, std::ref(system), std::ref(renderer),
69 std::ref(state)}; 75 std::ref(context), std::ref(dma_pusher), std::ref(state)};
70} 76}
71 77
72void ThreadManager::SubmitList(Tegra::CommandList&& entries) { 78void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
@@ -78,16 +84,29 @@ void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
78} 84}
79 85
80void ThreadManager::FlushRegion(VAddr addr, u64 size) { 86void ThreadManager::FlushRegion(VAddr addr, u64 size) {
81 PushCommand(FlushRegionCommand(addr, size)); 87 if (!Settings::IsGPULevelHigh()) {
88 PushCommand(FlushRegionCommand(addr, size));
89 return;
90 }
91 if (!Settings::IsGPULevelExtreme()) {
92 return;
93 }
94 if (system.Renderer().Rasterizer().MustFlushRegion(addr, size)) {
95 auto& gpu = system.GPU();
96 u64 fence = gpu.RequestFlush(addr, size);
97 PushCommand(GPUTickCommand());
98 while (fence > gpu.CurrentFlushRequestFence()) {
99 }
100 }
82} 101}
83 102
84void ThreadManager::InvalidateRegion(VAddr addr, u64 size) { 103void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
85 system.Renderer().Rasterizer().InvalidateRegion(addr, size); 104 system.Renderer().Rasterizer().OnCPUWrite(addr, size);
86} 105}
87 106
88void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) { 107void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
89 // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important 108 // Skip flush on asynch mode, as FlushAndInvalidateRegion is not used for anything too important
90 InvalidateRegion(addr, size); 109 system.Renderer().Rasterizer().OnCPUWrite(addr, size);
91} 110}
92 111
93void ThreadManager::WaitIdle() const { 112void ThreadManager::WaitIdle() const {
@@ -95,6 +114,10 @@ void ThreadManager::WaitIdle() const {
95 } 114 }
96} 115}
97 116
117void ThreadManager::OnCommandListEnd() {
118 PushCommand(OnCommandListEndCommand());
119}
120
98u64 ThreadManager::PushCommand(CommandData&& command_data) { 121u64 ThreadManager::PushCommand(CommandData&& command_data) {
99 const u64 fence{++state.last_fence}; 122 const u64 fence{++state.last_fence};
100 state.queue.Push(CommandDataContainer(std::move(command_data), fence)); 123 state.queue.Push(CommandDataContainer(std::move(command_data), fence));
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index cd74ad330..5a28335d6 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -70,9 +70,16 @@ struct FlushAndInvalidateRegionCommand final {
70 u64 size; 70 u64 size;
71}; 71};
72 72
73/// Command called within the gpu, to schedule actions after a command list end
74struct OnCommandListEndCommand final {};
75
76/// Command to make the gpu look into pending requests
77struct GPUTickCommand final {};
78
73using CommandData = 79using CommandData =
74 std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand, 80 std::variant<EndProcessingCommand, SubmitListCommand, SwapBuffersCommand, FlushRegionCommand,
75 InvalidateRegionCommand, FlushAndInvalidateRegionCommand>; 81 InvalidateRegionCommand, FlushAndInvalidateRegionCommand, OnCommandListEndCommand,
82 GPUTickCommand>;
76 83
77struct CommandDataContainer { 84struct CommandDataContainer {
78 CommandDataContainer() = default; 85 CommandDataContainer() = default;
@@ -122,6 +129,8 @@ public:
122 // Wait until the gpu thread is idle. 129 // Wait until the gpu thread is idle.
123 void WaitIdle() const; 130 void WaitIdle() const;
124 131
132 void OnCommandListEnd();
133
125private: 134private:
126 /// Pushes a command to be executed by the GPU thread 135 /// Pushes a command to be executed by the GPU thread
127 u64 PushCommand(CommandData&& command_data); 136 u64 PushCommand(CommandData&& command_data);
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index fd49bc2a9..dbee9f634 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -51,11 +51,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
51 const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)}; 51 const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
52 52
53 MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); 53 MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
54 ASSERT(system.CurrentProcess() 54 ASSERT(
55 ->PageTable() 55 system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
56 .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
57 Kernel::Memory::MemoryAttribute::DeviceShared)
58 .IsSuccess());
59 56
60 return gpu_addr; 57 return gpu_addr;
61} 58}
@@ -66,11 +63,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
66 const u64 aligned_size{Common::AlignUp(size, page_size)}; 63 const u64 aligned_size{Common::AlignUp(size, page_size)};
67 64
68 MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr); 65 MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
69 ASSERT(system.CurrentProcess() 66 ASSERT(
70 ->PageTable() 67 system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
71 .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
72 Kernel::Memory::MemoryAttribute::DeviceShared)
73 .IsSuccess());
74 return gpu_addr; 68 return gpu_addr;
75} 69}
76 70
@@ -87,9 +81,7 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
87 UnmapRange(gpu_addr, aligned_size); 81 UnmapRange(gpu_addr, aligned_size);
88 ASSERT(system.CurrentProcess() 82 ASSERT(system.CurrentProcess()
89 ->PageTable() 83 ->PageTable()
90 .SetMemoryAttribute(cpu_addr.value(), size, 84 .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
91 Kernel::Memory::MemoryAttribute::DeviceShared,
92 Kernel::Memory::MemoryAttribute::None)
93 .IsSuccess()); 85 .IsSuccess());
94 86
95 return gpu_addr; 87 return gpu_addr;
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 5ea2b01f2..2f75f8801 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -12,10 +12,12 @@
12#include <mutex> 12#include <mutex>
13#include <optional> 13#include <optional>
14#include <unordered_map> 14#include <unordered_map>
15#include <unordered_set>
15#include <vector> 16#include <vector>
16 17
17#include "common/assert.h" 18#include "common/assert.h"
18#include "core/core.h" 19#include "core/core.h"
20#include "core/settings.h"
19#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
20#include "video_core/gpu.h" 22#include "video_core/gpu.h"
21#include "video_core/memory_manager.h" 23#include "video_core/memory_manager.h"
@@ -130,6 +132,9 @@ public:
130 } 132 }
131 133
132 query->BindCounter(Stream(type).Current(), timestamp); 134 query->BindCounter(Stream(type).Current(), timestamp);
135 if (Settings::values.use_asynchronous_gpu_emulation) {
136 AsyncFlushQuery(cpu_addr);
137 }
133 } 138 }
134 139
135 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. 140 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
@@ -170,6 +175,37 @@ public:
170 return streams[static_cast<std::size_t>(type)]; 175 return streams[static_cast<std::size_t>(type)];
171 } 176 }
172 177
178 void CommitAsyncFlushes() {
179 committed_flushes.push_back(uncommitted_flushes);
180 uncommitted_flushes.reset();
181 }
182
183 bool HasUncommittedFlushes() const {
184 return uncommitted_flushes != nullptr;
185 }
186
187 bool ShouldWaitAsyncFlushes() const {
188 if (committed_flushes.empty()) {
189 return false;
190 }
191 return committed_flushes.front() != nullptr;
192 }
193
194 void PopAsyncFlushes() {
195 if (committed_flushes.empty()) {
196 return;
197 }
198 auto& flush_list = committed_flushes.front();
199 if (!flush_list) {
200 committed_flushes.pop_front();
201 return;
202 }
203 for (VAddr query_address : *flush_list) {
204 FlushAndRemoveRegion(query_address, 4);
205 }
206 committed_flushes.pop_front();
207 }
208
173protected: 209protected:
174 std::array<QueryPool, VideoCore::NumQueryTypes> query_pools; 210 std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
175 211
@@ -224,6 +260,13 @@ private:
224 return found != std::end(contents) ? &*found : nullptr; 260 return found != std::end(contents) ? &*found : nullptr;
225 } 261 }
226 262
263 void AsyncFlushQuery(VAddr addr) {
264 if (!uncommitted_flushes) {
265 uncommitted_flushes = std::make_shared<std::unordered_set<VAddr>>();
266 }
267 uncommitted_flushes->insert(addr);
268 }
269
227 static constexpr std::uintptr_t PAGE_SIZE = 4096; 270 static constexpr std::uintptr_t PAGE_SIZE = 4096;
228 static constexpr unsigned PAGE_SHIFT = 12; 271 static constexpr unsigned PAGE_SHIFT = 12;
229 272
@@ -235,6 +278,9 @@ private:
235 std::unordered_map<u64, std::vector<CachedQuery>> cached_queries; 278 std::unordered_map<u64, std::vector<CachedQuery>> cached_queries;
236 279
237 std::array<CounterStream, VideoCore::NumQueryTypes> streams; 280 std::array<CounterStream, VideoCore::NumQueryTypes> streams;
281
282 std::shared_ptr<std::unordered_set<VAddr>> uncommitted_flushes{};
283 std::list<std::shared_ptr<std::unordered_set<VAddr>>> committed_flushes;
238}; 284};
239 285
240template <class QueryCache, class HostCounter> 286template <class QueryCache, class HostCounter>
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index 8ae5b9c4e..3cbdac8e7 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -49,19 +49,40 @@ public:
49 /// Records a GPU query and caches it 49 /// Records a GPU query and caches it
50 virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0; 50 virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;
51 51
52 /// Signal a GPU based semaphore as a fence
53 virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0;
54
55 /// Signal a GPU based syncpoint as a fence
56 virtual void SignalSyncPoint(u32 value) = 0;
57
58 /// Release all pending fences.
59 virtual void ReleaseFences() = 0;
60
52 /// Notify rasterizer that all caches should be flushed to Switch memory 61 /// Notify rasterizer that all caches should be flushed to Switch memory
53 virtual void FlushAll() = 0; 62 virtual void FlushAll() = 0;
54 63
55 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory 64 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
56 virtual void FlushRegion(VAddr addr, u64 size) = 0; 65 virtual void FlushRegion(VAddr addr, u64 size) = 0;
57 66
67 /// Check if the the specified memory area requires flushing to CPU Memory.
68 virtual bool MustFlushRegion(VAddr addr, u64 size) = 0;
69
58 /// Notify rasterizer that any caches of the specified region should be invalidated 70 /// Notify rasterizer that any caches of the specified region should be invalidated
59 virtual void InvalidateRegion(VAddr addr, u64 size) = 0; 71 virtual void InvalidateRegion(VAddr addr, u64 size) = 0;
60 72
73 /// Notify rasterizer that any caches of the specified region are desync with guest
74 virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
75
76 /// Sync memory between guest and host.
77 virtual void SyncGuestHost() = 0;
78
61 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory 79 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
62 /// and invalidated 80 /// and invalidated
63 virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0; 81 virtual void FlushAndInvalidateRegion(VAddr addr, u64 size) = 0;
64 82
83 /// Notify the host renderer to wait for previous primitive and compute operations.
84 virtual void WaitForIdle() = 0;
85
65 /// Notify the rasterizer to send all written commands to the host GPU. 86 /// Notify the rasterizer to send all written commands to the host GPU.
66 virtual void FlushCommands() = 0; 87 virtual void FlushCommands() = 0;
67 88
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index cb5792407..d2cab50bd 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -51,10 +51,6 @@ Buffer OGLBufferCache::CreateBlock(VAddr cpu_addr, std::size_t size) {
51 return std::make_shared<CachedBufferBlock>(cpu_addr, size); 51 return std::make_shared<CachedBufferBlock>(cpu_addr, size);
52} 52}
53 53
54void OGLBufferCache::WriteBarrier() {
55 glMemoryBarrier(GL_ALL_BARRIER_BITS);
56}
57
58GLuint OGLBufferCache::ToHandle(const Buffer& buffer) { 54GLuint OGLBufferCache::ToHandle(const Buffer& buffer) {
59 return buffer->GetHandle(); 55 return buffer->GetHandle();
60} 56}
@@ -72,6 +68,7 @@ void OGLBufferCache::UploadBlockData(const Buffer& buffer, std::size_t offset, s
72void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 68void OGLBufferCache::DownloadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
73 u8* data) { 69 u8* data) {
74 MICROPROFILE_SCOPE(OpenGL_Buffer_Download); 70 MICROPROFILE_SCOPE(OpenGL_Buffer_Download);
71 glMemoryBarrier(GL_BUFFER_UPDATE_BARRIER_BIT);
75 glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset), 72 glGetNamedBufferSubData(buffer->GetHandle(), static_cast<GLintptr>(offset),
76 static_cast<GLsizeiptr>(size), data); 73 static_cast<GLsizeiptr>(size), data);
77} 74}
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.h b/src/video_core/renderer_opengl/gl_buffer_cache.h
index a74817857..a9e86cfc7 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.h
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.h
@@ -59,8 +59,6 @@ protected:
59 59
60 GLuint ToHandle(const Buffer& buffer) override; 60 GLuint ToHandle(const Buffer& buffer) override;
61 61
62 void WriteBarrier() override;
63
64 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 62 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
65 const u8* data) override; 63 const u8* data) override;
66 64
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
new file mode 100644
index 000000000..99ddcb3f8
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -0,0 +1,72 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include "common/assert.h"
6
7#include "video_core/renderer_opengl/gl_fence_manager.h"
8
9namespace OpenGL {
10
11GLInnerFence::GLInnerFence(u32 payload, bool is_stubbed)
12 : VideoCommon::FenceBase(payload, is_stubbed), sync_object{} {}
13
14GLInnerFence::GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed)
15 : VideoCommon::FenceBase(address, payload, is_stubbed), sync_object{} {}
16
17GLInnerFence::~GLInnerFence() = default;
18
19void GLInnerFence::Queue() {
20 if (is_stubbed) {
21 return;
22 }
23 ASSERT(sync_object.handle == 0);
24 sync_object.Create();
25}
26
27bool GLInnerFence::IsSignaled() const {
28 if (is_stubbed) {
29 return true;
30 }
31 ASSERT(sync_object.handle != 0);
32 GLsizei length;
33 GLint sync_status;
34 glGetSynciv(sync_object.handle, GL_SYNC_STATUS, sizeof(GLint), &length, &sync_status);
35 return sync_status == GL_SIGNALED;
36}
37
38void GLInnerFence::Wait() {
39 if (is_stubbed) {
40 return;
41 }
42 ASSERT(sync_object.handle != 0);
43 glClientWaitSync(sync_object.handle, 0, GL_TIMEOUT_IGNORED);
44}
45
46FenceManagerOpenGL::FenceManagerOpenGL(Core::System& system,
47 VideoCore::RasterizerInterface& rasterizer,
48 TextureCacheOpenGL& texture_cache,
49 OGLBufferCache& buffer_cache, QueryCache& query_cache)
50 : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache) {}
51
52Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) {
53 return std::make_shared<GLInnerFence>(value, is_stubbed);
54}
55
56Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
57 return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
58}
59
60void FenceManagerOpenGL::QueueFence(Fence& fence) {
61 fence->Queue();
62}
63
64bool FenceManagerOpenGL::IsFenceSignaled(Fence& fence) const {
65 return fence->IsSignaled();
66}
67
68void FenceManagerOpenGL::WaitFence(Fence& fence) {
69 fence->Wait();
70}
71
72} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
new file mode 100644
index 000000000..c917b3343
--- /dev/null
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -0,0 +1,53 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8#include <glad/glad.h>
9
10#include "common/common_types.h"
11#include "video_core/fence_manager.h"
12#include "video_core/renderer_opengl/gl_buffer_cache.h"
13#include "video_core/renderer_opengl/gl_query_cache.h"
14#include "video_core/renderer_opengl/gl_resource_manager.h"
15#include "video_core/renderer_opengl/gl_texture_cache.h"
16
17namespace OpenGL {
18
19class GLInnerFence : public VideoCommon::FenceBase {
20public:
21 GLInnerFence(u32 payload, bool is_stubbed);
22 GLInnerFence(GPUVAddr address, u32 payload, bool is_stubbed);
23 ~GLInnerFence();
24
25 void Queue();
26
27 bool IsSignaled() const;
28
29 void Wait();
30
31private:
32 OGLSync sync_object;
33};
34
35using Fence = std::shared_ptr<GLInnerFence>;
36using GenericFenceManager =
37 VideoCommon::FenceManager<Fence, TextureCacheOpenGL, OGLBufferCache, QueryCache>;
38
39class FenceManagerOpenGL final : public GenericFenceManager {
40public:
41 FenceManagerOpenGL(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
42 TextureCacheOpenGL& texture_cache, OGLBufferCache& buffer_cache,
43 QueryCache& query_cache);
44
45protected:
46 Fence CreateFence(u32 value, bool is_stubbed) override;
47 Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
48 void QueueFence(Fence& fence) override;
49 bool IsFenceSignaled(Fence& fence) const override;
50 void WaitFence(Fence& fence) override;
51};
52
53} // namespace OpenGL
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index 175374f0d..8b3b3ce92 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -59,14 +59,12 @@ constexpr std::size_t NumSupportedVertexAttributes = 16;
59template <typename Engine, typename Entry> 59template <typename Engine, typename Entry>
60Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, 60Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
61 ShaderType shader_type, std::size_t index = 0) { 61 ShaderType shader_type, std::size_t index = 0) {
62 if (entry.IsBindless()) { 62 if (entry.is_bindless) {
63 const Tegra::Texture::TextureHandle tex_handle = 63 const auto tex_handle = engine.AccessConstBuffer32(shader_type, entry.buffer, entry.offset);
64 engine.AccessConstBuffer32(shader_type, entry.GetBuffer(), entry.GetOffset());
65 return engine.GetTextureInfo(tex_handle); 64 return engine.GetTextureInfo(tex_handle);
66 } 65 }
67 const auto& gpu_profile = engine.AccessGuestDriverProfile(); 66 const auto& gpu_profile = engine.AccessGuestDriverProfile();
68 const u32 offset = 67 const u32 offset = entry.offset + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
69 entry.GetOffset() + static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
70 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) { 68 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
71 return engine.GetStageTexture(shader_type, offset); 69 return engine.GetStageTexture(shader_type, offset);
72 } else { 70 } else {
@@ -99,9 +97,10 @@ RasterizerOpenGL::RasterizerOpenGL(Core::System& system, Core::Frontend::EmuWind
99 ScreenInfo& info, GLShader::ProgramManager& program_manager, 97 ScreenInfo& info, GLShader::ProgramManager& program_manager,
100 StateTracker& state_tracker) 98 StateTracker& state_tracker)
101 : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device, state_tracker}, 99 : RasterizerAccelerated{system.Memory()}, texture_cache{system, *this, device, state_tracker},
102 shader_cache{*this, system, emu_window, device}, query_cache{system, *this}, system{system}, 100 shader_cache{*this, system, emu_window, device}, query_cache{system, *this},
103 screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker}, 101 buffer_cache{*this, system, device, STREAM_BUFFER_SIZE},
104 buffer_cache{*this, system, device, STREAM_BUFFER_SIZE} { 102 fence_manager{system, *this, texture_cache, buffer_cache, query_cache}, system{system},
103 screen_info{info}, program_manager{program_manager}, state_tracker{state_tracker} {
105 CheckExtensions(); 104 CheckExtensions();
106} 105}
107 106
@@ -185,8 +184,12 @@ void RasterizerOpenGL::SetupVertexBuffer() {
185 const GPUVAddr start = vertex_array.StartAddress(); 184 const GPUVAddr start = vertex_array.StartAddress();
186 const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); 185 const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
187 186
188 ASSERT(end > start); 187 ASSERT(end >= start);
189 const u64 size = end - start + 1; 188 const u64 size = end - start;
189 if (size == 0) {
190 glBindVertexBuffer(static_cast<GLuint>(index), 0, 0, vertex_array.stride);
191 continue;
192 }
190 const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size); 193 const auto [vertex_buffer, vertex_buffer_offset] = buffer_cache.UploadMemory(start, size);
191 glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset, 194 glBindVertexBuffer(static_cast<GLuint>(index), vertex_buffer, vertex_buffer_offset,
192 vertex_array.stride); 195 vertex_array.stride);
@@ -310,8 +313,8 @@ std::size_t RasterizerOpenGL::CalculateVertexArraysSize() const {
310 const GPUVAddr start = regs.vertex_array[index].StartAddress(); 313 const GPUVAddr start = regs.vertex_array[index].StartAddress();
311 const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress(); 314 const GPUVAddr end = regs.vertex_array_limit[index].LimitAddress();
312 315
313 ASSERT(end > start); 316 size += end - start;
314 size += end - start + 1; 317 ASSERT(end >= start);
315 } 318 }
316 319
317 return size; 320 return size;
@@ -343,7 +346,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
343 346
344 texture_cache.GuardRenderTargets(true); 347 texture_cache.GuardRenderTargets(true);
345 348
346 View depth_surface = texture_cache.GetDepthBufferSurface(); 349 View depth_surface = texture_cache.GetDepthBufferSurface(true);
347 350
348 const auto& regs = gpu.regs; 351 const auto& regs = gpu.regs;
349 UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0); 352 UNIMPLEMENTED_IF(regs.rt_separate_frag_data == 0);
@@ -352,7 +355,7 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
352 FramebufferCacheKey key; 355 FramebufferCacheKey key;
353 const auto colors_count = static_cast<std::size_t>(regs.rt_control.count); 356 const auto colors_count = static_cast<std::size_t>(regs.rt_control.count);
354 for (std::size_t index = 0; index < colors_count; ++index) { 357 for (std::size_t index = 0; index < colors_count; ++index) {
355 View color_surface{texture_cache.GetColorBufferSurface(index)}; 358 View color_surface{texture_cache.GetColorBufferSurface(index, true)};
356 if (!color_surface) { 359 if (!color_surface) {
357 continue; 360 continue;
358 } 361 }
@@ -376,28 +379,52 @@ void RasterizerOpenGL::ConfigureFramebuffers() {
376 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); 379 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
377} 380}
378 381
379void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, 382void RasterizerOpenGL::ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil) {
380 bool using_stencil_fb) {
381 auto& gpu = system.GPU().Maxwell3D(); 383 auto& gpu = system.GPU().Maxwell3D();
382 const auto& regs = gpu.regs; 384 const auto& regs = gpu.regs;
383 385
384 texture_cache.GuardRenderTargets(true); 386 texture_cache.GuardRenderTargets(true);
385 View color_surface; 387 View color_surface;
386 if (using_color_fb) { 388
389 if (using_color) {
390 // Determine if we have to preserve the contents.
391 // First we have to make sure all clear masks are enabled.
392 bool preserve_contents = !regs.clear_buffers.R || !regs.clear_buffers.G ||
393 !regs.clear_buffers.B || !regs.clear_buffers.A;
387 const std::size_t index = regs.clear_buffers.RT; 394 const std::size_t index = regs.clear_buffers.RT;
388 color_surface = texture_cache.GetColorBufferSurface(index); 395 if (regs.clear_flags.scissor) {
396 // Then we have to confirm scissor testing clears the whole image.
397 const auto& scissor = regs.scissor_test[0];
398 preserve_contents |= scissor.min_x > 0;
399 preserve_contents |= scissor.min_y > 0;
400 preserve_contents |= scissor.max_x < regs.rt[index].width;
401 preserve_contents |= scissor.max_y < regs.rt[index].height;
402 }
403
404 color_surface = texture_cache.GetColorBufferSurface(index, preserve_contents);
389 texture_cache.MarkColorBufferInUse(index); 405 texture_cache.MarkColorBufferInUse(index);
390 } 406 }
407
391 View depth_surface; 408 View depth_surface;
392 if (using_depth_fb || using_stencil_fb) { 409 if (using_depth_stencil) {
393 depth_surface = texture_cache.GetDepthBufferSurface(); 410 bool preserve_contents = false;
411 if (regs.clear_flags.scissor) {
412 // For depth stencil clears we only have to confirm scissor test covers the whole image.
413 const auto& scissor = regs.scissor_test[0];
414 preserve_contents |= scissor.min_x > 0;
415 preserve_contents |= scissor.min_y > 0;
416 preserve_contents |= scissor.max_x < regs.zeta_width;
417 preserve_contents |= scissor.max_y < regs.zeta_height;
418 }
419
420 depth_surface = texture_cache.GetDepthBufferSurface(preserve_contents);
394 texture_cache.MarkDepthBufferInUse(); 421 texture_cache.MarkDepthBufferInUse();
395 } 422 }
396 texture_cache.GuardRenderTargets(false); 423 texture_cache.GuardRenderTargets(false);
397 424
398 FramebufferCacheKey key; 425 FramebufferCacheKey key;
399 key.colors[0] = color_surface; 426 key.colors[0] = std::move(color_surface);
400 key.zeta = depth_surface; 427 key.zeta = std::move(depth_surface);
401 428
402 state_tracker.NotifyFramebuffer(); 429 state_tracker.NotifyFramebuffer();
403 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key)); 430 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, framebuffer_cache.GetFramebuffer(key));
@@ -417,8 +444,7 @@ void RasterizerOpenGL::Clear() {
417 if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || 444 if (regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
418 regs.clear_buffers.A) { 445 regs.clear_buffers.A) {
419 use_color = true; 446 use_color = true;
420 } 447
421 if (use_color) {
422 state_tracker.NotifyColorMask0(); 448 state_tracker.NotifyColorMask0();
423 glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0, 449 glColorMaski(0, regs.clear_buffers.R != 0, regs.clear_buffers.G != 0,
424 regs.clear_buffers.B != 0, regs.clear_buffers.A != 0); 450 regs.clear_buffers.B != 0, regs.clear_buffers.A != 0);
@@ -456,7 +482,7 @@ void RasterizerOpenGL::Clear() {
456 482
457 UNIMPLEMENTED_IF(regs.clear_flags.viewport); 483 UNIMPLEMENTED_IF(regs.clear_flags.viewport);
458 484
459 ConfigureClearFramebuffer(use_color, use_depth, use_stencil); 485 ConfigureClearFramebuffer(use_color, use_depth || use_stencil);
460 486
461 if (use_color) { 487 if (use_color) {
462 glClearBufferfv(GL_COLOR, 0, regs.clear_color); 488 glClearBufferfv(GL_COLOR, 0, regs.clear_color);
@@ -599,6 +625,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
599 EndTransformFeedback(); 625 EndTransformFeedback();
600 626
601 ++num_queued_commands; 627 ++num_queued_commands;
628
629 system.GPU().TickWork();
602} 630}
603 631
604void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) { 632void RasterizerOpenGL::DispatchCompute(GPUVAddr code_addr) {
@@ -649,6 +677,13 @@ void RasterizerOpenGL::FlushRegion(VAddr addr, u64 size) {
649 query_cache.FlushRegion(addr, size); 677 query_cache.FlushRegion(addr, size);
650} 678}
651 679
680bool RasterizerOpenGL::MustFlushRegion(VAddr addr, u64 size) {
681 if (!Settings::IsGPULevelHigh()) {
682 return buffer_cache.MustFlushRegion(addr, size);
683 }
684 return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
685}
686
652void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) { 687void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
653 MICROPROFILE_SCOPE(OpenGL_CacheManagement); 688 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
654 if (addr == 0 || size == 0) { 689 if (addr == 0 || size == 0) {
@@ -660,13 +695,68 @@ void RasterizerOpenGL::InvalidateRegion(VAddr addr, u64 size) {
660 query_cache.InvalidateRegion(addr, size); 695 query_cache.InvalidateRegion(addr, size);
661} 696}
662 697
698void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
699 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
700 if (addr == 0 || size == 0) {
701 return;
702 }
703 texture_cache.OnCPUWrite(addr, size);
704 shader_cache.InvalidateRegion(addr, size);
705 buffer_cache.OnCPUWrite(addr, size);
706 query_cache.InvalidateRegion(addr, size);
707}
708
709void RasterizerOpenGL::SyncGuestHost() {
710 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
711 texture_cache.SyncGuestHost();
712 buffer_cache.SyncGuestHost();
713}
714
715void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) {
716 auto& gpu{system.GPU()};
717 if (!gpu.IsAsync()) {
718 auto& memory_manager{gpu.MemoryManager()};
719 memory_manager.Write<u32>(addr, value);
720 return;
721 }
722 fence_manager.SignalSemaphore(addr, value);
723}
724
725void RasterizerOpenGL::SignalSyncPoint(u32 value) {
726 auto& gpu{system.GPU()};
727 if (!gpu.IsAsync()) {
728 gpu.IncrementSyncPoint(value);
729 return;
730 }
731 fence_manager.SignalSyncPoint(value);
732}
733
734void RasterizerOpenGL::ReleaseFences() {
735 auto& gpu{system.GPU()};
736 if (!gpu.IsAsync()) {
737 return;
738 }
739 fence_manager.WaitPendingFences();
740}
741
663void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) { 742void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size) {
664 if (Settings::values.use_accurate_gpu_emulation) { 743 if (Settings::IsGPULevelExtreme()) {
665 FlushRegion(addr, size); 744 FlushRegion(addr, size);
666 } 745 }
667 InvalidateRegion(addr, size); 746 InvalidateRegion(addr, size);
668} 747}
669 748
749void RasterizerOpenGL::WaitForIdle() {
750 // Place a barrier on everything that is not framebuffer related.
751 // This is related to another flag that is not currently implemented.
752 glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT | GL_ELEMENT_ARRAY_BARRIER_BIT |
753 GL_UNIFORM_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT |
754 GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_COMMAND_BARRIER_BIT |
755 GL_PIXEL_BUFFER_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT |
756 GL_BUFFER_UPDATE_BARRIER_BIT | GL_TRANSFORM_FEEDBACK_BARRIER_BIT |
757 GL_SHADER_STORAGE_BARRIER_BIT | GL_QUERY_BUFFER_BARRIER_BIT);
758}
759
670void RasterizerOpenGL::FlushCommands() { 760void RasterizerOpenGL::FlushCommands() {
671 // Only flush when we have commands queued to OpenGL. 761 // Only flush when we have commands queued to OpenGL.
672 if (num_queued_commands == 0) { 762 if (num_queued_commands == 0) {
@@ -775,9 +865,9 @@ void RasterizerOpenGL::SetupDrawGlobalMemory(std::size_t stage_index, const Shad
775 865
776 u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer; 866 u32 binding = device.GetBaseBindings(stage_index).shader_storage_buffer;
777 for (const auto& entry : shader->GetEntries().global_memory_entries) { 867 for (const auto& entry : shader->GetEntries().global_memory_entries) {
778 const auto addr{cbufs.const_buffers[entry.GetCbufIndex()].address + entry.GetCbufOffset()}; 868 const GPUVAddr addr{cbufs.const_buffers[entry.cbuf_index].address + entry.cbuf_offset};
779 const auto gpu_addr{memory_manager.Read<u64>(addr)}; 869 const GPUVAddr gpu_addr{memory_manager.Read<u64>(addr)};
780 const auto size{memory_manager.Read<u32>(addr + 8)}; 870 const u32 size{memory_manager.Read<u32>(addr + 8)};
781 SetupGlobalMemory(binding++, entry, gpu_addr, size); 871 SetupGlobalMemory(binding++, entry, gpu_addr, size);
782 } 872 }
783} 873}
@@ -789,7 +879,7 @@ void RasterizerOpenGL::SetupComputeGlobalMemory(const Shader& kernel) {
789 879
790 u32 binding = 0; 880 u32 binding = 0;
791 for (const auto& entry : kernel->GetEntries().global_memory_entries) { 881 for (const auto& entry : kernel->GetEntries().global_memory_entries) {
792 const auto addr{cbufs[entry.GetCbufIndex()].Address() + entry.GetCbufOffset()}; 882 const auto addr{cbufs[entry.cbuf_index].Address() + entry.cbuf_offset};
793 const auto gpu_addr{memory_manager.Read<u64>(addr)}; 883 const auto gpu_addr{memory_manager.Read<u64>(addr)};
794 const auto size{memory_manager.Read<u32>(addr + 8)}; 884 const auto size{memory_manager.Read<u32>(addr + 8)};
795 SetupGlobalMemory(binding++, entry, gpu_addr, size); 885 SetupGlobalMemory(binding++, entry, gpu_addr, size);
@@ -800,7 +890,7 @@ void RasterizerOpenGL::SetupGlobalMemory(u32 binding, const GlobalMemoryEntry& e
800 GPUVAddr gpu_addr, std::size_t size) { 890 GPUVAddr gpu_addr, std::size_t size) {
801 const auto alignment{device.GetShaderStorageBufferAlignment()}; 891 const auto alignment{device.GetShaderStorageBufferAlignment()};
802 const auto [ssbo, buffer_offset] = 892 const auto [ssbo, buffer_offset] =
803 buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.IsWritten()); 893 buffer_cache.UploadMemory(gpu_addr, size, alignment, entry.is_written);
804 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset, 894 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, binding, ssbo, buffer_offset,
805 static_cast<GLsizeiptr>(size)); 895 static_cast<GLsizeiptr>(size));
806} 896}
@@ -811,7 +901,7 @@ void RasterizerOpenGL::SetupDrawTextures(std::size_t stage_index, const Shader&
811 u32 binding = device.GetBaseBindings(stage_index).sampler; 901 u32 binding = device.GetBaseBindings(stage_index).sampler;
812 for (const auto& entry : shader->GetEntries().samplers) { 902 for (const auto& entry : shader->GetEntries().samplers) {
813 const auto shader_type = static_cast<ShaderType>(stage_index); 903 const auto shader_type = static_cast<ShaderType>(stage_index);
814 for (std::size_t i = 0; i < entry.Size(); ++i) { 904 for (std::size_t i = 0; i < entry.size; ++i) {
815 const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i); 905 const auto texture = GetTextureInfo(maxwell3d, entry, shader_type, i);
816 SetupTexture(binding++, texture, entry); 906 SetupTexture(binding++, texture, entry);
817 } 907 }
@@ -823,7 +913,7 @@ void RasterizerOpenGL::SetupComputeTextures(const Shader& kernel) {
823 const auto& compute = system.GPU().KeplerCompute(); 913 const auto& compute = system.GPU().KeplerCompute();
824 u32 binding = 0; 914 u32 binding = 0;
825 for (const auto& entry : kernel->GetEntries().samplers) { 915 for (const auto& entry : kernel->GetEntries().samplers) {
826 for (std::size_t i = 0; i < entry.Size(); ++i) { 916 for (std::size_t i = 0; i < entry.size; ++i) {
827 const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i); 917 const auto texture = GetTextureInfo(compute, entry, ShaderType::Compute, i);
828 SetupTexture(binding++, texture, entry); 918 SetupTexture(binding++, texture, entry);
829 } 919 }
@@ -880,7 +970,7 @@ void RasterizerOpenGL::SetupImage(u32 binding, const Tegra::Texture::TICEntry& t
880 if (!tic.IsBuffer()) { 970 if (!tic.IsBuffer()) {
881 view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source); 971 view->ApplySwizzle(tic.x_source, tic.y_source, tic.z_source, tic.w_source);
882 } 972 }
883 if (entry.IsWritten()) { 973 if (entry.is_written) {
884 view->MarkAsModified(texture_cache.Tick()); 974 view->MarkAsModified(texture_cache.Tick());
885 } 975 }
886 glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE, 976 glBindImageTexture(binding, view->GetTexture(), 0, GL_TRUE, 0, GL_READ_WRITE,
@@ -941,11 +1031,7 @@ void RasterizerOpenGL::SyncDepthClamp() {
941 } 1031 }
942 flags[Dirty::DepthClampEnabled] = false; 1032 flags[Dirty::DepthClampEnabled] = false;
943 1033
944 const auto& state = gpu.regs.view_volume_clip_control; 1034 oglEnable(GL_DEPTH_CLAMP, gpu.regs.view_volume_clip_control.depth_clamp_disabled == 0);
945 UNIMPLEMENTED_IF_MSG(state.depth_clamp_far != state.depth_clamp_near,
946 "Unimplemented depth clamp separation!");
947
948 oglEnable(GL_DEPTH_CLAMP, state.depth_clamp_far || state.depth_clamp_near);
949} 1035}
950 1036
951void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { 1037void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index caea174d2..b94c65907 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -23,6 +23,7 @@
23#include "video_core/rasterizer_interface.h" 23#include "video_core/rasterizer_interface.h"
24#include "video_core/renderer_opengl/gl_buffer_cache.h" 24#include "video_core/renderer_opengl/gl_buffer_cache.h"
25#include "video_core/renderer_opengl/gl_device.h" 25#include "video_core/renderer_opengl/gl_device.h"
26#include "video_core/renderer_opengl/gl_fence_manager.h"
26#include "video_core/renderer_opengl/gl_framebuffer_cache.h" 27#include "video_core/renderer_opengl/gl_framebuffer_cache.h"
27#include "video_core/renderer_opengl/gl_query_cache.h" 28#include "video_core/renderer_opengl/gl_query_cache.h"
28#include "video_core/renderer_opengl/gl_resource_manager.h" 29#include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -66,8 +67,15 @@ public:
66 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; 67 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
67 void FlushAll() override; 68 void FlushAll() override;
68 void FlushRegion(VAddr addr, u64 size) override; 69 void FlushRegion(VAddr addr, u64 size) override;
70 bool MustFlushRegion(VAddr addr, u64 size) override;
69 void InvalidateRegion(VAddr addr, u64 size) override; 71 void InvalidateRegion(VAddr addr, u64 size) override;
72 void OnCPUWrite(VAddr addr, u64 size) override;
73 void SyncGuestHost() override;
74 void SignalSemaphore(GPUVAddr addr, u32 value) override;
75 void SignalSyncPoint(u32 value) override;
76 void ReleaseFences() override;
70 void FlushAndInvalidateRegion(VAddr addr, u64 size) override; 77 void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
78 void WaitForIdle() override;
71 void FlushCommands() override; 79 void FlushCommands() override;
72 void TickFrame() override; 80 void TickFrame() override;
73 bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, 81 bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -88,7 +96,8 @@ private:
88 /// Configures the color and depth framebuffer states. 96 /// Configures the color and depth framebuffer states.
89 void ConfigureFramebuffers(); 97 void ConfigureFramebuffers();
90 98
91 void ConfigureClearFramebuffer(bool using_color_fb, bool using_depth_fb, bool using_stencil_fb); 99 /// Configures the color and depth framebuffer for clearing.
100 void ConfigureClearFramebuffer(bool using_color, bool using_depth_stencil);
92 101
93 /// Configures the current constbuffers to use for the draw command. 102 /// Configures the current constbuffers to use for the draw command.
94 void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader); 103 void SetupDrawConstBuffers(std::size_t stage_index, const Shader& shader);
@@ -222,6 +231,8 @@ private:
222 SamplerCacheOpenGL sampler_cache; 231 SamplerCacheOpenGL sampler_cache;
223 FramebufferCacheOpenGL framebuffer_cache; 232 FramebufferCacheOpenGL framebuffer_cache;
224 QueryCache query_cache; 233 QueryCache query_cache;
234 OGLBufferCache buffer_cache;
235 FenceManagerOpenGL fence_manager;
225 236
226 Core::System& system; 237 Core::System& system;
227 ScreenInfo& screen_info; 238 ScreenInfo& screen_info;
@@ -229,7 +240,6 @@ private:
229 StateTracker& state_tracker; 240 StateTracker& state_tracker;
230 241
231 static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024; 242 static constexpr std::size_t STREAM_BUFFER_SIZE = 128 * 1024 * 1024;
232 OGLBufferCache buffer_cache;
233 243
234 GLint vertex_binding = 0; 244 GLint vertex_binding = 0;
235 245
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index 6d2ff20f9..9759a7078 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -10,8 +10,6 @@
10#include <thread> 10#include <thread>
11#include <unordered_set> 11#include <unordered_set>
12 12
13#include <boost/functional/hash.hpp>
14
15#include "common/alignment.h" 13#include "common/alignment.h"
16#include "common/assert.h" 14#include "common/assert.h"
17#include "common/logging/log.h" 15#include "common/logging/log.h"
@@ -28,76 +26,26 @@
28#include "video_core/renderer_opengl/gl_shader_disk_cache.h" 26#include "video_core/renderer_opengl/gl_shader_disk_cache.h"
29#include "video_core/renderer_opengl/gl_state_tracker.h" 27#include "video_core/renderer_opengl/gl_state_tracker.h"
30#include "video_core/renderer_opengl/utils.h" 28#include "video_core/renderer_opengl/utils.h"
29#include "video_core/shader/memory_util.h"
31#include "video_core/shader/registry.h" 30#include "video_core/shader/registry.h"
32#include "video_core/shader/shader_ir.h" 31#include "video_core/shader/shader_ir.h"
33 32
34namespace OpenGL { 33namespace OpenGL {
35 34
36using Tegra::Engines::ShaderType; 35using Tegra::Engines::ShaderType;
36using VideoCommon::Shader::GetShaderAddress;
37using VideoCommon::Shader::GetShaderCode;
38using VideoCommon::Shader::GetUniqueIdentifier;
39using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
37using VideoCommon::Shader::ProgramCode; 40using VideoCommon::Shader::ProgramCode;
38using VideoCommon::Shader::Registry; 41using VideoCommon::Shader::Registry;
39using VideoCommon::Shader::ShaderIR; 42using VideoCommon::Shader::ShaderIR;
43using VideoCommon::Shader::STAGE_MAIN_OFFSET;
40 44
41namespace { 45namespace {
42 46
43constexpr u32 STAGE_MAIN_OFFSET = 10;
44constexpr u32 KERNEL_MAIN_OFFSET = 0;
45
46constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{}; 47constexpr VideoCommon::Shader::CompilerSettings COMPILER_SETTINGS{};
47 48
48/// Gets the address for the specified shader stage program
49GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
50 const auto& gpu{system.GPU().Maxwell3D()};
51 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
52 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
53}
54
55/// Gets if the current instruction offset is a scheduler instruction
56constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
57 // Sched instructions appear once every 4 instructions.
58 constexpr std::size_t SchedPeriod = 4;
59 const std::size_t absolute_offset = offset - main_offset;
60 return (absolute_offset % SchedPeriod) == 0;
61}
62
63/// Calculates the size of a program stream
64std::size_t CalculateProgramSize(const ProgramCode& program) {
65 constexpr std::size_t start_offset = 10;
66 // This is the encoded version of BRA that jumps to itself. All Nvidia
67 // shaders end with one.
68 constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
69 constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
70 std::size_t offset = start_offset;
71 while (offset < program.size()) {
72 const u64 instruction = program[offset];
73 if (!IsSchedInstruction(offset, start_offset)) {
74 if ((instruction & mask) == self_jumping_branch) {
75 // End on Maxwell's "nop" instruction
76 break;
77 }
78 if (instruction == 0) {
79 break;
80 }
81 }
82 offset++;
83 }
84 // The last instruction is included in the program size
85 return std::min(offset + 1, program.size());
86}
87
88/// Gets the shader program code from memory for the specified address
89ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
90 const u8* host_ptr) {
91 ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
92 ASSERT_OR_EXECUTE(host_ptr != nullptr, {
93 std::fill(code.begin(), code.end(), 0);
94 return code;
95 });
96 memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
97 code.resize(CalculateProgramSize(code));
98 return code;
99}
100
101/// Gets the shader type from a Maxwell program type 49/// Gets the shader type from a Maxwell program type
102constexpr GLenum GetGLShaderType(ShaderType shader_type) { 50constexpr GLenum GetGLShaderType(ShaderType shader_type) {
103 switch (shader_type) { 51 switch (shader_type) {
@@ -114,17 +62,6 @@ constexpr GLenum GetGLShaderType(ShaderType shader_type) {
114 } 62 }
115} 63}
116 64
117/// Hashes one (or two) program streams
118u64 GetUniqueIdentifier(ShaderType shader_type, bool is_a, const ProgramCode& code,
119 const ProgramCode& code_b = {}) {
120 u64 unique_identifier = boost::hash_value(code);
121 if (is_a) {
122 // VertexA programs include two programs
123 boost::hash_combine(unique_identifier, boost::hash_value(code_b));
124 }
125 return unique_identifier;
126}
127
128constexpr const char* GetShaderTypeName(ShaderType shader_type) { 65constexpr const char* GetShaderTypeName(ShaderType shader_type) {
129 switch (shader_type) { 66 switch (shader_type) {
130 case ShaderType::Vertex: 67 case ShaderType::Vertex:
@@ -448,7 +385,7 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
448 385
449 // Look up shader in the cache based on address 386 // Look up shader in the cache based on address
450 const auto cpu_addr{memory_manager.GpuToCpuAddress(address)}; 387 const auto cpu_addr{memory_manager.GpuToCpuAddress(address)};
451 Shader shader{cpu_addr ? TryGet(*cpu_addr) : nullptr}; 388 Shader shader{cpu_addr ? TryGet(*cpu_addr) : null_shader};
452 if (shader) { 389 if (shader) {
453 return last_shaders[static_cast<std::size_t>(program)] = shader; 390 return last_shaders[static_cast<std::size_t>(program)] = shader;
454 } 391 }
@@ -456,11 +393,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
456 const auto host_ptr{memory_manager.GetPointer(address)}; 393 const auto host_ptr{memory_manager.GetPointer(address)};
457 394
458 // No shader found - create a new one 395 // No shader found - create a new one
459 ProgramCode code{GetShaderCode(memory_manager, address, host_ptr)}; 396 ProgramCode code{GetShaderCode(memory_manager, address, host_ptr, false)};
460 ProgramCode code_b; 397 ProgramCode code_b;
461 if (program == Maxwell::ShaderProgram::VertexA) { 398 if (program == Maxwell::ShaderProgram::VertexA) {
462 const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)}; 399 const GPUVAddr address_b{GetShaderAddress(system, Maxwell::ShaderProgram::VertexB)};
463 code_b = GetShaderCode(memory_manager, address_b, memory_manager.GetPointer(address_b)); 400 const u8* host_ptr_b = memory_manager.GetPointer(address_b);
401 code_b = GetShaderCode(memory_manager, address_b, host_ptr_b, false);
464 } 402 }
465 403
466 const auto unique_identifier = GetUniqueIdentifier( 404 const auto unique_identifier = GetUniqueIdentifier(
@@ -477,7 +415,12 @@ Shader ShaderCacheOpenGL::GetStageProgram(Maxwell::ShaderProgram program) {
477 const std::size_t size_in_bytes = code.size() * sizeof(u64); 415 const std::size_t size_in_bytes = code.size() * sizeof(u64);
478 shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes); 416 shader = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
479 } 417 }
480 Register(shader); 418
419 if (cpu_addr) {
420 Register(shader);
421 } else {
422 null_shader = shader;
423 }
481 424
482 return last_shaders[static_cast<std::size_t>(program)] = shader; 425 return last_shaders[static_cast<std::size_t>(program)] = shader;
483} 426}
@@ -486,14 +429,14 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
486 auto& memory_manager{system.GPU().MemoryManager()}; 429 auto& memory_manager{system.GPU().MemoryManager()};
487 const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)}; 430 const auto cpu_addr{memory_manager.GpuToCpuAddress(code_addr)};
488 431
489 auto kernel = cpu_addr ? TryGet(*cpu_addr) : nullptr; 432 auto kernel = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
490 if (kernel) { 433 if (kernel) {
491 return kernel; 434 return kernel;
492 } 435 }
493 436
494 const auto host_ptr{memory_manager.GetPointer(code_addr)}; 437 const auto host_ptr{memory_manager.GetPointer(code_addr)};
495 // No kernel found, create a new one 438 // No kernel found, create a new one
496 auto code{GetShaderCode(memory_manager, code_addr, host_ptr)}; 439 auto code{GetShaderCode(memory_manager, code_addr, host_ptr, true)};
497 const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)}; 440 const auto unique_identifier{GetUniqueIdentifier(ShaderType::Compute, false, code)};
498 441
499 const ShaderParameters params{system, disk_cache, device, 442 const ShaderParameters params{system, disk_cache, device,
@@ -507,7 +450,11 @@ Shader ShaderCacheOpenGL::GetComputeKernel(GPUVAddr code_addr) {
507 kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes); 450 kernel = CachedShader::CreateFromCache(params, found->second, size_in_bytes);
508 } 451 }
509 452
510 Register(kernel); 453 if (cpu_addr) {
454 Register(kernel);
455 } else {
456 null_kernel = kernel;
457 }
511 return kernel; 458 return kernel;
512} 459}
513 460
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index c836df5bd..91690b470 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -125,6 +125,9 @@ private:
125 ShaderDiskCacheOpenGL disk_cache; 125 ShaderDiskCacheOpenGL disk_cache;
126 std::unordered_map<u64, PrecompiledShader> runtime_cache; 126 std::unordered_map<u64, PrecompiledShader> runtime_cache;
127 127
128 Shader null_shader{};
129 Shader null_kernel{};
130
128 std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; 131 std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
129}; 132};
130 133
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
index 22242cce9..99fd4ae2c 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.cpp
@@ -484,7 +484,7 @@ private:
484 code.AddLine("switch (jmp_to) {{"); 484 code.AddLine("switch (jmp_to) {{");
485 485
486 for (const auto& pair : ir.GetBasicBlocks()) { 486 for (const auto& pair : ir.GetBasicBlocks()) {
487 const auto [address, bb] = pair; 487 const auto& [address, bb] = pair;
488 code.AddLine("case 0x{:X}U: {{", address); 488 code.AddLine("case 0x{:X}U: {{", address);
489 ++code.scope; 489 ++code.scope;
490 490
@@ -870,13 +870,13 @@ private:
870 for (const auto& sampler : ir.GetSamplers()) { 870 for (const auto& sampler : ir.GetSamplers()) {
871 const std::string name = GetSampler(sampler); 871 const std::string name = GetSampler(sampler);
872 const std::string description = fmt::format("layout (binding = {}) uniform", binding); 872 const std::string description = fmt::format("layout (binding = {}) uniform", binding);
873 binding += sampler.IsIndexed() ? sampler.Size() : 1; 873 binding += sampler.is_indexed ? sampler.size : 1;
874 874
875 std::string sampler_type = [&]() { 875 std::string sampler_type = [&]() {
876 if (sampler.IsBuffer()) { 876 if (sampler.is_buffer) {
877 return "samplerBuffer"; 877 return "samplerBuffer";
878 } 878 }
879 switch (sampler.GetType()) { 879 switch (sampler.type) {
880 case Tegra::Shader::TextureType::Texture1D: 880 case Tegra::Shader::TextureType::Texture1D:
881 return "sampler1D"; 881 return "sampler1D";
882 case Tegra::Shader::TextureType::Texture2D: 882 case Tegra::Shader::TextureType::Texture2D:
@@ -890,17 +890,17 @@ private:
890 return "sampler2D"; 890 return "sampler2D";
891 } 891 }
892 }(); 892 }();
893 if (sampler.IsArray()) { 893 if (sampler.is_array) {
894 sampler_type += "Array"; 894 sampler_type += "Array";
895 } 895 }
896 if (sampler.IsShadow()) { 896 if (sampler.is_shadow) {
897 sampler_type += "Shadow"; 897 sampler_type += "Shadow";
898 } 898 }
899 899
900 if (!sampler.IsIndexed()) { 900 if (!sampler.is_indexed) {
901 code.AddLine("{} {} {};", description, sampler_type, name); 901 code.AddLine("{} {} {};", description, sampler_type, name);
902 } else { 902 } else {
903 code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.Size()); 903 code.AddLine("{} {} {}[{}];", description, sampler_type, name, sampler.size);
904 } 904 }
905 } 905 }
906 if (!ir.GetSamplers().empty()) { 906 if (!ir.GetSamplers().empty()) {
@@ -946,14 +946,14 @@ private:
946 u32 binding = device.GetBaseBindings(stage).image; 946 u32 binding = device.GetBaseBindings(stage).image;
947 for (const auto& image : ir.GetImages()) { 947 for (const auto& image : ir.GetImages()) {
948 std::string qualifier = "coherent volatile"; 948 std::string qualifier = "coherent volatile";
949 if (image.IsRead() && !image.IsWritten()) { 949 if (image.is_read && !image.is_written) {
950 qualifier += " readonly"; 950 qualifier += " readonly";
951 } else if (image.IsWritten() && !image.IsRead()) { 951 } else if (image.is_written && !image.is_read) {
952 qualifier += " writeonly"; 952 qualifier += " writeonly";
953 } 953 }
954 954
955 const char* format = image.IsAtomic() ? "r32ui, " : ""; 955 const char* format = image.is_atomic ? "r32ui, " : "";
956 const char* type_declaration = GetImageTypeDeclaration(image.GetType()); 956 const char* type_declaration = GetImageTypeDeclaration(image.type);
957 code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++, 957 code.AddLine("layout ({}binding = {}) {} uniform uimage{} {};", format, binding++,
958 qualifier, type_declaration, GetImage(image)); 958 qualifier, type_declaration, GetImage(image));
959 } 959 }
@@ -1337,8 +1337,8 @@ private:
1337 ASSERT(meta); 1337 ASSERT(meta);
1338 1338
1339 const std::size_t count = operation.GetOperandsCount(); 1339 const std::size_t count = operation.GetOperandsCount();
1340 const bool has_array = meta->sampler.IsArray(); 1340 const bool has_array = meta->sampler.is_array;
1341 const bool has_shadow = meta->sampler.IsShadow(); 1341 const bool has_shadow = meta->sampler.is_shadow;
1342 1342
1343 std::string expr = "texture" + function_suffix; 1343 std::string expr = "texture" + function_suffix;
1344 if (!meta->aoffi.empty()) { 1344 if (!meta->aoffi.empty()) {
@@ -1346,7 +1346,7 @@ private:
1346 } else if (!meta->ptp.empty()) { 1346 } else if (!meta->ptp.empty()) {
1347 expr += "Offsets"; 1347 expr += "Offsets";
1348 } 1348 }
1349 if (!meta->sampler.IsIndexed()) { 1349 if (!meta->sampler.is_indexed) {
1350 expr += '(' + GetSampler(meta->sampler) + ", "; 1350 expr += '(' + GetSampler(meta->sampler) + ", ";
1351 } else { 1351 } else {
1352 expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], "; 1352 expr += '(' + GetSampler(meta->sampler) + '[' + Visit(meta->index).AsUint() + "], ";
@@ -1484,8 +1484,8 @@ private:
1484 dy += '('; 1484 dy += '(';
1485 1485
1486 for (std::size_t index = 0; index < components; ++index) { 1486 for (std::size_t index = 0; index < components; ++index) {
1487 const auto operand_x{derivates.at(index * 2)}; 1487 const auto& operand_x{derivates.at(index * 2)};
1488 const auto operand_y{derivates.at(index * 2 + 1)}; 1488 const auto& operand_y{derivates.at(index * 2 + 1)};
1489 dx += Visit(operand_x).AsFloat(); 1489 dx += Visit(operand_x).AsFloat();
1490 dy += Visit(operand_y).AsFloat(); 1490 dy += Visit(operand_y).AsFloat();
1491 1491
@@ -1870,6 +1870,14 @@ private:
1870 return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type); 1870 return GenerateBinaryInfix(operation, ">=", Type::Bool, type, type);
1871 } 1871 }
1872 1872
1873 Expression LogicalAddCarry(Operation operation) {
1874 const std::string carry = code.GenerateTemporary();
1875 code.AddLine("uint {};", carry);
1876 code.AddLine("uaddCarry({}, {}, {});", VisitOperand(operation, 0).AsUint(),
1877 VisitOperand(operation, 1).AsUint(), carry);
1878 return {fmt::format("({} != 0)", carry), Type::Bool};
1879 }
1880
1873 Expression LogicalFIsNan(Operation operation) { 1881 Expression LogicalFIsNan(Operation operation) {
1874 return GenerateUnary(operation, "isnan", Type::Bool, Type::Float); 1882 return GenerateUnary(operation, "isnan", Type::Bool, Type::Float);
1875 } 1883 }
@@ -1974,7 +1982,7 @@ private:
1974 1982
1975 std::string expr = GenerateTexture( 1983 std::string expr = GenerateTexture(
1976 operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}}); 1984 operation, "", {TextureOffset{}, TextureArgument{Type::Float, meta->bias}});
1977 if (meta->sampler.IsShadow()) { 1985 if (meta->sampler.is_shadow) {
1978 expr = "vec4(" + expr + ')'; 1986 expr = "vec4(" + expr + ')';
1979 } 1987 }
1980 return {expr + GetSwizzle(meta->element), Type::Float}; 1988 return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1986,7 +1994,7 @@ private:
1986 1994
1987 std::string expr = GenerateTexture( 1995 std::string expr = GenerateTexture(
1988 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}}); 1996 operation, "Lod", {TextureArgument{Type::Float, meta->lod}, TextureOffset{}});
1989 if (meta->sampler.IsShadow()) { 1997 if (meta->sampler.is_shadow) {
1990 expr = "vec4(" + expr + ')'; 1998 expr = "vec4(" + expr + ')';
1991 } 1999 }
1992 return {expr + GetSwizzle(meta->element), Type::Float}; 2000 return {expr + GetSwizzle(meta->element), Type::Float};
@@ -1995,11 +2003,11 @@ private:
1995 Expression TextureGather(Operation operation) { 2003 Expression TextureGather(Operation operation) {
1996 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 2004 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1997 2005
1998 const auto type = meta.sampler.IsShadow() ? Type::Float : Type::Int; 2006 const auto type = meta.sampler.is_shadow ? Type::Float : Type::Int;
1999 const bool separate_dc = meta.sampler.IsShadow(); 2007 const bool separate_dc = meta.sampler.is_shadow;
2000 2008
2001 std::vector<TextureIR> ir; 2009 std::vector<TextureIR> ir;
2002 if (meta.sampler.IsShadow()) { 2010 if (meta.sampler.is_shadow) {
2003 ir = {TextureOffset{}}; 2011 ir = {TextureOffset{}};
2004 } else { 2012 } else {
2005 ir = {TextureOffset{}, TextureArgument{type, meta.component}}; 2013 ir = {TextureOffset{}, TextureArgument{type, meta.component}};
@@ -2044,7 +2052,7 @@ private:
2044 constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"}; 2052 constexpr std::array constructors = {"int", "ivec2", "ivec3", "ivec4"};
2045 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 2053 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta());
2046 ASSERT(meta); 2054 ASSERT(meta);
2047 UNIMPLEMENTED_IF(meta->sampler.IsArray()); 2055 UNIMPLEMENTED_IF(meta->sampler.is_array);
2048 const std::size_t count = operation.GetOperandsCount(); 2056 const std::size_t count = operation.GetOperandsCount();
2049 2057
2050 std::string expr = "texelFetch("; 2058 std::string expr = "texelFetch(";
@@ -2065,7 +2073,7 @@ private:
2065 } 2073 }
2066 expr += ')'; 2074 expr += ')';
2067 2075
2068 if (meta->lod && !meta->sampler.IsBuffer()) { 2076 if (meta->lod && !meta->sampler.is_buffer) {
2069 expr += ", "; 2077 expr += ", ";
2070 expr += Visit(meta->lod).AsInt(); 2078 expr += Visit(meta->lod).AsInt();
2071 } 2079 }
@@ -2076,12 +2084,10 @@ private:
2076 } 2084 }
2077 2085
2078 Expression TextureGradient(Operation operation) { 2086 Expression TextureGradient(Operation operation) {
2079 const auto meta = std::get_if<MetaTexture>(&operation.GetMeta()); 2087 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
2080 ASSERT(meta);
2081
2082 std::string expr = 2088 std::string expr =
2083 GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}}); 2089 GenerateTexture(operation, "Grad", {TextureDerivates{}, TextureOffset{}});
2084 return {std::move(expr) + GetSwizzle(meta->element), Type::Float}; 2090 return {std::move(expr) + GetSwizzle(meta.element), Type::Float};
2085 } 2091 }
2086 2092
2087 Expression ImageLoad(Operation operation) { 2093 Expression ImageLoad(Operation operation) {
@@ -2441,6 +2447,8 @@ private:
2441 &GLSLDecompiler::LogicalNotEqual<Type::Uint>, 2447 &GLSLDecompiler::LogicalNotEqual<Type::Uint>,
2442 &GLSLDecompiler::LogicalGreaterEqual<Type::Uint>, 2448 &GLSLDecompiler::LogicalGreaterEqual<Type::Uint>,
2443 2449
2450 &GLSLDecompiler::LogicalAddCarry,
2451
2444 &GLSLDecompiler::Logical2HLessThan<false>, 2452 &GLSLDecompiler::Logical2HLessThan<false>,
2445 &GLSLDecompiler::Logical2HEqual<false>, 2453 &GLSLDecompiler::Logical2HEqual<false>,
2446 &GLSLDecompiler::Logical2HLessEqual<false>, 2454 &GLSLDecompiler::Logical2HLessEqual<false>,
@@ -2598,11 +2606,11 @@ private:
2598 } 2606 }
2599 2607
2600 std::string GetSampler(const Sampler& sampler) const { 2608 std::string GetSampler(const Sampler& sampler) const {
2601 return AppendSuffix(static_cast<u32>(sampler.GetIndex()), "sampler"); 2609 return AppendSuffix(sampler.index, "sampler");
2602 } 2610 }
2603 2611
2604 std::string GetImage(const Image& image) const { 2612 std::string GetImage(const Image& image) const {
2605 return AppendSuffix(static_cast<u32>(image.GetIndex()), "image"); 2613 return AppendSuffix(image.index, "image");
2606 } 2614 }
2607 2615
2608 std::string AppendSuffix(u32 index, std::string_view name) const { 2616 std::string AppendSuffix(u32 index, std::string_view name) const {
diff --git a/src/video_core/renderer_opengl/gl_shader_decompiler.h b/src/video_core/renderer_opengl/gl_shader_decompiler.h
index e7dbd810c..e8a178764 100644
--- a/src/video_core/renderer_opengl/gl_shader_decompiler.h
+++ b/src/video_core/renderer_opengl/gl_shader_decompiler.h
@@ -33,36 +33,19 @@ public:
33 } 33 }
34 34
35private: 35private:
36 u32 index{}; 36 u32 index = 0;
37}; 37};
38 38
39class GlobalMemoryEntry { 39struct GlobalMemoryEntry {
40public: 40 constexpr explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read,
41 explicit GlobalMemoryEntry(u32 cbuf_index, u32 cbuf_offset, bool is_read, bool is_written) 41 bool is_written)
42 : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{ 42 : cbuf_index{cbuf_index}, cbuf_offset{cbuf_offset}, is_read{is_read}, is_written{
43 is_written} {} 43 is_written} {}
44 44
45 u32 GetCbufIndex() const { 45 u32 cbuf_index = 0;
46 return cbuf_index; 46 u32 cbuf_offset = 0;
47 } 47 bool is_read = false;
48 48 bool is_written = false;
49 u32 GetCbufOffset() const {
50 return cbuf_offset;
51 }
52
53 bool IsRead() const {
54 return is_read;
55 }
56
57 bool IsWritten() const {
58 return is_written;
59 }
60
61private:
62 u32 cbuf_index{};
63 u32 cbuf_offset{};
64 bool is_read{};
65 bool is_written{};
66}; 49};
67 50
68struct ShaderEntries { 51struct ShaderEntries {
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
index 2bb376555..648b1e71b 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.cpp
@@ -2,10 +2,12 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <cstring>
5#include <tuple> 6#include <tuple>
6 7
7#include <boost/functional/hash.hpp> 8#include <boost/functional/hash.hpp>
8 9
10#include "common/cityhash.h"
9#include "common/common_types.h" 11#include "common/common_types.h"
10#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 12#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
11 13
@@ -13,289 +15,349 @@ namespace Vulkan {
13 15
14namespace { 16namespace {
15 17
16constexpr FixedPipelineState::DepthStencil GetDepthStencilState(const Maxwell& regs) { 18constexpr std::size_t POINT = 0;
17 const FixedPipelineState::StencilFace front_stencil( 19constexpr std::size_t LINE = 1;
18 regs.stencil_front_op_fail, regs.stencil_front_op_zfail, regs.stencil_front_op_zpass, 20constexpr std::size_t POLYGON = 2;
19 regs.stencil_front_func_func); 21constexpr std::array POLYGON_OFFSET_ENABLE_LUT = {
20 const FixedPipelineState::StencilFace back_stencil = 22 POINT, // Points
21 regs.stencil_two_side_enable 23 LINE, // Lines
22 ? FixedPipelineState::StencilFace(regs.stencil_back_op_fail, regs.stencil_back_op_zfail, 24 LINE, // LineLoop
23 regs.stencil_back_op_zpass, 25 LINE, // LineStrip
24 regs.stencil_back_func_func) 26 POLYGON, // Triangles
25 : front_stencil; 27 POLYGON, // TriangleStrip
26 return FixedPipelineState::DepthStencil( 28 POLYGON, // TriangleFan
27 regs.depth_test_enable == 1, regs.depth_write_enabled == 1, regs.depth_bounds_enable == 1, 29 POLYGON, // Quads
28 regs.stencil_enable == 1, regs.depth_test_func, front_stencil, back_stencil); 30 POLYGON, // QuadStrip
29} 31 POLYGON, // Polygon
30 32 LINE, // LinesAdjacency
31constexpr FixedPipelineState::InputAssembly GetInputAssemblyState(const Maxwell& regs) { 33 LINE, // LineStripAdjacency
32 return FixedPipelineState::InputAssembly( 34 POLYGON, // TrianglesAdjacency
33 regs.draw.topology, regs.primitive_restart.enabled, 35 POLYGON, // TriangleStripAdjacency
34 regs.draw.topology == Maxwell::PrimitiveTopology::Points ? regs.point_size : 0.0f); 36 POLYGON, // Patches
35} 37};
36
37constexpr FixedPipelineState::BlendingAttachment GetBlendingAttachmentState(
38 const Maxwell& regs, std::size_t render_target) {
39 const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : render_target];
40 const std::array components = {mask.R != 0, mask.G != 0, mask.B != 0, mask.A != 0};
41
42 const FixedPipelineState::BlendingAttachment default_blending(
43 false, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
44 Maxwell::Blend::Factor::Zero, Maxwell::Blend::Equation::Add, Maxwell::Blend::Factor::One,
45 Maxwell::Blend::Factor::Zero, components);
46 if (render_target >= regs.rt_control.count) {
47 return default_blending;
48 }
49 38
50 if (!regs.independent_blend_enable) { 39} // Anonymous namespace
51 const auto& src = regs.blend;
52 if (!src.enable[render_target]) {
53 return default_blending;
54 }
55 return FixedPipelineState::BlendingAttachment(
56 true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a,
57 src.factor_source_a, src.factor_dest_a, components);
58 }
59 40
60 if (!regs.blend.enable[render_target]) { 41void FixedPipelineState::DepthStencil::Fill(const Maxwell& regs) noexcept {
61 return default_blending; 42 raw = 0;
43 front.action_stencil_fail.Assign(PackStencilOp(regs.stencil_front_op_fail));
44 front.action_depth_fail.Assign(PackStencilOp(regs.stencil_front_op_zfail));
45 front.action_depth_pass.Assign(PackStencilOp(regs.stencil_front_op_zpass));
46 front.test_func.Assign(PackComparisonOp(regs.stencil_front_func_func));
47 if (regs.stencil_two_side_enable) {
48 back.action_stencil_fail.Assign(PackStencilOp(regs.stencil_back_op_fail));
49 back.action_depth_fail.Assign(PackStencilOp(regs.stencil_back_op_zfail));
50 back.action_depth_pass.Assign(PackStencilOp(regs.stencil_back_op_zpass));
51 back.test_func.Assign(PackComparisonOp(regs.stencil_back_func_func));
52 } else {
53 back.action_stencil_fail.Assign(front.action_stencil_fail);
54 back.action_depth_fail.Assign(front.action_depth_fail);
55 back.action_depth_pass.Assign(front.action_depth_pass);
56 back.test_func.Assign(front.test_func);
62 } 57 }
63 const auto& src = regs.independent_blend[render_target]; 58 depth_test_enable.Assign(regs.depth_test_enable);
64 return FixedPipelineState::BlendingAttachment( 59 depth_write_enable.Assign(regs.depth_write_enabled);
65 true, src.equation_rgb, src.factor_source_rgb, src.factor_dest_rgb, src.equation_a, 60 depth_bounds_enable.Assign(regs.depth_bounds_enable);
66 src.factor_source_a, src.factor_dest_a, components); 61 stencil_enable.Assign(regs.stencil_enable);
67} 62 depth_test_func.Assign(PackComparisonOp(regs.depth_test_func));
68
69constexpr FixedPipelineState::ColorBlending GetColorBlendingState(const Maxwell& regs) {
70 return FixedPipelineState::ColorBlending(
71 {regs.blend_color.r, regs.blend_color.g, regs.blend_color.b, regs.blend_color.a},
72 regs.rt_control.count,
73 {GetBlendingAttachmentState(regs, 0), GetBlendingAttachmentState(regs, 1),
74 GetBlendingAttachmentState(regs, 2), GetBlendingAttachmentState(regs, 3),
75 GetBlendingAttachmentState(regs, 4), GetBlendingAttachmentState(regs, 5),
76 GetBlendingAttachmentState(regs, 6), GetBlendingAttachmentState(regs, 7)});
77}
78
79constexpr FixedPipelineState::Tessellation GetTessellationState(const Maxwell& regs) {
80 return FixedPipelineState::Tessellation(regs.patch_vertices, regs.tess_mode.prim,
81 regs.tess_mode.spacing, regs.tess_mode.cw != 0);
82} 63}
83 64
84constexpr std::size_t Point = 0; 65void FixedPipelineState::Rasterizer::Fill(const Maxwell& regs) noexcept {
85constexpr std::size_t Line = 1; 66 const auto& clip = regs.view_volume_clip_control;
86constexpr std::size_t Polygon = 2;
87constexpr std::array PolygonOffsetEnableLUT = {
88 Point, // Points
89 Line, // Lines
90 Line, // LineLoop
91 Line, // LineStrip
92 Polygon, // Triangles
93 Polygon, // TriangleStrip
94 Polygon, // TriangleFan
95 Polygon, // Quads
96 Polygon, // QuadStrip
97 Polygon, // Polygon
98 Line, // LinesAdjacency
99 Line, // LineStripAdjacency
100 Polygon, // TrianglesAdjacency
101 Polygon, // TriangleStripAdjacency
102 Polygon, // Patches
103};
104
105constexpr FixedPipelineState::Rasterizer GetRasterizerState(const Maxwell& regs) {
106 const std::array enabled_lut = {regs.polygon_offset_point_enable, 67 const std::array enabled_lut = {regs.polygon_offset_point_enable,
107 regs.polygon_offset_line_enable, 68 regs.polygon_offset_line_enable,
108 regs.polygon_offset_fill_enable}; 69 regs.polygon_offset_fill_enable};
109 const auto topology = static_cast<std::size_t>(regs.draw.topology.Value()); 70 const u32 topology_index = static_cast<u32>(regs.draw.topology.Value());
110 const bool depth_bias_enabled = enabled_lut[PolygonOffsetEnableLUT[topology]];
111
112 const auto& clip = regs.view_volume_clip_control;
113 const bool depth_clamp_enabled = clip.depth_clamp_near == 1 || clip.depth_clamp_far == 1;
114 71
115 Maxwell::FrontFace front_face = regs.front_face; 72 u32 packed_front_face = PackFrontFace(regs.front_face);
116 if (regs.screen_y_control.triangle_rast_flip != 0 && 73 if (regs.screen_y_control.triangle_rast_flip != 0 &&
117 regs.viewport_transform[0].scale_y > 0.0f) { 74 regs.viewport_transform[0].scale_y > 0.0f) {
118 if (front_face == Maxwell::FrontFace::CounterClockWise) 75 // Flip front face
119 front_face = Maxwell::FrontFace::ClockWise; 76 packed_front_face = 1 - packed_front_face;
120 else if (front_face == Maxwell::FrontFace::ClockWise)
121 front_face = Maxwell::FrontFace::CounterClockWise;
122 } 77 }
123 78
124 const bool gl_ndc = regs.depth_mode == Maxwell::DepthMode::MinusOneToOne; 79 raw = 0;
125 return FixedPipelineState::Rasterizer(regs.cull_test_enabled, depth_bias_enabled, 80 topology.Assign(topology_index);
126 depth_clamp_enabled, gl_ndc, regs.cull_face, front_face); 81 primitive_restart_enable.Assign(regs.primitive_restart.enabled != 0 ? 1 : 0);
82 cull_enable.Assign(regs.cull_test_enabled != 0 ? 1 : 0);
83 depth_bias_enable.Assign(enabled_lut[POLYGON_OFFSET_ENABLE_LUT[topology_index]] != 0 ? 1 : 0);
84 depth_clamp_disabled.Assign(regs.view_volume_clip_control.depth_clamp_disabled.Value());
85 ndc_minus_one_to_one.Assign(regs.depth_mode == Maxwell::DepthMode::MinusOneToOne ? 1 : 0);
86 cull_face.Assign(PackCullFace(regs.cull_face));
87 front_face.Assign(packed_front_face);
88 polygon_mode.Assign(PackPolygonMode(regs.polygon_mode_front));
89 patch_control_points_minus_one.Assign(regs.patch_vertices - 1);
90 tessellation_primitive.Assign(static_cast<u32>(regs.tess_mode.prim.Value()));
91 tessellation_spacing.Assign(static_cast<u32>(regs.tess_mode.spacing.Value()));
92 tessellation_clockwise.Assign(regs.tess_mode.cw.Value());
93 logic_op_enable.Assign(regs.logic_op.enable != 0 ? 1 : 0);
94 logic_op.Assign(PackLogicOp(regs.logic_op.operation));
95 std::memcpy(&point_size, &regs.point_size, sizeof(point_size)); // TODO: C++20 std::bit_cast
127} 96}
128 97
129} // Anonymous namespace 98void FixedPipelineState::ColorBlending::Fill(const Maxwell& regs) noexcept {
130 99 for (std::size_t index = 0; index < std::size(attachments); ++index) {
131std::size_t FixedPipelineState::VertexBinding::Hash() const noexcept { 100 attachments[index].Fill(regs, index);
132 return (index << stride) ^ divisor; 101 }
133} 102}
134 103
135bool FixedPipelineState::VertexBinding::operator==(const VertexBinding& rhs) const noexcept { 104void FixedPipelineState::BlendingAttachment::Fill(const Maxwell& regs, std::size_t index) {
136 return std::tie(index, stride, divisor) == std::tie(rhs.index, rhs.stride, rhs.divisor); 105 const auto& mask = regs.color_mask[regs.color_mask_common ? 0 : index];
137} 106
107 raw = 0;
108 mask_r.Assign(mask.R);
109 mask_g.Assign(mask.G);
110 mask_b.Assign(mask.B);
111 mask_a.Assign(mask.A);
112
113 // TODO: C++20 Use templated lambda to deduplicate code
114
115 if (!regs.independent_blend_enable) {
116 const auto& src = regs.blend;
117 if (!src.enable[index]) {
118 return;
119 }
120 equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
121 equation_a.Assign(PackBlendEquation(src.equation_a));
122 factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
123 factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
124 factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
125 factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
126 enable.Assign(1);
127 return;
128 }
138 129
139std::size_t FixedPipelineState::VertexAttribute::Hash() const noexcept { 130 if (!regs.blend.enable[index]) {
140 return static_cast<std::size_t>(index) ^ (static_cast<std::size_t>(buffer) << 13) ^ 131 return;
141 (static_cast<std::size_t>(type) << 22) ^ (static_cast<std::size_t>(size) << 31) ^ 132 }
142 (static_cast<std::size_t>(offset) << 36); 133 const auto& src = regs.independent_blend[index];
134 equation_rgb.Assign(PackBlendEquation(src.equation_rgb));
135 equation_a.Assign(PackBlendEquation(src.equation_a));
136 factor_source_rgb.Assign(PackBlendFactor(src.factor_source_rgb));
137 factor_dest_rgb.Assign(PackBlendFactor(src.factor_dest_rgb));
138 factor_source_a.Assign(PackBlendFactor(src.factor_source_a));
139 factor_dest_a.Assign(PackBlendFactor(src.factor_dest_a));
140 enable.Assign(1);
143} 141}
144 142
145bool FixedPipelineState::VertexAttribute::operator==(const VertexAttribute& rhs) const noexcept { 143void FixedPipelineState::Fill(const Maxwell& regs) {
146 return std::tie(index, buffer, type, size, offset) == 144 rasterizer.Fill(regs);
147 std::tie(rhs.index, rhs.buffer, rhs.type, rhs.size, rhs.offset); 145 depth_stencil.Fill(regs);
146 color_blending.Fill(regs);
148} 147}
149 148
150std::size_t FixedPipelineState::StencilFace::Hash() const noexcept { 149std::size_t FixedPipelineState::Hash() const noexcept {
151 return static_cast<std::size_t>(action_stencil_fail) ^ 150 const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
152 (static_cast<std::size_t>(action_depth_fail) << 4) ^ 151 return static_cast<std::size_t>(hash);
153 (static_cast<std::size_t>(action_depth_fail) << 20) ^
154 (static_cast<std::size_t>(action_depth_pass) << 36);
155} 152}
156 153
157bool FixedPipelineState::StencilFace::operator==(const StencilFace& rhs) const noexcept { 154bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept {
158 return std::tie(action_stencil_fail, action_depth_fail, action_depth_pass, test_func) == 155 return std::memcmp(this, &rhs, sizeof *this) == 0;
159 std::tie(rhs.action_stencil_fail, rhs.action_depth_fail, rhs.action_depth_pass,
160 rhs.test_func);
161} 156}
162 157
163std::size_t FixedPipelineState::BlendingAttachment::Hash() const noexcept { 158u32 FixedPipelineState::PackComparisonOp(Maxwell::ComparisonOp op) noexcept {
164 return static_cast<std::size_t>(enable) ^ (static_cast<std::size_t>(rgb_equation) << 5) ^ 159 // OpenGL enums go from 0x200 to 0x207 and the others from 1 to 8
165 (static_cast<std::size_t>(src_rgb_func) << 10) ^ 160 // If we substract 0x200 to OpenGL enums and 1 to the others we get a 0-7 range.
166 (static_cast<std::size_t>(dst_rgb_func) << 15) ^ 161 // Perfect for a hash.
167 (static_cast<std::size_t>(a_equation) << 20) ^ 162 const u32 value = static_cast<u32>(op);
168 (static_cast<std::size_t>(src_a_func) << 25) ^ 163 return value - (value >= 0x200 ? 0x200 : 1);
169 (static_cast<std::size_t>(dst_a_func) << 30) ^
170 (static_cast<std::size_t>(components[0]) << 35) ^
171 (static_cast<std::size_t>(components[1]) << 36) ^
172 (static_cast<std::size_t>(components[2]) << 37) ^
173 (static_cast<std::size_t>(components[3]) << 38);
174} 164}
175 165
176bool FixedPipelineState::BlendingAttachment::operator==(const BlendingAttachment& rhs) const 166Maxwell::ComparisonOp FixedPipelineState::UnpackComparisonOp(u32 packed) noexcept {
177 noexcept { 167 // Read PackComparisonOp for the logic behind this.
178 return std::tie(enable, rgb_equation, src_rgb_func, dst_rgb_func, a_equation, src_a_func, 168 return static_cast<Maxwell::ComparisonOp>(packed + 1);
179 dst_a_func, components) ==
180 std::tie(rhs.enable, rhs.rgb_equation, rhs.src_rgb_func, rhs.dst_rgb_func,
181 rhs.a_equation, rhs.src_a_func, rhs.dst_a_func, rhs.components);
182} 169}
183 170
184std::size_t FixedPipelineState::VertexInput::Hash() const noexcept { 171u32 FixedPipelineState::PackStencilOp(Maxwell::StencilOp op) noexcept {
185 std::size_t hash = num_bindings ^ (num_attributes << 32); 172 switch (op) {
186 for (std::size_t i = 0; i < num_bindings; ++i) { 173 case Maxwell::StencilOp::Keep:
187 boost::hash_combine(hash, bindings[i].Hash()); 174 case Maxwell::StencilOp::KeepOGL:
188 } 175 return 0;
189 for (std::size_t i = 0; i < num_attributes; ++i) { 176 case Maxwell::StencilOp::Zero:
190 boost::hash_combine(hash, attributes[i].Hash()); 177 case Maxwell::StencilOp::ZeroOGL:
178 return 1;
179 case Maxwell::StencilOp::Replace:
180 case Maxwell::StencilOp::ReplaceOGL:
181 return 2;
182 case Maxwell::StencilOp::Incr:
183 case Maxwell::StencilOp::IncrOGL:
184 return 3;
185 case Maxwell::StencilOp::Decr:
186 case Maxwell::StencilOp::DecrOGL:
187 return 4;
188 case Maxwell::StencilOp::Invert:
189 case Maxwell::StencilOp::InvertOGL:
190 return 5;
191 case Maxwell::StencilOp::IncrWrap:
192 case Maxwell::StencilOp::IncrWrapOGL:
193 return 6;
194 case Maxwell::StencilOp::DecrWrap:
195 case Maxwell::StencilOp::DecrWrapOGL:
196 return 7;
191 } 197 }
192 return hash; 198 return 0;
193} 199}
194 200
195bool FixedPipelineState::VertexInput::operator==(const VertexInput& rhs) const noexcept { 201Maxwell::StencilOp FixedPipelineState::UnpackStencilOp(u32 packed) noexcept {
196 return std::equal(bindings.begin(), bindings.begin() + num_bindings, rhs.bindings.begin(), 202 static constexpr std::array LUT = {Maxwell::StencilOp::Keep, Maxwell::StencilOp::Zero,
197 rhs.bindings.begin() + rhs.num_bindings) && 203 Maxwell::StencilOp::Replace, Maxwell::StencilOp::Incr,
198 std::equal(attributes.begin(), attributes.begin() + num_attributes, 204 Maxwell::StencilOp::Decr, Maxwell::StencilOp::Invert,
199 rhs.attributes.begin(), rhs.attributes.begin() + rhs.num_attributes); 205 Maxwell::StencilOp::IncrWrap, Maxwell::StencilOp::DecrWrap};
206 return LUT[packed];
200} 207}
201 208
202std::size_t FixedPipelineState::InputAssembly::Hash() const noexcept { 209u32 FixedPipelineState::PackCullFace(Maxwell::CullFace cull) noexcept {
203 std::size_t point_size_int = 0; 210 // FrontAndBack is 0x408, by substracting 0x406 in it we get 2.
204 std::memcpy(&point_size_int, &point_size, sizeof(point_size)); 211 // Individual cull faces are in 0x404 and 0x405, substracting 0x404 we get 0 and 1.
205 return (static_cast<std::size_t>(topology) << 24) ^ (point_size_int << 32) ^ 212 const u32 value = static_cast<u32>(cull);
206 static_cast<std::size_t>(primitive_restart_enable); 213 return value - (value == 0x408 ? 0x406 : 0x404);
207} 214}
208 215
209bool FixedPipelineState::InputAssembly::operator==(const InputAssembly& rhs) const noexcept { 216Maxwell::CullFace FixedPipelineState::UnpackCullFace(u32 packed) noexcept {
210 return std::tie(topology, primitive_restart_enable, point_size) == 217 static constexpr std::array LUT = {Maxwell::CullFace::Front, Maxwell::CullFace::Back,
211 std::tie(rhs.topology, rhs.primitive_restart_enable, rhs.point_size); 218 Maxwell::CullFace::FrontAndBack};
219 return LUT[packed];
212} 220}
213 221
214std::size_t FixedPipelineState::Tessellation::Hash() const noexcept { 222u32 FixedPipelineState::PackFrontFace(Maxwell::FrontFace face) noexcept {
215 return static_cast<std::size_t>(patch_control_points) ^ 223 return static_cast<u32>(face) - 0x900;
216 (static_cast<std::size_t>(primitive) << 6) ^ (static_cast<std::size_t>(spacing) << 8) ^
217 (static_cast<std::size_t>(clockwise) << 10);
218} 224}
219 225
220bool FixedPipelineState::Tessellation::operator==(const Tessellation& rhs) const noexcept { 226Maxwell::FrontFace FixedPipelineState::UnpackFrontFace(u32 packed) noexcept {
221 return std::tie(patch_control_points, primitive, spacing, clockwise) == 227 return static_cast<Maxwell::FrontFace>(packed + 0x900);
222 std::tie(rhs.patch_control_points, rhs.primitive, rhs.spacing, rhs.clockwise);
223} 228}
224 229
225std::size_t FixedPipelineState::Rasterizer::Hash() const noexcept { 230u32 FixedPipelineState::PackPolygonMode(Maxwell::PolygonMode mode) noexcept {
226 return static_cast<std::size_t>(cull_enable) ^ 231 return static_cast<u32>(mode) - 0x1B00;
227 (static_cast<std::size_t>(depth_bias_enable) << 1) ^
228 (static_cast<std::size_t>(depth_clamp_enable) << 2) ^
229 (static_cast<std::size_t>(ndc_minus_one_to_one) << 3) ^
230 (static_cast<std::size_t>(cull_face) << 24) ^
231 (static_cast<std::size_t>(front_face) << 48);
232} 232}
233 233
234bool FixedPipelineState::Rasterizer::operator==(const Rasterizer& rhs) const noexcept { 234Maxwell::PolygonMode FixedPipelineState::UnpackPolygonMode(u32 packed) noexcept {
235 return std::tie(cull_enable, depth_bias_enable, depth_clamp_enable, ndc_minus_one_to_one, 235 return static_cast<Maxwell::PolygonMode>(packed + 0x1B00);
236 cull_face, front_face) ==
237 std::tie(rhs.cull_enable, rhs.depth_bias_enable, rhs.depth_clamp_enable,
238 rhs.ndc_minus_one_to_one, rhs.cull_face, rhs.front_face);
239} 236}
240 237
241std::size_t FixedPipelineState::DepthStencil::Hash() const noexcept { 238u32 FixedPipelineState::PackLogicOp(Maxwell::LogicOperation op) noexcept {
242 std::size_t hash = static_cast<std::size_t>(depth_test_enable) ^ 239 return static_cast<u32>(op) - 0x1500;
243 (static_cast<std::size_t>(depth_write_enable) << 1) ^
244 (static_cast<std::size_t>(depth_bounds_enable) << 2) ^
245 (static_cast<std::size_t>(stencil_enable) << 3) ^
246 (static_cast<std::size_t>(depth_test_function) << 4);
247 boost::hash_combine(hash, front_stencil.Hash());
248 boost::hash_combine(hash, back_stencil.Hash());
249 return hash;
250} 240}
251 241
252bool FixedPipelineState::DepthStencil::operator==(const DepthStencil& rhs) const noexcept { 242Maxwell::LogicOperation FixedPipelineState::UnpackLogicOp(u32 packed) noexcept {
253 return std::tie(depth_test_enable, depth_write_enable, depth_bounds_enable, depth_test_function, 243 return static_cast<Maxwell::LogicOperation>(packed + 0x1500);
254 stencil_enable, front_stencil, back_stencil) ==
255 std::tie(rhs.depth_test_enable, rhs.depth_write_enable, rhs.depth_bounds_enable,
256 rhs.depth_test_function, rhs.stencil_enable, rhs.front_stencil,
257 rhs.back_stencil);
258} 244}
259 245
260std::size_t FixedPipelineState::ColorBlending::Hash() const noexcept { 246u32 FixedPipelineState::PackBlendEquation(Maxwell::Blend::Equation equation) noexcept {
261 std::size_t hash = attachments_count << 13; 247 switch (equation) {
262 for (std::size_t rt = 0; rt < static_cast<std::size_t>(attachments_count); ++rt) { 248 case Maxwell::Blend::Equation::Add:
263 boost::hash_combine(hash, attachments[rt].Hash()); 249 case Maxwell::Blend::Equation::AddGL:
250 return 0;
251 case Maxwell::Blend::Equation::Subtract:
252 case Maxwell::Blend::Equation::SubtractGL:
253 return 1;
254 case Maxwell::Blend::Equation::ReverseSubtract:
255 case Maxwell::Blend::Equation::ReverseSubtractGL:
256 return 2;
257 case Maxwell::Blend::Equation::Min:
258 case Maxwell::Blend::Equation::MinGL:
259 return 3;
260 case Maxwell::Blend::Equation::Max:
261 case Maxwell::Blend::Equation::MaxGL:
262 return 4;
264 } 263 }
265 return hash; 264 return 0;
266} 265}
267 266
268bool FixedPipelineState::ColorBlending::operator==(const ColorBlending& rhs) const noexcept { 267Maxwell::Blend::Equation FixedPipelineState::UnpackBlendEquation(u32 packed) noexcept {
269 return std::equal(attachments.begin(), attachments.begin() + attachments_count, 268 static constexpr std::array LUT = {
270 rhs.attachments.begin(), rhs.attachments.begin() + rhs.attachments_count); 269 Maxwell::Blend::Equation::Add, Maxwell::Blend::Equation::Subtract,
270 Maxwell::Blend::Equation::ReverseSubtract, Maxwell::Blend::Equation::Min,
271 Maxwell::Blend::Equation::Max};
272 return LUT[packed];
271} 273}
272 274
273std::size_t FixedPipelineState::Hash() const noexcept { 275u32 FixedPipelineState::PackBlendFactor(Maxwell::Blend::Factor factor) noexcept {
274 std::size_t hash = 0; 276 switch (factor) {
275 boost::hash_combine(hash, vertex_input.Hash()); 277 case Maxwell::Blend::Factor::Zero:
276 boost::hash_combine(hash, input_assembly.Hash()); 278 case Maxwell::Blend::Factor::ZeroGL:
277 boost::hash_combine(hash, tessellation.Hash()); 279 return 0;
278 boost::hash_combine(hash, rasterizer.Hash()); 280 case Maxwell::Blend::Factor::One:
279 boost::hash_combine(hash, depth_stencil.Hash()); 281 case Maxwell::Blend::Factor::OneGL:
280 boost::hash_combine(hash, color_blending.Hash()); 282 return 1;
281 return hash; 283 case Maxwell::Blend::Factor::SourceColor:
282} 284 case Maxwell::Blend::Factor::SourceColorGL:
283 285 return 2;
284bool FixedPipelineState::operator==(const FixedPipelineState& rhs) const noexcept { 286 case Maxwell::Blend::Factor::OneMinusSourceColor:
285 return std::tie(vertex_input, input_assembly, tessellation, rasterizer, depth_stencil, 287 case Maxwell::Blend::Factor::OneMinusSourceColorGL:
286 color_blending) == std::tie(rhs.vertex_input, rhs.input_assembly, 288 return 3;
287 rhs.tessellation, rhs.rasterizer, rhs.depth_stencil, 289 case Maxwell::Blend::Factor::SourceAlpha:
288 rhs.color_blending); 290 case Maxwell::Blend::Factor::SourceAlphaGL:
291 return 4;
292 case Maxwell::Blend::Factor::OneMinusSourceAlpha:
293 case Maxwell::Blend::Factor::OneMinusSourceAlphaGL:
294 return 5;
295 case Maxwell::Blend::Factor::DestAlpha:
296 case Maxwell::Blend::Factor::DestAlphaGL:
297 return 6;
298 case Maxwell::Blend::Factor::OneMinusDestAlpha:
299 case Maxwell::Blend::Factor::OneMinusDestAlphaGL:
300 return 7;
301 case Maxwell::Blend::Factor::DestColor:
302 case Maxwell::Blend::Factor::DestColorGL:
303 return 8;
304 case Maxwell::Blend::Factor::OneMinusDestColor:
305 case Maxwell::Blend::Factor::OneMinusDestColorGL:
306 return 9;
307 case Maxwell::Blend::Factor::SourceAlphaSaturate:
308 case Maxwell::Blend::Factor::SourceAlphaSaturateGL:
309 return 10;
310 case Maxwell::Blend::Factor::Source1Color:
311 case Maxwell::Blend::Factor::Source1ColorGL:
312 return 11;
313 case Maxwell::Blend::Factor::OneMinusSource1Color:
314 case Maxwell::Blend::Factor::OneMinusSource1ColorGL:
315 return 12;
316 case Maxwell::Blend::Factor::Source1Alpha:
317 case Maxwell::Blend::Factor::Source1AlphaGL:
318 return 13;
319 case Maxwell::Blend::Factor::OneMinusSource1Alpha:
320 case Maxwell::Blend::Factor::OneMinusSource1AlphaGL:
321 return 14;
322 case Maxwell::Blend::Factor::ConstantColor:
323 case Maxwell::Blend::Factor::ConstantColorGL:
324 return 15;
325 case Maxwell::Blend::Factor::OneMinusConstantColor:
326 case Maxwell::Blend::Factor::OneMinusConstantColorGL:
327 return 16;
328 case Maxwell::Blend::Factor::ConstantAlpha:
329 case Maxwell::Blend::Factor::ConstantAlphaGL:
330 return 17;
331 case Maxwell::Blend::Factor::OneMinusConstantAlpha:
332 case Maxwell::Blend::Factor::OneMinusConstantAlphaGL:
333 return 18;
334 }
335 return 0;
289} 336}
290 337
291FixedPipelineState GetFixedPipelineState(const Maxwell& regs) { 338Maxwell::Blend::Factor FixedPipelineState::UnpackBlendFactor(u32 packed) noexcept {
292 FixedPipelineState fixed_state; 339 static constexpr std::array LUT = {
293 fixed_state.input_assembly = GetInputAssemblyState(regs); 340 Maxwell::Blend::Factor::Zero,
294 fixed_state.tessellation = GetTessellationState(regs); 341 Maxwell::Blend::Factor::One,
295 fixed_state.rasterizer = GetRasterizerState(regs); 342 Maxwell::Blend::Factor::SourceColor,
296 fixed_state.depth_stencil = GetDepthStencilState(regs); 343 Maxwell::Blend::Factor::OneMinusSourceColor,
297 fixed_state.color_blending = GetColorBlendingState(regs); 344 Maxwell::Blend::Factor::SourceAlpha,
298 return fixed_state; 345 Maxwell::Blend::Factor::OneMinusSourceAlpha,
346 Maxwell::Blend::Factor::DestAlpha,
347 Maxwell::Blend::Factor::OneMinusDestAlpha,
348 Maxwell::Blend::Factor::DestColor,
349 Maxwell::Blend::Factor::OneMinusDestColor,
350 Maxwell::Blend::Factor::SourceAlphaSaturate,
351 Maxwell::Blend::Factor::Source1Color,
352 Maxwell::Blend::Factor::OneMinusSource1Color,
353 Maxwell::Blend::Factor::Source1Alpha,
354 Maxwell::Blend::Factor::OneMinusSource1Alpha,
355 Maxwell::Blend::Factor::ConstantColor,
356 Maxwell::Blend::Factor::OneMinusConstantColor,
357 Maxwell::Blend::Factor::ConstantAlpha,
358 Maxwell::Blend::Factor::OneMinusConstantAlpha,
359 };
360 return LUT[packed];
299} 361}
300 362
301} // namespace Vulkan 363} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/fixed_pipeline_state.h b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
index 4c8ba7f90..8652067a7 100644
--- a/src/video_core/renderer_vulkan/fixed_pipeline_state.h
+++ b/src/video_core/renderer_vulkan/fixed_pipeline_state.h
@@ -7,6 +7,7 @@
7#include <array> 7#include <array>
8#include <type_traits> 8#include <type_traits>
9 9
10#include "common/bit_field.h"
10#include "common/common_types.h" 11#include "common/common_types.h"
11 12
12#include "video_core/engines/maxwell_3d.h" 13#include "video_core/engines/maxwell_3d.h"
@@ -16,93 +17,48 @@ namespace Vulkan {
16 17
17using Maxwell = Tegra::Engines::Maxwell3D::Regs; 18using Maxwell = Tegra::Engines::Maxwell3D::Regs;
18 19
19// TODO(Rodrigo): Optimize this structure.
20
21struct FixedPipelineState { 20struct FixedPipelineState {
22 using PixelFormat = VideoCore::Surface::PixelFormat; 21 static u32 PackComparisonOp(Maxwell::ComparisonOp op) noexcept;
23 22 static Maxwell::ComparisonOp UnpackComparisonOp(u32 packed) noexcept;
24 struct VertexBinding {
25 constexpr VertexBinding(u32 index, u32 stride, u32 divisor)
26 : index{index}, stride{stride}, divisor{divisor} {}
27 VertexBinding() = default;
28
29 u32 index;
30 u32 stride;
31 u32 divisor;
32
33 std::size_t Hash() const noexcept;
34 23
35 bool operator==(const VertexBinding& rhs) const noexcept; 24 static u32 PackStencilOp(Maxwell::StencilOp op) noexcept;
36 25 static Maxwell::StencilOp UnpackStencilOp(u32 packed) noexcept;
37 bool operator!=(const VertexBinding& rhs) const noexcept {
38 return !operator==(rhs);
39 }
40 };
41 26
42 struct VertexAttribute { 27 static u32 PackCullFace(Maxwell::CullFace cull) noexcept;
43 constexpr VertexAttribute(u32 index, u32 buffer, Maxwell::VertexAttribute::Type type, 28 static Maxwell::CullFace UnpackCullFace(u32 packed) noexcept;
44 Maxwell::VertexAttribute::Size size, u32 offset)
45 : index{index}, buffer{buffer}, type{type}, size{size}, offset{offset} {}
46 VertexAttribute() = default;
47
48 u32 index;
49 u32 buffer;
50 Maxwell::VertexAttribute::Type type;
51 Maxwell::VertexAttribute::Size size;
52 u32 offset;
53
54 std::size_t Hash() const noexcept;
55
56 bool operator==(const VertexAttribute& rhs) const noexcept;
57
58 bool operator!=(const VertexAttribute& rhs) const noexcept {
59 return !operator==(rhs);
60 }
61 };
62 29
63 struct StencilFace { 30 static u32 PackFrontFace(Maxwell::FrontFace face) noexcept;
64 constexpr StencilFace(Maxwell::StencilOp action_stencil_fail, 31 static Maxwell::FrontFace UnpackFrontFace(u32 packed) noexcept;
65 Maxwell::StencilOp action_depth_fail,
66 Maxwell::StencilOp action_depth_pass, Maxwell::ComparisonOp test_func)
67 : action_stencil_fail{action_stencil_fail}, action_depth_fail{action_depth_fail},
68 action_depth_pass{action_depth_pass}, test_func{test_func} {}
69 StencilFace() = default;
70 32
71 Maxwell::StencilOp action_stencil_fail; 33 static u32 PackPolygonMode(Maxwell::PolygonMode mode) noexcept;
72 Maxwell::StencilOp action_depth_fail; 34 static Maxwell::PolygonMode UnpackPolygonMode(u32 packed) noexcept;
73 Maxwell::StencilOp action_depth_pass;
74 Maxwell::ComparisonOp test_func;
75 35
76 std::size_t Hash() const noexcept; 36 static u32 PackLogicOp(Maxwell::LogicOperation op) noexcept;
37 static Maxwell::LogicOperation UnpackLogicOp(u32 packed) noexcept;
77 38
78 bool operator==(const StencilFace& rhs) const noexcept; 39 static u32 PackBlendEquation(Maxwell::Blend::Equation equation) noexcept;
40 static Maxwell::Blend::Equation UnpackBlendEquation(u32 packed) noexcept;
79 41
80 bool operator!=(const StencilFace& rhs) const noexcept { 42 static u32 PackBlendFactor(Maxwell::Blend::Factor factor) noexcept;
81 return !operator==(rhs); 43 static Maxwell::Blend::Factor UnpackBlendFactor(u32 packed) noexcept;
82 }
83 };
84 44
85 struct BlendingAttachment { 45 struct BlendingAttachment {
86 constexpr BlendingAttachment(bool enable, Maxwell::Blend::Equation rgb_equation, 46 union {
87 Maxwell::Blend::Factor src_rgb_func, 47 u32 raw;
88 Maxwell::Blend::Factor dst_rgb_func, 48 BitField<0, 1, u32> mask_r;
89 Maxwell::Blend::Equation a_equation, 49 BitField<1, 1, u32> mask_g;
90 Maxwell::Blend::Factor src_a_func, 50 BitField<2, 1, u32> mask_b;
91 Maxwell::Blend::Factor dst_a_func, 51 BitField<3, 1, u32> mask_a;
92 std::array<bool, 4> components) 52 BitField<4, 3, u32> equation_rgb;
93 : enable{enable}, rgb_equation{rgb_equation}, src_rgb_func{src_rgb_func}, 53 BitField<7, 3, u32> equation_a;
94 dst_rgb_func{dst_rgb_func}, a_equation{a_equation}, src_a_func{src_a_func}, 54 BitField<10, 5, u32> factor_source_rgb;
95 dst_a_func{dst_a_func}, components{components} {} 55 BitField<15, 5, u32> factor_dest_rgb;
96 BlendingAttachment() = default; 56 BitField<20, 5, u32> factor_source_a;
97 57 BitField<25, 5, u32> factor_dest_a;
98 bool enable; 58 BitField<30, 1, u32> enable;
99 Maxwell::Blend::Equation rgb_equation; 59 };
100 Maxwell::Blend::Factor src_rgb_func; 60
101 Maxwell::Blend::Factor dst_rgb_func; 61 void Fill(const Maxwell& regs, std::size_t index);
102 Maxwell::Blend::Equation a_equation;
103 Maxwell::Blend::Factor src_a_func;
104 Maxwell::Blend::Factor dst_a_func;
105 std::array<bool, 4> components;
106 62
107 std::size_t Hash() const noexcept; 63 std::size_t Hash() const noexcept;
108 64
@@ -111,135 +67,178 @@ struct FixedPipelineState {
111 bool operator!=(const BlendingAttachment& rhs) const noexcept { 67 bool operator!=(const BlendingAttachment& rhs) const noexcept {
112 return !operator==(rhs); 68 return !operator==(rhs);
113 } 69 }
114 };
115
116 struct VertexInput {
117 std::size_t num_bindings = 0;
118 std::size_t num_attributes = 0;
119 std::array<VertexBinding, Maxwell::NumVertexArrays> bindings;
120 std::array<VertexAttribute, Maxwell::NumVertexAttributes> attributes;
121
122 std::size_t Hash() const noexcept;
123 70
124 bool operator==(const VertexInput& rhs) const noexcept; 71 constexpr std::array<bool, 4> Mask() const noexcept {
72 return {mask_r != 0, mask_g != 0, mask_b != 0, mask_a != 0};
73 }
125 74
126 bool operator!=(const VertexInput& rhs) const noexcept { 75 Maxwell::Blend::Equation EquationRGB() const noexcept {
127 return !operator==(rhs); 76 return UnpackBlendEquation(equation_rgb.Value());
128 } 77 }
129 };
130 78
131 struct InputAssembly { 79 Maxwell::Blend::Equation EquationAlpha() const noexcept {
132 constexpr InputAssembly(Maxwell::PrimitiveTopology topology, bool primitive_restart_enable, 80 return UnpackBlendEquation(equation_a.Value());
133 float point_size) 81 }
134 : topology{topology}, primitive_restart_enable{primitive_restart_enable},
135 point_size{point_size} {}
136 InputAssembly() = default;
137 82
138 Maxwell::PrimitiveTopology topology; 83 Maxwell::Blend::Factor SourceRGBFactor() const noexcept {
139 bool primitive_restart_enable; 84 return UnpackBlendFactor(factor_source_rgb.Value());
140 float point_size; 85 }
141 86
142 std::size_t Hash() const noexcept; 87 Maxwell::Blend::Factor DestRGBFactor() const noexcept {
88 return UnpackBlendFactor(factor_dest_rgb.Value());
89 }
143 90
144 bool operator==(const InputAssembly& rhs) const noexcept; 91 Maxwell::Blend::Factor SourceAlphaFactor() const noexcept {
92 return UnpackBlendFactor(factor_source_a.Value());
93 }
145 94
146 bool operator!=(const InputAssembly& rhs) const noexcept { 95 Maxwell::Blend::Factor DestAlphaFactor() const noexcept {
147 return !operator==(rhs); 96 return UnpackBlendFactor(factor_dest_a.Value());
148 } 97 }
149 }; 98 };
150 99
151 struct Tessellation { 100 struct VertexInput {
152 constexpr Tessellation(u32 patch_control_points, Maxwell::TessellationPrimitive primitive, 101 union Binding {
153 Maxwell::TessellationSpacing spacing, bool clockwise) 102 u16 raw;
154 : patch_control_points{patch_control_points}, primitive{primitive}, spacing{spacing}, 103 BitField<0, 1, u16> enabled;
155 clockwise{clockwise} {} 104 BitField<1, 12, u16> stride;
156 Tessellation() = default; 105 };
157 106
158 u32 patch_control_points; 107 union Attribute {
159 Maxwell::TessellationPrimitive primitive; 108 u32 raw;
160 Maxwell::TessellationSpacing spacing; 109 BitField<0, 1, u32> enabled;
161 bool clockwise; 110 BitField<1, 5, u32> buffer;
162 111 BitField<6, 14, u32> offset;
163 std::size_t Hash() const noexcept; 112 BitField<20, 3, u32> type;
164 113 BitField<23, 6, u32> size;
165 bool operator==(const Tessellation& rhs) const noexcept; 114
115 constexpr Maxwell::VertexAttribute::Type Type() const noexcept {
116 return static_cast<Maxwell::VertexAttribute::Type>(type.Value());
117 }
118
119 constexpr Maxwell::VertexAttribute::Size Size() const noexcept {
120 return static_cast<Maxwell::VertexAttribute::Size>(size.Value());
121 }
122 };
123
124 std::array<Binding, Maxwell::NumVertexArrays> bindings;
125 std::array<u32, Maxwell::NumVertexArrays> binding_divisors;
126 std::array<Attribute, Maxwell::NumVertexAttributes> attributes;
127
128 void SetBinding(std::size_t index, bool enabled, u32 stride, u32 divisor) noexcept {
129 auto& binding = bindings[index];
130 binding.raw = 0;
131 binding.enabled.Assign(enabled ? 1 : 0);
132 binding.stride.Assign(static_cast<u16>(stride));
133 binding_divisors[index] = divisor;
134 }
166 135
167 bool operator!=(const Tessellation& rhs) const noexcept { 136 void SetAttribute(std::size_t index, bool enabled, u32 buffer, u32 offset,
168 return !operator==(rhs); 137 Maxwell::VertexAttribute::Type type,
138 Maxwell::VertexAttribute::Size size) noexcept {
139 auto& attribute = attributes[index];
140 attribute.raw = 0;
141 attribute.enabled.Assign(enabled ? 1 : 0);
142 attribute.buffer.Assign(buffer);
143 attribute.offset.Assign(offset);
144 attribute.type.Assign(static_cast<u32>(type));
145 attribute.size.Assign(static_cast<u32>(size));
169 } 146 }
170 }; 147 };
171 148
172 struct Rasterizer { 149 struct Rasterizer {
173 constexpr Rasterizer(bool cull_enable, bool depth_bias_enable, bool depth_clamp_enable, 150 union {
174 bool ndc_minus_one_to_one, Maxwell::CullFace cull_face, 151 u32 raw;
175 Maxwell::FrontFace front_face) 152 BitField<0, 4, u32> topology;
176 : cull_enable{cull_enable}, depth_bias_enable{depth_bias_enable}, 153 BitField<4, 1, u32> primitive_restart_enable;
177 depth_clamp_enable{depth_clamp_enable}, ndc_minus_one_to_one{ndc_minus_one_to_one}, 154 BitField<5, 1, u32> cull_enable;
178 cull_face{cull_face}, front_face{front_face} {} 155 BitField<6, 1, u32> depth_bias_enable;
179 Rasterizer() = default; 156 BitField<7, 1, u32> depth_clamp_disabled;
180 157 BitField<8, 1, u32> ndc_minus_one_to_one;
181 bool cull_enable; 158 BitField<9, 2, u32> cull_face;
182 bool depth_bias_enable; 159 BitField<11, 1, u32> front_face;
183 bool depth_clamp_enable; 160 BitField<12, 2, u32> polygon_mode;
184 bool ndc_minus_one_to_one; 161 BitField<14, 5, u32> patch_control_points_minus_one;
185 Maxwell::CullFace cull_face; 162 BitField<19, 2, u32> tessellation_primitive;
186 Maxwell::FrontFace front_face; 163 BitField<21, 2, u32> tessellation_spacing;
187 164 BitField<23, 1, u32> tessellation_clockwise;
188 std::size_t Hash() const noexcept; 165 BitField<24, 1, u32> logic_op_enable;
166 BitField<25, 4, u32> logic_op;
167 };
168
169 // TODO(Rodrigo): Move this to push constants
170 u32 point_size;
171
172 void Fill(const Maxwell& regs) noexcept;
173
174 constexpr Maxwell::PrimitiveTopology Topology() const noexcept {
175 return static_cast<Maxwell::PrimitiveTopology>(topology.Value());
176 }
189 177
190 bool operator==(const Rasterizer& rhs) const noexcept; 178 Maxwell::CullFace CullFace() const noexcept {
179 return UnpackCullFace(cull_face.Value());
180 }
191 181
192 bool operator!=(const Rasterizer& rhs) const noexcept { 182 Maxwell::FrontFace FrontFace() const noexcept {
193 return !operator==(rhs); 183 return UnpackFrontFace(front_face.Value());
194 } 184 }
195 }; 185 };
196 186
197 struct DepthStencil { 187 struct DepthStencil {
198 constexpr DepthStencil(bool depth_test_enable, bool depth_write_enable, 188 template <std::size_t Position>
199 bool depth_bounds_enable, bool stencil_enable, 189 union StencilFace {
200 Maxwell::ComparisonOp depth_test_function, StencilFace front_stencil, 190 BitField<Position + 0, 3, u32> action_stencil_fail;
201 StencilFace back_stencil) 191 BitField<Position + 3, 3, u32> action_depth_fail;
202 : depth_test_enable{depth_test_enable}, depth_write_enable{depth_write_enable}, 192 BitField<Position + 6, 3, u32> action_depth_pass;
203 depth_bounds_enable{depth_bounds_enable}, stencil_enable{stencil_enable}, 193 BitField<Position + 9, 3, u32> test_func;
204 depth_test_function{depth_test_function}, front_stencil{front_stencil}, 194
205 back_stencil{back_stencil} {} 195 Maxwell::StencilOp ActionStencilFail() const noexcept {
206 DepthStencil() = default; 196 return UnpackStencilOp(action_stencil_fail);
207 197 }
208 bool depth_test_enable; 198
209 bool depth_write_enable; 199 Maxwell::StencilOp ActionDepthFail() const noexcept {
210 bool depth_bounds_enable; 200 return UnpackStencilOp(action_depth_fail);
211 bool stencil_enable; 201 }
212 Maxwell::ComparisonOp depth_test_function; 202
213 StencilFace front_stencil; 203 Maxwell::StencilOp ActionDepthPass() const noexcept {
214 StencilFace back_stencil; 204 return UnpackStencilOp(action_depth_pass);
215 205 }
216 std::size_t Hash() const noexcept; 206
217 207 Maxwell::ComparisonOp TestFunc() const noexcept {
218 bool operator==(const DepthStencil& rhs) const noexcept; 208 return UnpackComparisonOp(test_func);
219 209 }
220 bool operator!=(const DepthStencil& rhs) const noexcept { 210 };
221 return !operator==(rhs); 211
212 union {
213 u32 raw;
214 StencilFace<0> front;
215 StencilFace<12> back;
216 BitField<24, 1, u32> depth_test_enable;
217 BitField<25, 1, u32> depth_write_enable;
218 BitField<26, 1, u32> depth_bounds_enable;
219 BitField<27, 1, u32> stencil_enable;
220 BitField<28, 3, u32> depth_test_func;
221 };
222
223 void Fill(const Maxwell& regs) noexcept;
224
225 Maxwell::ComparisonOp DepthTestFunc() const noexcept {
226 return UnpackComparisonOp(depth_test_func);
222 } 227 }
223 }; 228 };
224 229
225 struct ColorBlending { 230 struct ColorBlending {
226 constexpr ColorBlending(
227 std::array<float, 4> blend_constants, std::size_t attachments_count,
228 std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments)
229 : attachments_count{attachments_count}, attachments{attachments} {}
230 ColorBlending() = default;
231
232 std::size_t attachments_count;
233 std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments; 231 std::array<BlendingAttachment, Maxwell::NumRenderTargets> attachments;
234 232
235 std::size_t Hash() const noexcept; 233 void Fill(const Maxwell& regs) noexcept;
234 };
236 235
237 bool operator==(const ColorBlending& rhs) const noexcept; 236 VertexInput vertex_input;
237 Rasterizer rasterizer;
238 DepthStencil depth_stencil;
239 ColorBlending color_blending;
238 240
239 bool operator!=(const ColorBlending& rhs) const noexcept { 241 void Fill(const Maxwell& regs);
240 return !operator==(rhs);
241 }
242 };
243 242
244 std::size_t Hash() const noexcept; 243 std::size_t Hash() const noexcept;
245 244
@@ -248,27 +247,10 @@ struct FixedPipelineState {
248 bool operator!=(const FixedPipelineState& rhs) const noexcept { 247 bool operator!=(const FixedPipelineState& rhs) const noexcept {
249 return !operator==(rhs); 248 return !operator==(rhs);
250 } 249 }
251
252 VertexInput vertex_input;
253 InputAssembly input_assembly;
254 Tessellation tessellation;
255 Rasterizer rasterizer;
256 DepthStencil depth_stencil;
257 ColorBlending color_blending;
258}; 250};
259static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexBinding>); 251static_assert(std::has_unique_object_representations_v<FixedPipelineState>);
260static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexAttribute>);
261static_assert(std::is_trivially_copyable_v<FixedPipelineState::StencilFace>);
262static_assert(std::is_trivially_copyable_v<FixedPipelineState::BlendingAttachment>);
263static_assert(std::is_trivially_copyable_v<FixedPipelineState::VertexInput>);
264static_assert(std::is_trivially_copyable_v<FixedPipelineState::InputAssembly>);
265static_assert(std::is_trivially_copyable_v<FixedPipelineState::Tessellation>);
266static_assert(std::is_trivially_copyable_v<FixedPipelineState::Rasterizer>);
267static_assert(std::is_trivially_copyable_v<FixedPipelineState::DepthStencil>);
268static_assert(std::is_trivially_copyable_v<FixedPipelineState::ColorBlending>);
269static_assert(std::is_trivially_copyable_v<FixedPipelineState>); 252static_assert(std::is_trivially_copyable_v<FixedPipelineState>);
270 253static_assert(std::is_trivially_constructible_v<FixedPipelineState>);
271FixedPipelineState GetFixedPipelineState(const Maxwell& regs);
272 254
273} // namespace Vulkan 255} // namespace Vulkan
274 256
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
new file mode 100644
index 000000000..435c8c1b8
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.cpp
@@ -0,0 +1,220 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#ifdef HAS_NSIGHT_AFTERMATH
6
7#include <mutex>
8#include <string>
9#include <string_view>
10#include <utility>
11#include <vector>
12
13#include <fmt/format.h>
14
15#define VK_NO_PROTOTYPES
16#include <vulkan/vulkan.h>
17
18#include <GFSDK_Aftermath.h>
19#include <GFSDK_Aftermath_Defines.h>
20#include <GFSDK_Aftermath_GpuCrashDump.h>
21#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
22
23#include "common/common_paths.h"
24#include "common/common_types.h"
25#include "common/file_util.h"
26#include "common/logging/log.h"
27#include "common/scope_exit.h"
28
29#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
30
31namespace Vulkan {
32
33static constexpr char AFTERMATH_LIB_NAME[] = "GFSDK_Aftermath_Lib.x64.dll";
34
35NsightAftermathTracker::NsightAftermathTracker() = default;
36
37NsightAftermathTracker::~NsightAftermathTracker() {
38 if (initialized) {
39 (void)GFSDK_Aftermath_DisableGpuCrashDumps();
40 }
41}
42
43bool NsightAftermathTracker::Initialize() {
44 if (!dl.Open(AFTERMATH_LIB_NAME)) {
45 LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath DLL");
46 return false;
47 }
48
49 if (!dl.GetSymbol("GFSDK_Aftermath_DisableGpuCrashDumps",
50 &GFSDK_Aftermath_DisableGpuCrashDumps) ||
51 !dl.GetSymbol("GFSDK_Aftermath_EnableGpuCrashDumps",
52 &GFSDK_Aftermath_EnableGpuCrashDumps) ||
53 !dl.GetSymbol("GFSDK_Aftermath_GetShaderDebugInfoIdentifier",
54 &GFSDK_Aftermath_GetShaderDebugInfoIdentifier) ||
55 !dl.GetSymbol("GFSDK_Aftermath_GetShaderHashSpirv", &GFSDK_Aftermath_GetShaderHashSpirv) ||
56 !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_CreateDecoder",
57 &GFSDK_Aftermath_GpuCrashDump_CreateDecoder) ||
58 !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_DestroyDecoder",
59 &GFSDK_Aftermath_GpuCrashDump_DestroyDecoder) ||
60 !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GenerateJSON",
61 &GFSDK_Aftermath_GpuCrashDump_GenerateJSON) ||
62 !dl.GetSymbol("GFSDK_Aftermath_GpuCrashDump_GetJSON",
63 &GFSDK_Aftermath_GpuCrashDump_GetJSON)) {
64 LOG_ERROR(Render_Vulkan, "Failed to load Nsight Aftermath function pointers");
65 return false;
66 }
67
68 dump_dir = FileUtil::GetUserPath(FileUtil::UserPath::LogDir) + "gpucrash";
69
70 (void)FileUtil::DeleteDirRecursively(dump_dir);
71 if (!FileUtil::CreateDir(dump_dir)) {
72 LOG_ERROR(Render_Vulkan, "Failed to create Nsight Aftermath dump directory");
73 return false;
74 }
75
76 if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_EnableGpuCrashDumps(
77 GFSDK_Aftermath_Version_API, GFSDK_Aftermath_GpuCrashDumpWatchedApiFlags_Vulkan,
78 GFSDK_Aftermath_GpuCrashDumpFeatureFlags_Default, GpuCrashDumpCallback,
79 ShaderDebugInfoCallback, CrashDumpDescriptionCallback, this))) {
80 LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_EnableGpuCrashDumps failed");
81 return false;
82 }
83
84 LOG_INFO(Render_Vulkan, "Nsight Aftermath dump directory is \"{}\"", dump_dir);
85
86 initialized = true;
87 return true;
88}
89
90void NsightAftermathTracker::SaveShader(const std::vector<u32>& spirv) const {
91 if (!initialized) {
92 return;
93 }
94
95 std::vector<u32> spirv_copy = spirv;
96 GFSDK_Aftermath_SpirvCode shader;
97 shader.pData = spirv_copy.data();
98 shader.size = static_cast<u32>(spirv_copy.size() * 4);
99
100 std::scoped_lock lock{mutex};
101
102 GFSDK_Aftermath_ShaderHash hash;
103 if (!GFSDK_Aftermath_SUCCEED(
104 GFSDK_Aftermath_GetShaderHashSpirv(GFSDK_Aftermath_Version_API, &shader, &hash))) {
105 LOG_ERROR(Render_Vulkan, "Failed to hash SPIR-V module");
106 return;
107 }
108
109 FileUtil::IOFile file(fmt::format("{}/source_{:016x}.spv", dump_dir, hash.hash), "wb");
110 if (!file.IsOpen()) {
111 LOG_ERROR(Render_Vulkan, "Failed to dump SPIR-V module with hash={:016x}", hash.hash);
112 return;
113 }
114 if (file.WriteArray(spirv.data(), spirv.size()) != spirv.size()) {
115 LOG_ERROR(Render_Vulkan, "Failed to write SPIR-V module with hash={:016x}", hash.hash);
116 return;
117 }
118}
119
120void NsightAftermathTracker::OnGpuCrashDumpCallback(const void* gpu_crash_dump,
121 u32 gpu_crash_dump_size) {
122 std::scoped_lock lock{mutex};
123
124 LOG_CRITICAL(Render_Vulkan, "called");
125
126 GFSDK_Aftermath_GpuCrashDump_Decoder decoder;
127 if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_CreateDecoder(
128 GFSDK_Aftermath_Version_API, gpu_crash_dump, gpu_crash_dump_size, &decoder))) {
129 LOG_ERROR(Render_Vulkan, "Failed to create decoder");
130 return;
131 }
132 SCOPE_EXIT({ GFSDK_Aftermath_GpuCrashDump_DestroyDecoder(decoder); });
133
134 u32 json_size = 0;
135 if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GpuCrashDump_GenerateJSON(
136 decoder, GFSDK_Aftermath_GpuCrashDumpDecoderFlags_ALL_INFO,
137 GFSDK_Aftermath_GpuCrashDumpFormatterFlags_NONE, nullptr, nullptr, nullptr, nullptr,
138 this, &json_size))) {
139 LOG_ERROR(Render_Vulkan, "Failed to generate JSON");
140 return;
141 }
142 std::vector<char> json(json_size);
143 if (!GFSDK_Aftermath_SUCCEED(
144 GFSDK_Aftermath_GpuCrashDump_GetJSON(decoder, json_size, json.data()))) {
145 LOG_ERROR(Render_Vulkan, "Failed to query JSON");
146 return;
147 }
148
149 const std::string base_name = [this] {
150 const int id = dump_id++;
151 if (id == 0) {
152 return fmt::format("{}/crash.nv-gpudmp", dump_dir);
153 } else {
154 return fmt::format("{}/crash_{}.nv-gpudmp", dump_dir, id);
155 }
156 }();
157
158 std::string_view dump_view(static_cast<const char*>(gpu_crash_dump), gpu_crash_dump_size);
159 if (FileUtil::WriteStringToFile(false, base_name, dump_view) != gpu_crash_dump_size) {
160 LOG_ERROR(Render_Vulkan, "Failed to write dump file");
161 return;
162 }
163 const std::string_view json_view(json.data(), json.size());
164 if (FileUtil::WriteStringToFile(true, base_name + ".json", json_view) != json.size()) {
165 LOG_ERROR(Render_Vulkan, "Failed to write JSON");
166 return;
167 }
168}
169
170void NsightAftermathTracker::OnShaderDebugInfoCallback(const void* shader_debug_info,
171 u32 shader_debug_info_size) {
172 std::scoped_lock lock{mutex};
173
174 GFSDK_Aftermath_ShaderDebugInfoIdentifier identifier;
175 if (!GFSDK_Aftermath_SUCCEED(GFSDK_Aftermath_GetShaderDebugInfoIdentifier(
176 GFSDK_Aftermath_Version_API, shader_debug_info, shader_debug_info_size, &identifier))) {
177 LOG_ERROR(Render_Vulkan, "GFSDK_Aftermath_GetShaderDebugInfoIdentifier failed");
178 return;
179 }
180
181 const std::string path =
182 fmt::format("{}/shader_{:016x}{:016x}.nvdbg", dump_dir, identifier.id[0], identifier.id[1]);
183 FileUtil::IOFile file(path, "wb");
184 if (!file.IsOpen()) {
185 LOG_ERROR(Render_Vulkan, "Failed to create file {}", path);
186 return;
187 }
188 if (file.WriteBytes(static_cast<const u8*>(shader_debug_info), shader_debug_info_size) !=
189 shader_debug_info_size) {
190 LOG_ERROR(Render_Vulkan, "Failed to write file {}", path);
191 return;
192 }
193}
194
195void NsightAftermathTracker::OnCrashDumpDescriptionCallback(
196 PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description) {
197 add_description(GFSDK_Aftermath_GpuCrashDumpDescriptionKey_ApplicationName, "yuzu");
198}
199
200void NsightAftermathTracker::GpuCrashDumpCallback(const void* gpu_crash_dump,
201 u32 gpu_crash_dump_size, void* user_data) {
202 static_cast<NsightAftermathTracker*>(user_data)->OnGpuCrashDumpCallback(gpu_crash_dump,
203 gpu_crash_dump_size);
204}
205
206void NsightAftermathTracker::ShaderDebugInfoCallback(const void* shader_debug_info,
207 u32 shader_debug_info_size, void* user_data) {
208 static_cast<NsightAftermathTracker*>(user_data)->OnShaderDebugInfoCallback(
209 shader_debug_info, shader_debug_info_size);
210}
211
212void NsightAftermathTracker::CrashDumpDescriptionCallback(
213 PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data) {
214 static_cast<NsightAftermathTracker*>(user_data)->OnCrashDumpDescriptionCallback(
215 add_description);
216}
217
218} // namespace Vulkan
219
220#endif // HAS_NSIGHT_AFTERMATH
diff --git a/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
new file mode 100644
index 000000000..afe7ae99e
--- /dev/null
+++ b/src/video_core/renderer_vulkan/nsight_aftermath_tracker.h
@@ -0,0 +1,87 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <mutex>
8#include <string>
9#include <vector>
10
11#define VK_NO_PROTOTYPES
12#include <vulkan/vulkan.h>
13
14#ifdef HAS_NSIGHT_AFTERMATH
15#include <GFSDK_Aftermath_Defines.h>
16#include <GFSDK_Aftermath_GpuCrashDump.h>
17#include <GFSDK_Aftermath_GpuCrashDumpDecoding.h>
18#endif
19
20#include "common/common_types.h"
21#include "common/dynamic_library.h"
22
23namespace Vulkan {
24
25class NsightAftermathTracker {
26public:
27 NsightAftermathTracker();
28 ~NsightAftermathTracker();
29
30 NsightAftermathTracker(const NsightAftermathTracker&) = delete;
31 NsightAftermathTracker& operator=(const NsightAftermathTracker&) = delete;
32
33 // Delete move semantics because Aftermath initialization uses a pointer to this.
34 NsightAftermathTracker(NsightAftermathTracker&&) = delete;
35 NsightAftermathTracker& operator=(NsightAftermathTracker&&) = delete;
36
37 bool Initialize();
38
39 void SaveShader(const std::vector<u32>& spirv) const;
40
41private:
42#ifdef HAS_NSIGHT_AFTERMATH
43 static void GpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size,
44 void* user_data);
45
46 static void ShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size,
47 void* user_data);
48
49 static void CrashDumpDescriptionCallback(
50 PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description, void* user_data);
51
52 void OnGpuCrashDumpCallback(const void* gpu_crash_dump, u32 gpu_crash_dump_size);
53
54 void OnShaderDebugInfoCallback(const void* shader_debug_info, u32 shader_debug_info_size);
55
56 void OnCrashDumpDescriptionCallback(
57 PFN_GFSDK_Aftermath_AddGpuCrashDumpDescription add_description);
58
59 mutable std::mutex mutex;
60
61 std::string dump_dir;
62 int dump_id = 0;
63
64 bool initialized = false;
65
66 Common::DynamicLibrary dl;
67 PFN_GFSDK_Aftermath_DisableGpuCrashDumps GFSDK_Aftermath_DisableGpuCrashDumps;
68 PFN_GFSDK_Aftermath_EnableGpuCrashDumps GFSDK_Aftermath_EnableGpuCrashDumps;
69 PFN_GFSDK_Aftermath_GetShaderDebugInfoIdentifier GFSDK_Aftermath_GetShaderDebugInfoIdentifier;
70 PFN_GFSDK_Aftermath_GetShaderHashSpirv GFSDK_Aftermath_GetShaderHashSpirv;
71 PFN_GFSDK_Aftermath_GpuCrashDump_CreateDecoder GFSDK_Aftermath_GpuCrashDump_CreateDecoder;
72 PFN_GFSDK_Aftermath_GpuCrashDump_DestroyDecoder GFSDK_Aftermath_GpuCrashDump_DestroyDecoder;
73 PFN_GFSDK_Aftermath_GpuCrashDump_GenerateJSON GFSDK_Aftermath_GpuCrashDump_GenerateJSON;
74 PFN_GFSDK_Aftermath_GpuCrashDump_GetJSON GFSDK_Aftermath_GpuCrashDump_GetJSON;
75#endif
76};
77
78#ifndef HAS_NSIGHT_AFTERMATH
79inline NsightAftermathTracker::NsightAftermathTracker() = default;
80inline NsightAftermathTracker::~NsightAftermathTracker() = default;
81inline bool NsightAftermathTracker::Initialize() {
82 return false;
83}
84inline void NsightAftermathTracker::SaveShader(const std::vector<u32>&) const {}
85#endif
86
87} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 04532f8f8..59b441943 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -12,15 +12,12 @@
12 12
13#include <fmt/format.h> 13#include <fmt/format.h>
14 14
15#include "common/assert.h"
16#include "common/dynamic_library.h" 15#include "common/dynamic_library.h"
17#include "common/logging/log.h" 16#include "common/logging/log.h"
18#include "common/telemetry.h" 17#include "common/telemetry.h"
19#include "core/core.h" 18#include "core/core.h"
20#include "core/core_timing.h" 19#include "core/core_timing.h"
21#include "core/frontend/emu_window.h" 20#include "core/frontend/emu_window.h"
22#include "core/memory.h"
23#include "core/perf_stats.h"
24#include "core/settings.h" 21#include "core/settings.h"
25#include "core/telemetry_session.h" 22#include "core/telemetry_session.h"
26#include "video_core/gpu.h" 23#include "video_core/gpu.h"
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.h b/src/video_core/renderer_vulkan/renderer_vulkan.h
index 18270909b..522b5bff8 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.h
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <optional>
9#include <string> 8#include <string>
10#include <vector> 9#include <vector>
11 10
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index 5eb544aea..243640fab 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <array>
8#include <memory> 7#include <memory>
9#include <tuple> 8#include <tuple>
10 9
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index 81e1de2be..5b494da8c 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -5,11 +5,7 @@
5#include <algorithm> 5#include <algorithm>
6#include <cstring> 6#include <cstring>
7#include <memory> 7#include <memory>
8#include <optional>
9#include <tuple>
10 8
11#include "common/assert.h"
12#include "common/bit_util.h"
13#include "core/core.h" 9#include "core/core.h"
14#include "video_core/renderer_vulkan/vk_buffer_cache.h" 10#include "video_core/renderer_vulkan/vk_buffer_cache.h"
15#include "video_core/renderer_vulkan/vk_device.h" 11#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.h b/src/video_core/renderer_vulkan/vk_buffer_cache.h
index 3cd2e2774..a54583e7d 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.h
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.h
@@ -5,14 +5,11 @@
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <memory>
8#include <unordered_map>
9#include <vector>
10 8
11#include "common/common_types.h" 9#include "common/common_types.h"
12#include "video_core/buffer_cache/buffer_cache.h" 10#include "video_core/buffer_cache/buffer_cache.h"
13#include "video_core/rasterizer_cache.h" 11#include "video_core/rasterizer_cache.h"
14#include "video_core/renderer_vulkan/vk_memory_manager.h" 12#include "video_core/renderer_vulkan/vk_memory_manager.h"
15#include "video_core/renderer_vulkan/vk_resource_manager.h"
16#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h" 13#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
17#include "video_core/renderer_vulkan/vk_stream_buffer.h" 14#include "video_core/renderer_vulkan/vk_stream_buffer.h"
18#include "video_core/renderer_vulkan/wrapper.h" 15#include "video_core/renderer_vulkan/wrapper.h"
@@ -55,8 +52,6 @@ public:
55protected: 52protected:
56 VkBuffer ToHandle(const Buffer& buffer) override; 53 VkBuffer ToHandle(const Buffer& buffer) override;
57 54
58 void WriteBarrier() override {}
59
60 Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override; 55 Buffer CreateBlock(VAddr cpu_addr, std::size_t size) override;
61 56
62 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size, 57 void UploadBlockData(const Buffer& buffer, std::size_t offset, std::size_t size,
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 7b0268033..da71e710c 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -6,7 +6,7 @@
6#include <memory> 6#include <memory>
7#include <optional> 7#include <optional>
8#include <utility> 8#include <utility>
9#include <vector> 9
10#include "common/alignment.h" 10#include "common/alignment.h"
11#include "common/assert.h" 11#include "common/assert.h"
12#include "common/common_types.h" 12#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index 26bf834de..230b526bc 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -6,7 +6,7 @@
6 6
7#include <optional> 7#include <optional>
8#include <utility> 8#include <utility>
9#include <vector> 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
12#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 12#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 23beafa4f..8e1b46277 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -2,14 +2,12 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <memory>
6#include <vector> 5#include <vector>
7 6
8#include "video_core/renderer_vulkan/vk_compute_pipeline.h" 7#include "video_core/renderer_vulkan/vk_compute_pipeline.h"
9#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 8#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
10#include "video_core/renderer_vulkan/vk_device.h" 9#include "video_core/renderer_vulkan/vk_device.h"
11#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 10#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
12#include "video_core/renderer_vulkan/vk_resource_manager.h"
13#include "video_core/renderer_vulkan/vk_scheduler.h" 11#include "video_core/renderer_vulkan/vk_scheduler.h"
14#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 12#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
15#include "video_core/renderer_vulkan/vk_update_descriptor.h" 13#include "video_core/renderer_vulkan/vk_update_descriptor.h"
@@ -105,6 +103,8 @@ vk::DescriptorUpdateTemplateKHR VKComputePipeline::CreateDescriptorUpdateTemplat
105} 103}
106 104
107vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const { 105vk::ShaderModule VKComputePipeline::CreateShaderModule(const std::vector<u32>& code) const {
106 device.SaveShader(code);
107
108 VkShaderModuleCreateInfo ci; 108 VkShaderModuleCreateInfo ci;
109 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; 109 ci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
110 ci.pNext = nullptr; 110 ci.pNext = nullptr;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.h b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
index 33b9af29e..6e2f22a4a 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.h
@@ -4,8 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
8
9#include "common/common_types.h" 7#include "common/common_types.h"
10#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 8#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
11#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 9#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
index e9d528aa6..890fd52cf 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.cpp
@@ -2,7 +2,6 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <memory>
6#include <vector> 5#include <vector>
7 6
8#include "common/common_types.h" 7#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_descriptor_pool.h b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
index ab40c70f0..9efa66bef 100644
--- a/src/video_core/renderer_vulkan/vk_descriptor_pool.h
+++ b/src/video_core/renderer_vulkan/vk_descriptor_pool.h
@@ -4,10 +4,8 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory>
8#include <vector> 7#include <vector>
9 8
10#include "common/common_types.h"
11#include "video_core/renderer_vulkan/vk_resource_manager.h" 9#include "video_core/renderer_vulkan/vk_resource_manager.h"
12#include "video_core/renderer_vulkan/wrapper.h" 10#include "video_core/renderer_vulkan/wrapper.h"
13 11
diff --git a/src/video_core/renderer_vulkan/vk_device.cpp b/src/video_core/renderer_vulkan/vk_device.cpp
index 52d29e49d..0e4bbca97 100644
--- a/src/video_core/renderer_vulkan/vk_device.cpp
+++ b/src/video_core/renderer_vulkan/vk_device.cpp
@@ -4,11 +4,11 @@
4 4
5#include <bitset> 5#include <bitset>
6#include <chrono> 6#include <chrono>
7#include <cstdlib>
8#include <optional> 7#include <optional>
9#include <string_view> 8#include <string_view>
10#include <thread> 9#include <thread>
11#include <unordered_set> 10#include <unordered_set>
11#include <utility>
12#include <vector> 12#include <vector>
13 13
14#include "common/assert.h" 14#include "common/assert.h"
@@ -167,6 +167,7 @@ bool VKDevice::Create() {
167 VkPhysicalDeviceFeatures2 features2; 167 VkPhysicalDeviceFeatures2 features2;
168 features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2; 168 features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
169 features2.pNext = nullptr; 169 features2.pNext = nullptr;
170 const void* first_next = &features2;
170 void** next = &features2.pNext; 171 void** next = &features2.pNext;
171 172
172 auto& features = features2.features; 173 auto& features = features2.features;
@@ -296,7 +297,19 @@ bool VKDevice::Create() {
296 LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted"); 297 LOG_INFO(Render_Vulkan, "Device doesn't support depth range unrestricted");
297 } 298 }
298 299
299 logical = vk::Device::Create(physical, queue_cis, extensions, features2, dld); 300 VkDeviceDiagnosticsConfigCreateInfoNV diagnostics_nv;
301 if (nv_device_diagnostics_config) {
302 nsight_aftermath_tracker.Initialize();
303
304 diagnostics_nv.sType = VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV;
305 diagnostics_nv.pNext = &features2;
306 diagnostics_nv.flags = VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_SHADER_DEBUG_INFO_BIT_NV |
307 VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_RESOURCE_TRACKING_BIT_NV |
308 VK_DEVICE_DIAGNOSTICS_CONFIG_ENABLE_AUTOMATIC_CHECKPOINTS_BIT_NV;
309 first_next = &diagnostics_nv;
310 }
311
312 logical = vk::Device::Create(physical, queue_cis, extensions, first_next, dld);
300 if (!logical) { 313 if (!logical) {
301 LOG_ERROR(Render_Vulkan, "Failed to create logical device"); 314 LOG_ERROR(Render_Vulkan, "Failed to create logical device");
302 return false; 315 return false;
@@ -344,17 +357,12 @@ VkFormat VKDevice::GetSupportedFormat(VkFormat wanted_format, VkFormatFeatureFla
344void VKDevice::ReportLoss() const { 357void VKDevice::ReportLoss() const {
345 LOG_CRITICAL(Render_Vulkan, "Device loss occured!"); 358 LOG_CRITICAL(Render_Vulkan, "Device loss occured!");
346 359
347 // Wait some time to let the log flush 360 // Wait for the log to flush and for Nsight Aftermath to dump the results
348 std::this_thread::sleep_for(std::chrono::seconds{1}); 361 std::this_thread::sleep_for(std::chrono::seconds{3});
349 362}
350 if (!nv_device_diagnostic_checkpoints) {
351 return;
352 }
353 363
354 [[maybe_unused]] const std::vector data = graphics_queue.GetCheckpointDataNV(dld); 364void VKDevice::SaveShader(const std::vector<u32>& spirv) const {
355 // Catch here in debug builds (or with optimizations disabled) the last graphics pipeline to be 365 nsight_aftermath_tracker.SaveShader(spirv);
356 // executed. It can be done on a debugger by evaluating the expression:
357 // *(VKGraphicsPipeline*)data[0]
358} 366}
359 367
360bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const { 368bool VKDevice::IsOptimalAstcSupported(const VkPhysicalDeviceFeatures& features) const {
@@ -527,8 +535,8 @@ std::vector<const char*> VKDevice::LoadExtensions() {
527 Test(extension, has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME, 535 Test(extension, has_ext_transform_feedback, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME,
528 false); 536 false);
529 if (Settings::values.renderer_debug) { 537 if (Settings::values.renderer_debug) {
530 Test(extension, nv_device_diagnostic_checkpoints, 538 Test(extension, nv_device_diagnostics_config,
531 VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME, true); 539 VK_NV_DEVICE_DIAGNOSTICS_CONFIG_EXTENSION_NAME, true);
532 } 540 }
533 } 541 }
534 542
diff --git a/src/video_core/renderer_vulkan/vk_device.h b/src/video_core/renderer_vulkan/vk_device.h
index 60d64572a..c8640762d 100644
--- a/src/video_core/renderer_vulkan/vk_device.h
+++ b/src/video_core/renderer_vulkan/vk_device.h
@@ -10,6 +10,7 @@
10#include <vector> 10#include <vector>
11 11
12#include "common/common_types.h" 12#include "common/common_types.h"
13#include "video_core/renderer_vulkan/nsight_aftermath_tracker.h"
13#include "video_core/renderer_vulkan/wrapper.h" 14#include "video_core/renderer_vulkan/wrapper.h"
14 15
15namespace Vulkan { 16namespace Vulkan {
@@ -43,6 +44,9 @@ public:
43 /// Reports a device loss. 44 /// Reports a device loss.
44 void ReportLoss() const; 45 void ReportLoss() const;
45 46
47 /// Reports a shader to Nsight Aftermath.
48 void SaveShader(const std::vector<u32>& spirv) const;
49
46 /// Returns the dispatch loader with direct function pointers of the device. 50 /// Returns the dispatch loader with direct function pointers of the device.
47 const vk::DeviceDispatch& GetDispatchLoader() const { 51 const vk::DeviceDispatch& GetDispatchLoader() const {
48 return dld; 52 return dld;
@@ -78,11 +82,6 @@ public:
78 return present_family; 82 return present_family;
79 } 83 }
80 84
81 /// Returns true if the device is integrated with the host CPU.
82 bool IsIntegrated() const {
83 return properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
84 }
85
86 /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers. 85 /// Returns the current Vulkan API version provided in Vulkan-formatted version numbers.
87 u32 GetApiVersion() const { 86 u32 GetApiVersion() const {
88 return properties.apiVersion; 87 return properties.apiVersion;
@@ -173,11 +172,6 @@ public:
173 return ext_transform_feedback; 172 return ext_transform_feedback;
174 } 173 }
175 174
176 /// Returns true if the device supports VK_NV_device_diagnostic_checkpoints.
177 bool IsNvDeviceDiagnosticCheckpoints() const {
178 return nv_device_diagnostic_checkpoints;
179 }
180
181 /// Returns the vendor name reported from Vulkan. 175 /// Returns the vendor name reported from Vulkan.
182 std::string_view GetVendorName() const { 176 std::string_view GetVendorName() const {
183 return vendor_name; 177 return vendor_name;
@@ -233,7 +227,7 @@ private:
233 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted. 227 bool ext_depth_range_unrestricted{}; ///< Support for VK_EXT_depth_range_unrestricted.
234 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer. 228 bool ext_shader_viewport_index_layer{}; ///< Support for VK_EXT_shader_viewport_index_layer.
235 bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback. 229 bool ext_transform_feedback{}; ///< Support for VK_EXT_transform_feedback.
236 bool nv_device_diagnostic_checkpoints{}; ///< Support for VK_NV_device_diagnostic_checkpoints. 230 bool nv_device_diagnostics_config{}; ///< Support for VK_NV_device_diagnostics_config.
237 231
238 // Telemetry parameters 232 // Telemetry parameters
239 std::string vendor_name; ///< Device's driver name. 233 std::string vendor_name; ///< Device's driver name.
@@ -241,6 +235,9 @@ private:
241 235
242 /// Format properties dictionary. 236 /// Format properties dictionary.
243 std::unordered_map<VkFormat, VkFormatProperties> format_properties; 237 std::unordered_map<VkFormat, VkFormatProperties> format_properties;
238
239 /// Nsight Aftermath GPU crash tracker
240 NsightAftermathTracker nsight_aftermath_tracker;
244}; 241};
245 242
246} // namespace Vulkan 243} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
new file mode 100644
index 000000000..a02be5487
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -0,0 +1,101 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <memory>
6#include <thread>
7
8#include "video_core/renderer_vulkan/vk_buffer_cache.h"
9#include "video_core/renderer_vulkan/vk_device.h"
10#include "video_core/renderer_vulkan/vk_fence_manager.h"
11#include "video_core/renderer_vulkan/vk_scheduler.h"
12#include "video_core/renderer_vulkan/vk_texture_cache.h"
13#include "video_core/renderer_vulkan/wrapper.h"
14
15namespace Vulkan {
16
17InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload, bool is_stubbed)
18 : VideoCommon::FenceBase(payload, is_stubbed), device{device}, scheduler{scheduler} {}
19
20InnerFence::InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
21 u32 payload, bool is_stubbed)
22 : VideoCommon::FenceBase(address, payload, is_stubbed), device{device}, scheduler{scheduler} {}
23
24InnerFence::~InnerFence() = default;
25
26void InnerFence::Queue() {
27 if (is_stubbed) {
28 return;
29 }
30 ASSERT(!event);
31
32 event = device.GetLogical().CreateEvent();
33 ticks = scheduler.Ticks();
34
35 scheduler.RequestOutsideRenderPassOperationContext();
36 scheduler.Record([event = *event](vk::CommandBuffer cmdbuf) {
37 cmdbuf.SetEvent(event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
38 });
39}
40
41bool InnerFence::IsSignaled() const {
42 if (is_stubbed) {
43 return true;
44 }
45 ASSERT(event);
46 return IsEventSignalled();
47}
48
49void InnerFence::Wait() {
50 if (is_stubbed) {
51 return;
52 }
53 ASSERT(event);
54
55 if (ticks >= scheduler.Ticks()) {
56 scheduler.Flush();
57 }
58 while (!IsEventSignalled()) {
59 std::this_thread::yield();
60 }
61}
62
63bool InnerFence::IsEventSignalled() const {
64 switch (const VkResult result = event.GetStatus()) {
65 case VK_EVENT_SET:
66 return true;
67 case VK_EVENT_RESET:
68 return false;
69 default:
70 throw vk::Exception(result);
71 }
72}
73
74VKFenceManager::VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
75 const VKDevice& device, VKScheduler& scheduler,
76 VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
77 VKQueryCache& query_cache)
78 : GenericFenceManager(system, rasterizer, texture_cache, buffer_cache, query_cache),
79 device{device}, scheduler{scheduler} {}
80
81Fence VKFenceManager::CreateFence(u32 value, bool is_stubbed) {
82 return std::make_shared<InnerFence>(device, scheduler, value, is_stubbed);
83}
84
85Fence VKFenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
86 return std::make_shared<InnerFence>(device, scheduler, addr, value, is_stubbed);
87}
88
89void VKFenceManager::QueueFence(Fence& fence) {
90 fence->Queue();
91}
92
93bool VKFenceManager::IsFenceSignaled(Fence& fence) const {
94 return fence->IsSignaled();
95}
96
97void VKFenceManager::WaitFence(Fence& fence) {
98 fence->Wait();
99}
100
101} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
new file mode 100644
index 000000000..04d07fe6a
--- /dev/null
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -0,0 +1,74 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#pragma once
6
7#include <memory>
8
9#include "video_core/fence_manager.h"
10#include "video_core/renderer_vulkan/wrapper.h"
11
12namespace Core {
13class System;
14}
15
16namespace VideoCore {
17class RasterizerInterface;
18}
19
20namespace Vulkan {
21
22class VKBufferCache;
23class VKDevice;
24class VKQueryCache;
25class VKScheduler;
26class VKTextureCache;
27
28class InnerFence : public VideoCommon::FenceBase {
29public:
30 explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, u32 payload,
31 bool is_stubbed);
32 explicit InnerFence(const VKDevice& device, VKScheduler& scheduler, GPUVAddr address,
33 u32 payload, bool is_stubbed);
34 ~InnerFence();
35
36 void Queue();
37
38 bool IsSignaled() const;
39
40 void Wait();
41
42private:
43 bool IsEventSignalled() const;
44
45 const VKDevice& device;
46 VKScheduler& scheduler;
47 vk::Event event;
48 u64 ticks = 0;
49};
50using Fence = std::shared_ptr<InnerFence>;
51
52using GenericFenceManager =
53 VideoCommon::FenceManager<Fence, VKTextureCache, VKBufferCache, VKQueryCache>;
54
55class VKFenceManager final : public GenericFenceManager {
56public:
57 explicit VKFenceManager(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
58 const VKDevice& device, VKScheduler& scheduler,
59 VKTextureCache& texture_cache, VKBufferCache& buffer_cache,
60 VKQueryCache& query_cache);
61
62protected:
63 Fence CreateFence(u32 value, bool is_stubbed) override;
64 Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
65 void QueueFence(Fence& fence) override;
66 bool IsFenceSignaled(Fence& fence) const override;
67 void WaitFence(Fence& fence) override;
68
69private:
70 const VKDevice& device;
71 VKScheduler& scheduler;
72};
73
74} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index b540b838d..1ac981974 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -6,7 +6,6 @@
6#include <cstring> 6#include <cstring>
7#include <vector> 7#include <vector>
8 8
9#include "common/assert.h"
10#include "common/common_types.h" 9#include "common/common_types.h"
11#include "common/microprofile.h" 10#include "common/microprofile.h"
12#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 11#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -26,12 +25,13 @@ MICROPROFILE_DECLARE(Vulkan_PipelineCache);
26 25
27namespace { 26namespace {
28 27
29VkStencilOpState GetStencilFaceState(const FixedPipelineState::StencilFace& face) { 28template <class StencilFace>
29VkStencilOpState GetStencilFaceState(const StencilFace& face) {
30 VkStencilOpState state; 30 VkStencilOpState state;
31 state.failOp = MaxwellToVK::StencilOp(face.action_stencil_fail); 31 state.failOp = MaxwellToVK::StencilOp(face.ActionStencilFail());
32 state.passOp = MaxwellToVK::StencilOp(face.action_depth_pass); 32 state.passOp = MaxwellToVK::StencilOp(face.ActionDepthPass());
33 state.depthFailOp = MaxwellToVK::StencilOp(face.action_depth_fail); 33 state.depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail());
34 state.compareOp = MaxwellToVK::ComparisonOp(face.test_func); 34 state.compareOp = MaxwellToVK::ComparisonOp(face.TestFunc());
35 state.compareMask = 0; 35 state.compareMask = 0;
36 state.writeMask = 0; 36 state.writeMask = 0;
37 state.reference = 0; 37 state.reference = 0;
@@ -147,6 +147,8 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
147 continue; 147 continue;
148 } 148 }
149 149
150 device.SaveShader(stage->code);
151
150 ci.codeSize = stage->code.size() * sizeof(u32); 152 ci.codeSize = stage->code.size() * sizeof(u32);
151 ci.pCode = stage->code.data(); 153 ci.pCode = stage->code.data();
152 modules.push_back(device.GetLogical().CreateShaderModule(ci)); 154 modules.push_back(device.GetLogical().CreateShaderModule(ci));
@@ -157,43 +159,47 @@ std::vector<vk::ShaderModule> VKGraphicsPipeline::CreateShaderModules(
157vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params, 159vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpass_params,
158 const SPIRVProgram& program) const { 160 const SPIRVProgram& program) const {
159 const auto& vi = fixed_state.vertex_input; 161 const auto& vi = fixed_state.vertex_input;
160 const auto& ia = fixed_state.input_assembly;
161 const auto& ds = fixed_state.depth_stencil; 162 const auto& ds = fixed_state.depth_stencil;
162 const auto& cd = fixed_state.color_blending; 163 const auto& cd = fixed_state.color_blending;
163 const auto& ts = fixed_state.tessellation;
164 const auto& rs = fixed_state.rasterizer; 164 const auto& rs = fixed_state.rasterizer;
165 165
166 std::vector<VkVertexInputBindingDescription> vertex_bindings; 166 std::vector<VkVertexInputBindingDescription> vertex_bindings;
167 std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors; 167 std::vector<VkVertexInputBindingDivisorDescriptionEXT> vertex_binding_divisors;
168 for (std::size_t i = 0; i < vi.num_bindings; ++i) { 168 for (std::size_t index = 0; index < std::size(vi.bindings); ++index) {
169 const auto& binding = vi.bindings[i]; 169 const auto& binding = vi.bindings[index];
170 const bool instanced = binding.divisor != 0; 170 if (!binding.enabled) {
171 continue;
172 }
173 const bool instanced = vi.binding_divisors[index] != 0;
171 const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX; 174 const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
172 175
173 auto& vertex_binding = vertex_bindings.emplace_back(); 176 auto& vertex_binding = vertex_bindings.emplace_back();
174 vertex_binding.binding = binding.index; 177 vertex_binding.binding = static_cast<u32>(index);
175 vertex_binding.stride = binding.stride; 178 vertex_binding.stride = binding.stride;
176 vertex_binding.inputRate = rate; 179 vertex_binding.inputRate = rate;
177 180
178 if (instanced) { 181 if (instanced) {
179 auto& binding_divisor = vertex_binding_divisors.emplace_back(); 182 auto& binding_divisor = vertex_binding_divisors.emplace_back();
180 binding_divisor.binding = binding.index; 183 binding_divisor.binding = static_cast<u32>(index);
181 binding_divisor.divisor = binding.divisor; 184 binding_divisor.divisor = vi.binding_divisors[index];
182 } 185 }
183 } 186 }
184 187
185 std::vector<VkVertexInputAttributeDescription> vertex_attributes; 188 std::vector<VkVertexInputAttributeDescription> vertex_attributes;
186 const auto& input_attributes = program[0]->entries.attributes; 189 const auto& input_attributes = program[0]->entries.attributes;
187 for (std::size_t i = 0; i < vi.num_attributes; ++i) { 190 for (std::size_t index = 0; index < std::size(vi.attributes); ++index) {
188 const auto& attribute = vi.attributes[i]; 191 const auto& attribute = vi.attributes[index];
189 if (input_attributes.find(attribute.index) == input_attributes.end()) { 192 if (!attribute.enabled) {
193 continue;
194 }
195 if (input_attributes.find(static_cast<u32>(index)) == input_attributes.end()) {
190 // Skip attributes not used by the vertex shaders. 196 // Skip attributes not used by the vertex shaders.
191 continue; 197 continue;
192 } 198 }
193 auto& vertex_attribute = vertex_attributes.emplace_back(); 199 auto& vertex_attribute = vertex_attributes.emplace_back();
194 vertex_attribute.location = attribute.index; 200 vertex_attribute.location = static_cast<u32>(index);
195 vertex_attribute.binding = attribute.buffer; 201 vertex_attribute.binding = attribute.buffer;
196 vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.type, attribute.size); 202 vertex_attribute.format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size());
197 vertex_attribute.offset = attribute.offset; 203 vertex_attribute.offset = attribute.offset;
198 } 204 }
199 205
@@ -219,15 +225,15 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
219 input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; 225 input_assembly_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
220 input_assembly_ci.pNext = nullptr; 226 input_assembly_ci.pNext = nullptr;
221 input_assembly_ci.flags = 0; 227 input_assembly_ci.flags = 0;
222 input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, ia.topology); 228 input_assembly_ci.topology = MaxwellToVK::PrimitiveTopology(device, rs.Topology());
223 input_assembly_ci.primitiveRestartEnable = 229 input_assembly_ci.primitiveRestartEnable =
224 ia.primitive_restart_enable && SupportsPrimitiveRestart(input_assembly_ci.topology); 230 rs.primitive_restart_enable != 0 && SupportsPrimitiveRestart(input_assembly_ci.topology);
225 231
226 VkPipelineTessellationStateCreateInfo tessellation_ci; 232 VkPipelineTessellationStateCreateInfo tessellation_ci;
227 tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; 233 tessellation_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
228 tessellation_ci.pNext = nullptr; 234 tessellation_ci.pNext = nullptr;
229 tessellation_ci.flags = 0; 235 tessellation_ci.flags = 0;
230 tessellation_ci.patchControlPoints = ts.patch_control_points; 236 tessellation_ci.patchControlPoints = rs.patch_control_points_minus_one.Value() + 1;
231 237
232 VkPipelineViewportStateCreateInfo viewport_ci; 238 VkPipelineViewportStateCreateInfo viewport_ci;
233 viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; 239 viewport_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
@@ -242,12 +248,12 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
242 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; 248 rasterization_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
243 rasterization_ci.pNext = nullptr; 249 rasterization_ci.pNext = nullptr;
244 rasterization_ci.flags = 0; 250 rasterization_ci.flags = 0;
245 rasterization_ci.depthClampEnable = rs.depth_clamp_enable; 251 rasterization_ci.depthClampEnable = rs.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE;
246 rasterization_ci.rasterizerDiscardEnable = VK_FALSE; 252 rasterization_ci.rasterizerDiscardEnable = VK_FALSE;
247 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL; 253 rasterization_ci.polygonMode = VK_POLYGON_MODE_FILL;
248 rasterization_ci.cullMode = 254 rasterization_ci.cullMode =
249 rs.cull_enable ? MaxwellToVK::CullFace(rs.cull_face) : VK_CULL_MODE_NONE; 255 rs.cull_enable ? MaxwellToVK::CullFace(rs.CullFace()) : VK_CULL_MODE_NONE;
250 rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.front_face); 256 rasterization_ci.frontFace = MaxwellToVK::FrontFace(rs.FrontFace());
251 rasterization_ci.depthBiasEnable = rs.depth_bias_enable; 257 rasterization_ci.depthBiasEnable = rs.depth_bias_enable;
252 rasterization_ci.depthBiasConstantFactor = 0.0f; 258 rasterization_ci.depthBiasConstantFactor = 0.0f;
253 rasterization_ci.depthBiasClamp = 0.0f; 259 rasterization_ci.depthBiasClamp = 0.0f;
@@ -271,40 +277,38 @@ vk::Pipeline VKGraphicsPipeline::CreatePipeline(const RenderPassParams& renderpa
271 depth_stencil_ci.flags = 0; 277 depth_stencil_ci.flags = 0;
272 depth_stencil_ci.depthTestEnable = ds.depth_test_enable; 278 depth_stencil_ci.depthTestEnable = ds.depth_test_enable;
273 depth_stencil_ci.depthWriteEnable = ds.depth_write_enable; 279 depth_stencil_ci.depthWriteEnable = ds.depth_write_enable;
274 depth_stencil_ci.depthCompareOp = ds.depth_test_enable 280 depth_stencil_ci.depthCompareOp =
275 ? MaxwellToVK::ComparisonOp(ds.depth_test_function) 281 ds.depth_test_enable ? MaxwellToVK::ComparisonOp(ds.DepthTestFunc()) : VK_COMPARE_OP_ALWAYS;
276 : VK_COMPARE_OP_ALWAYS;
277 depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable; 282 depth_stencil_ci.depthBoundsTestEnable = ds.depth_bounds_enable;
278 depth_stencil_ci.stencilTestEnable = ds.stencil_enable; 283 depth_stencil_ci.stencilTestEnable = ds.stencil_enable;
279 depth_stencil_ci.front = GetStencilFaceState(ds.front_stencil); 284 depth_stencil_ci.front = GetStencilFaceState(ds.front);
280 depth_stencil_ci.back = GetStencilFaceState(ds.back_stencil); 285 depth_stencil_ci.back = GetStencilFaceState(ds.back);
281 depth_stencil_ci.minDepthBounds = 0.0f; 286 depth_stencil_ci.minDepthBounds = 0.0f;
282 depth_stencil_ci.maxDepthBounds = 0.0f; 287 depth_stencil_ci.maxDepthBounds = 0.0f;
283 288
284 std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments; 289 std::array<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
285 const std::size_t num_attachments = 290 const auto num_attachments = static_cast<std::size_t>(renderpass_params.num_color_attachments);
286 std::min(cd.attachments_count, renderpass_params.color_attachments.size()); 291 for (std::size_t index = 0; index < num_attachments; ++index) {
287 for (std::size_t i = 0; i < num_attachments; ++i) { 292 static constexpr std::array COMPONENT_TABLE = {
288 static constexpr std::array component_table = {
289 VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT, 293 VK_COLOR_COMPONENT_R_BIT, VK_COLOR_COMPONENT_G_BIT, VK_COLOR_COMPONENT_B_BIT,
290 VK_COLOR_COMPONENT_A_BIT}; 294 VK_COLOR_COMPONENT_A_BIT};
291 const auto& blend = cd.attachments[i]; 295 const auto& blend = cd.attachments[index];
292 296
293 VkColorComponentFlags color_components = 0; 297 VkColorComponentFlags color_components = 0;
294 for (std::size_t j = 0; j < component_table.size(); ++j) { 298 for (std::size_t i = 0; i < COMPONENT_TABLE.size(); ++i) {
295 if (blend.components[j]) { 299 if (blend.Mask()[i]) {
296 color_components |= component_table[j]; 300 color_components |= COMPONENT_TABLE[i];
297 } 301 }
298 } 302 }
299 303
300 VkPipelineColorBlendAttachmentState& attachment = cb_attachments[i]; 304 VkPipelineColorBlendAttachmentState& attachment = cb_attachments[index];
301 attachment.blendEnable = blend.enable; 305 attachment.blendEnable = blend.enable != 0;
302 attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.src_rgb_func); 306 attachment.srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor());
303 attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.dst_rgb_func); 307 attachment.dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor());
304 attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.rgb_equation); 308 attachment.colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB());
305 attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.src_a_func); 309 attachment.srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor());
306 attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.dst_a_func); 310 attachment.dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor());
307 attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.a_equation); 311 attachment.alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha());
308 attachment.colorWriteMask = color_components; 312 attachment.colorWriteMask = color_components;
309 } 313 }
310 314
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index 7aba70960..a1d699a6c 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -5,16 +5,13 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <memory>
9#include <optional> 8#include <optional>
10#include <unordered_map>
11#include <vector> 9#include <vector>
12 10
13#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
14#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 12#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
15#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 13#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
16#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 14#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
17#include "video_core/renderer_vulkan/vk_resource_manager.h"
18#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 15#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
19#include "video_core/renderer_vulkan/wrapper.h" 16#include "video_core/renderer_vulkan/wrapper.h"
20 17
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.cpp b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
index 6a9e658bf..b4c650a63 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.cpp
@@ -118,8 +118,7 @@ private:
118}; 118};
119 119
120VKMemoryManager::VKMemoryManager(const VKDevice& device) 120VKMemoryManager::VKMemoryManager(const VKDevice& device)
121 : device{device}, properties{device.GetPhysical().GetMemoryProperties()}, 121 : device{device}, properties{device.GetPhysical().GetMemoryProperties()} {}
122 is_memory_unified{GetMemoryUnified(properties)} {}
123 122
124VKMemoryManager::~VKMemoryManager() = default; 123VKMemoryManager::~VKMemoryManager() = default;
125 124
@@ -209,16 +208,6 @@ VKMemoryCommit VKMemoryManager::TryAllocCommit(const VkMemoryRequirements& requi
209 return {}; 208 return {};
210} 209}
211 210
212bool VKMemoryManager::GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties) {
213 for (u32 heap_index = 0; heap_index < properties.memoryHeapCount; ++heap_index) {
214 if (!(properties.memoryHeaps[heap_index].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)) {
215 // Memory is considered unified when heaps are device local only.
216 return false;
217 }
218 }
219 return true;
220}
221
222VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation, 211VKMemoryCommitImpl::VKMemoryCommitImpl(const VKDevice& device, VKMemoryAllocation* allocation,
223 const vk::DeviceMemory& memory, u64 begin, u64 end) 212 const vk::DeviceMemory& memory, u64 begin, u64 end)
224 : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {} 213 : device{device}, memory{memory}, interval{begin, end}, allocation{allocation} {}
diff --git a/src/video_core/renderer_vulkan/vk_memory_manager.h b/src/video_core/renderer_vulkan/vk_memory_manager.h
index 5b6858e9b..1af88e3d4 100644
--- a/src/video_core/renderer_vulkan/vk_memory_manager.h
+++ b/src/video_core/renderer_vulkan/vk_memory_manager.h
@@ -40,11 +40,6 @@ public:
40 /// Commits memory required by the image and binds it. 40 /// Commits memory required by the image and binds it.
41 VKMemoryCommit Commit(const vk::Image& image, bool host_visible); 41 VKMemoryCommit Commit(const vk::Image& image, bool host_visible);
42 42
43 /// Returns true if the memory allocations are done always in host visible and coherent memory.
44 bool IsMemoryUnified() const {
45 return is_memory_unified;
46 }
47
48private: 43private:
49 /// Allocates a chunk of memory. 44 /// Allocates a chunk of memory.
50 bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size); 45 bool AllocMemory(VkMemoryPropertyFlags wanted_properties, u32 type_mask, u64 size);
@@ -53,12 +48,8 @@ private:
53 VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements, 48 VKMemoryCommit TryAllocCommit(const VkMemoryRequirements& requirements,
54 VkMemoryPropertyFlags wanted_properties); 49 VkMemoryPropertyFlags wanted_properties);
55 50
56 /// Returns true if the device uses an unified memory model. 51 const VKDevice& device; ///< Device handler.
57 static bool GetMemoryUnified(const VkPhysicalDeviceMemoryProperties& properties); 52 const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
58
59 const VKDevice& device; ///< Device handler.
60 const VkPhysicalDeviceMemoryProperties properties; ///< Physical device properties.
61 const bool is_memory_unified; ///< True if memory model is unified.
62 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations. 53 std::vector<std::unique_ptr<VKMemoryAllocation>> allocations; ///< Current allocations.
63}; 54};
64 55
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 90e3a8edd..fe45ed269 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -22,17 +22,22 @@
22#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 22#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
23#include "video_core/renderer_vulkan/vk_rasterizer.h" 23#include "video_core/renderer_vulkan/vk_rasterizer.h"
24#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 24#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
25#include "video_core/renderer_vulkan/vk_resource_manager.h"
26#include "video_core/renderer_vulkan/vk_scheduler.h" 25#include "video_core/renderer_vulkan/vk_scheduler.h"
27#include "video_core/renderer_vulkan/vk_update_descriptor.h" 26#include "video_core/renderer_vulkan/vk_update_descriptor.h"
28#include "video_core/renderer_vulkan/wrapper.h" 27#include "video_core/renderer_vulkan/wrapper.h"
29#include "video_core/shader/compiler_settings.h" 28#include "video_core/shader/compiler_settings.h"
29#include "video_core/shader/memory_util.h"
30 30
31namespace Vulkan { 31namespace Vulkan {
32 32
33MICROPROFILE_DECLARE(Vulkan_PipelineCache); 33MICROPROFILE_DECLARE(Vulkan_PipelineCache);
34 34
35using Tegra::Engines::ShaderType; 35using Tegra::Engines::ShaderType;
36using VideoCommon::Shader::GetShaderAddress;
37using VideoCommon::Shader::GetShaderCode;
38using VideoCommon::Shader::KERNEL_MAIN_OFFSET;
39using VideoCommon::Shader::ProgramCode;
40using VideoCommon::Shader::STAGE_MAIN_OFFSET;
36 41
37namespace { 42namespace {
38 43
@@ -45,60 +50,6 @@ constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
45constexpr VideoCommon::Shader::CompilerSettings compiler_settings{ 50constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
46 VideoCommon::Shader::CompileDepth::FullDecompile}; 51 VideoCommon::Shader::CompileDepth::FullDecompile};
47 52
48/// Gets the address for the specified shader stage program
49GPUVAddr GetShaderAddress(Core::System& system, Maxwell::ShaderProgram program) {
50 const auto& gpu{system.GPU().Maxwell3D()};
51 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
52 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
53}
54
55/// Gets if the current instruction offset is a scheduler instruction
56constexpr bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
57 // Sched instructions appear once every 4 instructions.
58 constexpr std::size_t SchedPeriod = 4;
59 const std::size_t absolute_offset = offset - main_offset;
60 return (absolute_offset % SchedPeriod) == 0;
61}
62
63/// Calculates the size of a program stream
64std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
65 const std::size_t start_offset = is_compute ? 0 : 10;
66 // This is the encoded version of BRA that jumps to itself. All Nvidia
67 // shaders end with one.
68 constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
69 constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
70 std::size_t offset = start_offset;
71 while (offset < program.size()) {
72 const u64 instruction = program[offset];
73 if (!IsSchedInstruction(offset, start_offset)) {
74 if ((instruction & mask) == self_jumping_branch) {
75 // End on Maxwell's "nop" instruction
76 break;
77 }
78 if (instruction == 0) {
79 break;
80 }
81 }
82 ++offset;
83 }
84 // The last instruction is included in the program size
85 return std::min(offset + 1, program.size());
86}
87
88/// Gets the shader program code from memory for the specified address
89ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, const GPUVAddr gpu_addr,
90 const u8* host_ptr, bool is_compute) {
91 ProgramCode program_code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
92 ASSERT_OR_EXECUTE(host_ptr != nullptr, {
93 std::fill(program_code.begin(), program_code.end(), 0);
94 return program_code;
95 });
96 memory_manager.ReadBlockUnsafe(gpu_addr, program_code.data(),
97 program_code.size() * sizeof(u64));
98 program_code.resize(CalculateProgramSize(program_code, is_compute));
99 return program_code;
100}
101
102constexpr std::size_t GetStageFromProgram(std::size_t program) { 53constexpr std::size_t GetStageFromProgram(std::size_t program) {
103 return program == 0 ? 0 : program - 1; 54 return program == 0 ? 0 : program - 1;
104} 55}
@@ -133,7 +84,7 @@ void AddBindings(std::vector<VkDescriptorSetLayoutBinding>& bindings, u32& bindi
133 u32 count = 1; 84 u32 count = 1;
134 if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) { 85 if constexpr (descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
135 // Combined image samplers can be arrayed. 86 // Combined image samplers can be arrayed.
136 count = container[i].Size(); 87 count = container[i].size;
137 } 88 }
138 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back(); 89 VkDescriptorSetLayoutBinding& entry = bindings.emplace_back();
139 entry.binding = binding++; 90 entry.binding = binding++;
@@ -161,6 +112,24 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
161 112
162} // Anonymous namespace 113} // Anonymous namespace
163 114
115std::size_t GraphicsPipelineCacheKey::Hash() const noexcept {
116 const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
117 return static_cast<std::size_t>(hash);
118}
119
120bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) const noexcept {
121 return std::memcmp(&rhs, this, sizeof *this) == 0;
122}
123
124std::size_t ComputePipelineCacheKey::Hash() const noexcept {
125 const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
126 return static_cast<std::size_t>(hash);
127}
128
129bool ComputePipelineCacheKey::operator==(const ComputePipelineCacheKey& rhs) const noexcept {
130 return std::memcmp(&rhs, this, sizeof *this) == 0;
131}
132
164CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, 133CachedShader::CachedShader(Core::System& system, Tegra::Engines::ShaderType stage,
165 GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code, 134 GPUVAddr gpu_addr, VAddr cpu_addr, ProgramCode program_code,
166 u32 main_offset) 135 u32 main_offset)
@@ -207,18 +176,22 @@ std::array<Shader, Maxwell::MaxShaderProgram> VKPipelineCache::GetShaders() {
207 const GPUVAddr program_addr{GetShaderAddress(system, program)}; 176 const GPUVAddr program_addr{GetShaderAddress(system, program)};
208 const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr); 177 const std::optional cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
209 ASSERT(cpu_addr); 178 ASSERT(cpu_addr);
210 auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr; 179 auto shader = cpu_addr ? TryGet(*cpu_addr) : null_shader;
211 if (!shader) { 180 if (!shader) {
212 const auto host_ptr{memory_manager.GetPointer(program_addr)}; 181 const auto host_ptr{memory_manager.GetPointer(program_addr)};
213 182
214 // No shader found - create a new one 183 // No shader found - create a new one
215 constexpr u32 stage_offset = 10; 184 constexpr u32 stage_offset = STAGE_MAIN_OFFSET;
216 const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1); 185 const auto stage = static_cast<Tegra::Engines::ShaderType>(index == 0 ? 0 : index - 1);
217 auto code = GetShaderCode(memory_manager, program_addr, host_ptr, false); 186 ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, false);
218 187
219 shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr, 188 shader = std::make_shared<CachedShader>(system, stage, program_addr, *cpu_addr,
220 std::move(code), stage_offset); 189 std::move(code), stage_offset);
221 Register(shader); 190 if (cpu_addr) {
191 Register(shader);
192 } else {
193 null_shader = shader;
194 }
222 } 195 }
223 shaders[index] = std::move(shader); 196 shaders[index] = std::move(shader);
224 } 197 }
@@ -261,17 +234,20 @@ VKComputePipeline& VKPipelineCache::GetComputePipeline(const ComputePipelineCach
261 const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr); 234 const auto cpu_addr = memory_manager.GpuToCpuAddress(program_addr);
262 ASSERT(cpu_addr); 235 ASSERT(cpu_addr);
263 236
264 auto shader = cpu_addr ? TryGet(*cpu_addr) : nullptr; 237 auto shader = cpu_addr ? TryGet(*cpu_addr) : null_kernel;
265 if (!shader) { 238 if (!shader) {
266 // No shader found - create a new one 239 // No shader found - create a new one
267 const auto host_ptr = memory_manager.GetPointer(program_addr); 240 const auto host_ptr = memory_manager.GetPointer(program_addr);
268 241
269 auto code = GetShaderCode(memory_manager, program_addr, host_ptr, true); 242 ProgramCode code = GetShaderCode(memory_manager, program_addr, host_ptr, true);
270 constexpr u32 kernel_main_offset = 0;
271 shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute, 243 shader = std::make_shared<CachedShader>(system, Tegra::Engines::ShaderType::Compute,
272 program_addr, *cpu_addr, std::move(code), 244 program_addr, *cpu_addr, std::move(code),
273 kernel_main_offset); 245 KERNEL_MAIN_OFFSET);
274 Register(shader); 246 if (cpu_addr) {
247 Register(shader);
248 } else {
249 null_kernel = shader;
250 }
275 } 251 }
276 252
277 Specialization specialization; 253 Specialization specialization;
@@ -329,12 +305,14 @@ VKPipelineCache::DecompileShaders(const GraphicsPipelineCacheKey& key) {
329 const auto& gpu = system.GPU().Maxwell3D(); 305 const auto& gpu = system.GPU().Maxwell3D();
330 306
331 Specialization specialization; 307 Specialization specialization;
332 if (fixed_state.input_assembly.topology == Maxwell::PrimitiveTopology::Points) { 308 if (fixed_state.rasterizer.Topology() == Maxwell::PrimitiveTopology::Points) {
333 ASSERT(fixed_state.input_assembly.point_size != 0.0f); 309 float point_size;
334 specialization.point_size = fixed_state.input_assembly.point_size; 310 std::memcpy(&point_size, &fixed_state.rasterizer.point_size, sizeof(float));
311 specialization.point_size = point_size;
312 ASSERT(point_size != 0.0f);
335 } 313 }
336 for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) { 314 for (std::size_t i = 0; i < Maxwell::NumVertexAttributes; ++i) {
337 specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].type; 315 specialization.attribute_types[i] = fixed_state.vertex_input.attributes[i].Type();
338 } 316 }
339 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one; 317 specialization.ndc_minus_one_to_one = fixed_state.rasterizer.ndc_minus_one_to_one;
340 318
@@ -383,7 +361,7 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u3
383 361
384 if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) { 362 if constexpr (descriptor_type == COMBINED_IMAGE_SAMPLER) {
385 for (u32 i = 0; i < count; ++i) { 363 for (u32 i = 0; i < count; ++i) {
386 const u32 num_samplers = container[i].Size(); 364 const u32 num_samplers = container[i].size;
387 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back(); 365 VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
388 entry.dstBinding = binding; 366 entry.dstBinding = binding;
389 entry.dstArrayElement = 0; 367 entry.dstArrayElement = 0;
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 7ccdb7083..0b5796fef 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -7,7 +7,6 @@
7#include <array> 7#include <array>
8#include <cstddef> 8#include <cstddef>
9#include <memory> 9#include <memory>
10#include <tuple>
11#include <type_traits> 10#include <type_traits>
12#include <unordered_map> 11#include <unordered_map>
13#include <utility> 12#include <utility>
@@ -22,12 +21,11 @@
22#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 21#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
23#include "video_core/renderer_vulkan/vk_graphics_pipeline.h" 22#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
24#include "video_core/renderer_vulkan/vk_renderpass_cache.h" 23#include "video_core/renderer_vulkan/vk_renderpass_cache.h"
25#include "video_core/renderer_vulkan/vk_resource_manager.h"
26#include "video_core/renderer_vulkan/vk_shader_decompiler.h" 24#include "video_core/renderer_vulkan/vk_shader_decompiler.h"
27#include "video_core/renderer_vulkan/wrapper.h" 25#include "video_core/renderer_vulkan/wrapper.h"
26#include "video_core/shader/memory_util.h"
28#include "video_core/shader/registry.h" 27#include "video_core/shader/registry.h"
29#include "video_core/shader/shader_ir.h" 28#include "video_core/shader/shader_ir.h"
30#include "video_core/surface.h"
31 29
32namespace Core { 30namespace Core {
33class System; 31class System;
@@ -47,46 +45,40 @@ class CachedShader;
47using Shader = std::shared_ptr<CachedShader>; 45using Shader = std::shared_ptr<CachedShader>;
48using Maxwell = Tegra::Engines::Maxwell3D::Regs; 46using Maxwell = Tegra::Engines::Maxwell3D::Regs;
49 47
50using ProgramCode = std::vector<u64>;
51
52struct GraphicsPipelineCacheKey { 48struct GraphicsPipelineCacheKey {
53 FixedPipelineState fixed_state; 49 FixedPipelineState fixed_state;
54 std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
55 RenderPassParams renderpass_params; 50 RenderPassParams renderpass_params;
51 std::array<GPUVAddr, Maxwell::MaxShaderProgram> shaders;
52 u64 padding; // This is necessary for unique object representations
56 53
57 std::size_t Hash() const noexcept { 54 std::size_t Hash() const noexcept;
58 std::size_t hash = fixed_state.Hash(); 55
59 for (const auto& shader : shaders) { 56 bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept;
60 boost::hash_combine(hash, shader);
61 }
62 boost::hash_combine(hash, renderpass_params.Hash());
63 return hash;
64 }
65 57
66 bool operator==(const GraphicsPipelineCacheKey& rhs) const noexcept { 58 bool operator!=(const GraphicsPipelineCacheKey& rhs) const noexcept {
67 return std::tie(fixed_state, shaders, renderpass_params) == 59 return !operator==(rhs);
68 std::tie(rhs.fixed_state, rhs.shaders, rhs.renderpass_params);
69 } 60 }
70}; 61};
62static_assert(std::has_unique_object_representations_v<GraphicsPipelineCacheKey>);
63static_assert(std::is_trivially_copyable_v<GraphicsPipelineCacheKey>);
64static_assert(std::is_trivially_constructible_v<GraphicsPipelineCacheKey>);
71 65
72struct ComputePipelineCacheKey { 66struct ComputePipelineCacheKey {
73 GPUVAddr shader{}; 67 GPUVAddr shader;
74 u32 shared_memory_size{}; 68 u32 shared_memory_size;
75 std::array<u32, 3> workgroup_size{}; 69 std::array<u32, 3> workgroup_size;
76 70
77 std::size_t Hash() const noexcept { 71 std::size_t Hash() const noexcept;
78 return static_cast<std::size_t>(shader) ^
79 ((static_cast<std::size_t>(shared_memory_size) >> 7) << 40) ^
80 static_cast<std::size_t>(workgroup_size[0]) ^
81 (static_cast<std::size_t>(workgroup_size[1]) << 16) ^
82 (static_cast<std::size_t>(workgroup_size[2]) << 24);
83 }
84 72
85 bool operator==(const ComputePipelineCacheKey& rhs) const noexcept { 73 bool operator==(const ComputePipelineCacheKey& rhs) const noexcept;
86 return std::tie(shader, shared_memory_size, workgroup_size) == 74
87 std::tie(rhs.shader, rhs.shared_memory_size, rhs.workgroup_size); 75 bool operator!=(const ComputePipelineCacheKey& rhs) const noexcept {
76 return !operator==(rhs);
88 } 77 }
89}; 78};
79static_assert(std::has_unique_object_representations_v<ComputePipelineCacheKey>);
80static_assert(std::is_trivially_copyable_v<ComputePipelineCacheKey>);
81static_assert(std::is_trivially_constructible_v<ComputePipelineCacheKey>);
90 82
91} // namespace Vulkan 83} // namespace Vulkan
92 84
@@ -113,7 +105,8 @@ namespace Vulkan {
113class CachedShader final : public RasterizerCacheObject { 105class CachedShader final : public RasterizerCacheObject {
114public: 106public:
115 explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr, 107 explicit CachedShader(Core::System& system, Tegra::Engines::ShaderType stage, GPUVAddr gpu_addr,
116 VAddr cpu_addr, ProgramCode program_code, u32 main_offset); 108 VAddr cpu_addr, VideoCommon::Shader::ProgramCode program_code,
109 u32 main_offset);
117 ~CachedShader(); 110 ~CachedShader();
118 111
119 GPUVAddr GetGpuAddr() const { 112 GPUVAddr GetGpuAddr() const {
@@ -145,7 +138,7 @@ private:
145 Tegra::Engines::ShaderType stage); 138 Tegra::Engines::ShaderType stage);
146 139
147 GPUVAddr gpu_addr{}; 140 GPUVAddr gpu_addr{};
148 ProgramCode program_code; 141 VideoCommon::Shader::ProgramCode program_code;
149 VideoCommon::Shader::Registry registry; 142 VideoCommon::Shader::Registry registry;
150 VideoCommon::Shader::ShaderIR shader_ir; 143 VideoCommon::Shader::ShaderIR shader_ir;
151 ShaderEntries entries; 144 ShaderEntries entries;
@@ -182,6 +175,9 @@ private:
182 VKUpdateDescriptorQueue& update_descriptor_queue; 175 VKUpdateDescriptorQueue& update_descriptor_queue;
183 VKRenderPassCache& renderpass_cache; 176 VKRenderPassCache& renderpass_cache;
184 177
178 Shader null_shader{};
179 Shader null_kernel{};
180
185 std::array<Shader, Maxwell::MaxShaderProgram> last_shaders; 181 std::array<Shader, Maxwell::MaxShaderProgram> last_shaders;
186 182
187 GraphicsPipelineCacheKey last_graphics_key; 183 GraphicsPipelineCacheKey last_graphics_key;
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 0966c7ff7..bc91c48cc 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -4,7 +4,6 @@
4 4
5#include <algorithm> 5#include <algorithm>
6#include <cstddef> 6#include <cstddef>
7#include <cstdint>
8#include <utility> 7#include <utility>
9#include <vector> 8#include <vector>
10 9
@@ -113,8 +112,19 @@ u64 HostCounter::BlockingQuery() const {
113 if (ticks >= cache.Scheduler().Ticks()) { 112 if (ticks >= cache.Scheduler().Ticks()) {
114 cache.Scheduler().Flush(); 113 cache.Scheduler().Flush();
115 } 114 }
116 return cache.Device().GetLogical().GetQueryResult<u64>( 115 u64 data;
117 query.first, query.second, VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT); 116 const VkResult result = cache.Device().GetLogical().GetQueryResults(
117 query.first, query.second, 1, sizeof(data), &data, sizeof(data),
118 VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
119 switch (result) {
120 case VK_SUCCESS:
121 return data;
122 case VK_ERROR_DEVICE_LOST:
123 cache.Device().ReportLoss();
124 [[fallthrough]];
125 default:
126 throw vk::Exception(result);
127 }
118} 128}
119 129
120} // namespace Vulkan 130} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b63784f4b..40119e6d3 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -5,7 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <cstddef> 7#include <cstddef>
8#include <cstdint>
9#include <memory> 8#include <memory>
10#include <utility> 9#include <utility>
11#include <vector> 10#include <vector>
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 857bea19f..8b009fc22 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -9,14 +9,13 @@
9#include <vector> 9#include <vector>
10 10
11#include <boost/container/static_vector.hpp> 11#include <boost/container/static_vector.hpp>
12#include <boost/functional/hash.hpp>
13 12
14#include "common/alignment.h" 13#include "common/alignment.h"
15#include "common/assert.h" 14#include "common/assert.h"
16#include "common/logging/log.h" 15#include "common/logging/log.h"
17#include "common/microprofile.h" 16#include "common/microprofile.h"
18#include "core/core.h" 17#include "core/core.h"
19#include "core/memory.h" 18#include "core/settings.h"
20#include "video_core/engines/kepler_compute.h" 19#include "video_core/engines/kepler_compute.h"
21#include "video_core/engines/maxwell_3d.h" 20#include "video_core/engines/maxwell_3d.h"
22#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 21#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
@@ -118,14 +117,13 @@ template <typename Engine, typename Entry>
118Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry, 117Tegra::Texture::FullTextureInfo GetTextureInfo(const Engine& engine, const Entry& entry,
119 std::size_t stage, std::size_t index = 0) { 118 std::size_t stage, std::size_t index = 0) {
120 const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage); 119 const auto stage_type = static_cast<Tegra::Engines::ShaderType>(stage);
121 if (entry.IsBindless()) { 120 if (entry.is_bindless) {
122 const Tegra::Texture::TextureHandle tex_handle = 121 const auto tex_handle = engine.AccessConstBuffer32(stage_type, entry.buffer, entry.offset);
123 engine.AccessConstBuffer32(stage_type, entry.GetBuffer(), entry.GetOffset());
124 return engine.GetTextureInfo(tex_handle); 122 return engine.GetTextureInfo(tex_handle);
125 } 123 }
126 const auto& gpu_profile = engine.AccessGuestDriverProfile(); 124 const auto& gpu_profile = engine.AccessGuestDriverProfile();
127 const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize()); 125 const u32 entry_offset = static_cast<u32>(index * gpu_profile.GetTextureHandlerSize());
128 const u32 offset = entry.GetOffset() + entry_offset; 126 const u32 offset = entry.offset + entry_offset;
129 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) { 127 if constexpr (std::is_same_v<Engine, Tegra::Engines::Maxwell3D>) {
130 return engine.GetStageTexture(stage_type, offset); 128 return engine.GetStageTexture(stage_type, offset);
131 } else { 129 } else {
@@ -292,14 +290,16 @@ RasterizerVulkan::RasterizerVulkan(Core::System& system, Core::Frontend::EmuWind
292 staging_pool(device, memory_manager, scheduler), descriptor_pool(device), 290 staging_pool(device, memory_manager, scheduler), descriptor_pool(device),
293 update_descriptor_queue(device, scheduler), renderpass_cache(device), 291 update_descriptor_queue(device, scheduler), renderpass_cache(device),
294 quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 292 quad_array_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
295 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
296 quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue), 293 quad_indexed_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
294 uint8_pass(device, scheduler, descriptor_pool, staging_pool, update_descriptor_queue),
297 texture_cache(system, *this, device, resource_manager, memory_manager, scheduler, 295 texture_cache(system, *this, device, resource_manager, memory_manager, scheduler,
298 staging_pool), 296 staging_pool),
299 pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue, 297 pipeline_cache(system, *this, device, scheduler, descriptor_pool, update_descriptor_queue,
300 renderpass_cache), 298 renderpass_cache),
301 buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool), 299 buffer_cache(*this, system, device, memory_manager, scheduler, staging_pool),
302 sampler_cache(device), query_cache(system, *this, device, scheduler) { 300 sampler_cache(device),
301 fence_manager(system, *this, device, scheduler, texture_cache, buffer_cache, query_cache),
302 query_cache(system, *this, device, scheduler), wfi_event{device.GetLogical().CreateEvent()} {
303 scheduler.SetQueryCache(query_cache); 303 scheduler.SetQueryCache(query_cache);
304} 304}
305 305
@@ -313,7 +313,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
313 query_cache.UpdateCounters(); 313 query_cache.UpdateCounters();
314 314
315 const auto& gpu = system.GPU().Maxwell3D(); 315 const auto& gpu = system.GPU().Maxwell3D();
316 GraphicsPipelineCacheKey key{GetFixedPipelineState(gpu.regs)}; 316 GraphicsPipelineCacheKey key;
317 key.fixed_state.Fill(gpu.regs);
317 318
318 buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed)); 319 buffer_cache.Map(CalculateGraphicsStreamBufferSize(is_indexed));
319 320
@@ -331,10 +332,11 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
331 332
332 buffer_cache.Unmap(); 333 buffer_cache.Unmap();
333 334
334 const auto texceptions = UpdateAttachments(); 335 const Texceptions texceptions = UpdateAttachments();
335 SetupImageTransitions(texceptions, color_attachments, zeta_attachment); 336 SetupImageTransitions(texceptions, color_attachments, zeta_attachment);
336 337
337 key.renderpass_params = GetRenderPassParams(texceptions); 338 key.renderpass_params = GetRenderPassParams(texceptions);
339 key.padding = 0;
338 340
339 auto& pipeline = pipeline_cache.GetGraphicsPipeline(key); 341 auto& pipeline = pipeline_cache.GetGraphicsPipeline(key);
340 scheduler.BindGraphicsPipeline(pipeline.GetHandle()); 342 scheduler.BindGraphicsPipeline(pipeline.GetHandle());
@@ -347,11 +349,6 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
347 349
348 buffer_bindings.Bind(scheduler); 350 buffer_bindings.Bind(scheduler);
349 351
350 if (device.IsNvDeviceDiagnosticCheckpoints()) {
351 scheduler.Record(
352 [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(&pipeline); });
353 }
354
355 BeginTransformFeedback(); 352 BeginTransformFeedback();
356 353
357 const auto pipeline_layout = pipeline.GetLayout(); 354 const auto pipeline_layout = pipeline.GetLayout();
@@ -365,6 +362,8 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
365 }); 362 });
366 363
367 EndTransformFeedback(); 364 EndTransformFeedback();
365
366 system.GPU().TickWork();
368} 367}
369 368
370void RasterizerVulkan::Clear() { 369void RasterizerVulkan::Clear() {
@@ -453,10 +452,12 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
453 query_cache.UpdateCounters(); 452 query_cache.UpdateCounters();
454 453
455 const auto& launch_desc = system.GPU().KeplerCompute().launch_description; 454 const auto& launch_desc = system.GPU().KeplerCompute().launch_description;
456 const ComputePipelineCacheKey key{ 455 ComputePipelineCacheKey key;
457 code_addr, 456 key.shader = code_addr;
458 launch_desc.shared_alloc, 457 key.shared_memory_size = launch_desc.shared_alloc;
459 {launch_desc.block_dim_x, launch_desc.block_dim_y, launch_desc.block_dim_z}}; 458 key.workgroup_size = {launch_desc.block_dim_x, launch_desc.block_dim_y,
459 launch_desc.block_dim_z};
460
460 auto& pipeline = pipeline_cache.GetComputePipeline(key); 461 auto& pipeline = pipeline_cache.GetComputePipeline(key);
461 462
462 // Compute dispatches can't be executed inside a renderpass 463 // Compute dispatches can't be executed inside a renderpass
@@ -478,11 +479,6 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
478 TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 479 TransitionImages(image_views, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
479 VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT); 480 VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT);
480 481
481 if (device.IsNvDeviceDiagnosticCheckpoints()) {
482 scheduler.Record(
483 [&pipeline](vk::CommandBuffer cmdbuf) { cmdbuf.SetCheckpointNV(nullptr); });
484 }
485
486 scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y, 482 scheduler.Record([grid_x = launch_desc.grid_dim_x, grid_y = launch_desc.grid_dim_y,
487 grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(), 483 grid_z = launch_desc.grid_dim_z, pipeline_handle = pipeline.GetHandle(),
488 layout = pipeline.GetLayout(), 484 layout = pipeline.GetLayout(),
@@ -514,6 +510,13 @@ void RasterizerVulkan::FlushRegion(VAddr addr, u64 size) {
514 query_cache.FlushRegion(addr, size); 510 query_cache.FlushRegion(addr, size);
515} 511}
516 512
513bool RasterizerVulkan::MustFlushRegion(VAddr addr, u64 size) {
514 if (!Settings::IsGPULevelHigh()) {
515 return buffer_cache.MustFlushRegion(addr, size);
516 }
517 return texture_cache.MustFlushRegion(addr, size) || buffer_cache.MustFlushRegion(addr, size);
518}
519
517void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) { 520void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
518 if (addr == 0 || size == 0) { 521 if (addr == 0 || size == 0) {
519 return; 522 return;
@@ -524,11 +527,72 @@ void RasterizerVulkan::InvalidateRegion(VAddr addr, u64 size) {
524 query_cache.InvalidateRegion(addr, size); 527 query_cache.InvalidateRegion(addr, size);
525} 528}
526 529
530void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
531 if (addr == 0 || size == 0) {
532 return;
533 }
534 texture_cache.OnCPUWrite(addr, size);
535 pipeline_cache.InvalidateRegion(addr, size);
536 buffer_cache.OnCPUWrite(addr, size);
537 query_cache.InvalidateRegion(addr, size);
538}
539
540void RasterizerVulkan::SyncGuestHost() {
541 texture_cache.SyncGuestHost();
542 buffer_cache.SyncGuestHost();
543}
544
545void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) {
546 auto& gpu{system.GPU()};
547 if (!gpu.IsAsync()) {
548 gpu.MemoryManager().Write<u32>(addr, value);
549 return;
550 }
551 fence_manager.SignalSemaphore(addr, value);
552}
553
554void RasterizerVulkan::SignalSyncPoint(u32 value) {
555 auto& gpu{system.GPU()};
556 if (!gpu.IsAsync()) {
557 gpu.IncrementSyncPoint(value);
558 return;
559 }
560 fence_manager.SignalSyncPoint(value);
561}
562
563void RasterizerVulkan::ReleaseFences() {
564 auto& gpu{system.GPU()};
565 if (!gpu.IsAsync()) {
566 return;
567 }
568 fence_manager.WaitPendingFences();
569}
570
527void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) { 571void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size) {
528 FlushRegion(addr, size); 572 FlushRegion(addr, size);
529 InvalidateRegion(addr, size); 573 InvalidateRegion(addr, size);
530} 574}
531 575
576void RasterizerVulkan::WaitForIdle() {
577 // Everything but wait pixel operations. This intentionally includes FRAGMENT_SHADER_BIT because
578 // fragment shaders can still write storage buffers.
579 VkPipelineStageFlags flags =
580 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
581 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
582 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
583 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
584 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT;
585 if (device.IsExtTransformFeedbackSupported()) {
586 flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
587 }
588
589 scheduler.RequestOutsideRenderPassOperationContext();
590 scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) {
591 cmdbuf.SetEvent(event, flags);
592 cmdbuf.WaitEvents(event, flags, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, {}, {}, {});
593 });
594}
595
532void RasterizerVulkan::FlushCommands() { 596void RasterizerVulkan::FlushCommands() {
533 if (draw_counter > 0) { 597 if (draw_counter > 0) {
534 draw_counter = 0; 598 draw_counter = 0;
@@ -609,7 +673,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
609 Texceptions texceptions; 673 Texceptions texceptions;
610 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) { 674 for (std::size_t rt = 0; rt < Maxwell::NumRenderTargets; ++rt) {
611 if (update_rendertargets) { 675 if (update_rendertargets) {
612 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt); 676 color_attachments[rt] = texture_cache.GetColorBufferSurface(rt, true);
613 } 677 }
614 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) { 678 if (color_attachments[rt] && WalkAttachmentOverlaps(*color_attachments[rt])) {
615 texceptions[rt] = true; 679 texceptions[rt] = true;
@@ -617,7 +681,7 @@ RasterizerVulkan::Texceptions RasterizerVulkan::UpdateAttachments() {
617 } 681 }
618 682
619 if (update_rendertargets) { 683 if (update_rendertargets) {
620 zeta_attachment = texture_cache.GetDepthBufferSurface(); 684 zeta_attachment = texture_cache.GetDepthBufferSurface(true);
621 } 685 }
622 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) { 686 if (zeta_attachment && WalkAttachmentOverlaps(*zeta_attachment)) {
623 texceptions[ZETA_TEXCEPTION_INDEX] = true; 687 texceptions[ZETA_TEXCEPTION_INDEX] = true;
@@ -645,7 +709,7 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
645 FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(), 709 FramebufferCacheKey key{renderpass, std::numeric_limits<u32>::max(),
646 std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()}; 710 std::numeric_limits<u32>::max(), std::numeric_limits<u32>::max()};
647 711
648 const auto try_push = [&](const View& view) { 712 const auto try_push = [&key](const View& view) {
649 if (!view) { 713 if (!view) {
650 return false; 714 return false;
651 } 715 }
@@ -656,7 +720,9 @@ std::tuple<VkFramebuffer, VkExtent2D> RasterizerVulkan::ConfigureFramebuffers(
656 return true; 720 return true;
657 }; 721 };
658 722
659 for (std::size_t index = 0; index < std::size(color_attachments); ++index) { 723 const auto& regs = system.GPU().Maxwell3D().regs;
724 const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
725 for (std::size_t index = 0; index < num_attachments; ++index) {
660 if (try_push(color_attachments[index])) { 726 if (try_push(color_attachments[index])) {
661 texture_cache.MarkColorBufferInUse(index); 727 texture_cache.MarkColorBufferInUse(index);
662 } 728 }
@@ -807,42 +873,49 @@ void RasterizerVulkan::SetupVertexArrays(FixedPipelineState::VertexInput& vertex
807 BufferBindings& buffer_bindings) { 873 BufferBindings& buffer_bindings) {
808 const auto& regs = system.GPU().Maxwell3D().regs; 874 const auto& regs = system.GPU().Maxwell3D().regs;
809 875
810 for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexAttributes); ++index) { 876 for (std::size_t index = 0; index < Maxwell::NumVertexAttributes; ++index) {
811 const auto& attrib = regs.vertex_attrib_format[index]; 877 const auto& attrib = regs.vertex_attrib_format[index];
812 if (!attrib.IsValid()) { 878 if (!attrib.IsValid()) {
879 vertex_input.SetAttribute(index, false, 0, 0, {}, {});
813 continue; 880 continue;
814 } 881 }
815 882
816 const auto& buffer = regs.vertex_array[attrib.buffer]; 883 [[maybe_unused]] const auto& buffer = regs.vertex_array[attrib.buffer];
817 ASSERT(buffer.IsEnabled()); 884 ASSERT(buffer.IsEnabled());
818 885
819 vertex_input.attributes[vertex_input.num_attributes++] = 886 vertex_input.SetAttribute(index, true, attrib.buffer, attrib.offset, attrib.type.Value(),
820 FixedPipelineState::VertexAttribute(index, attrib.buffer, attrib.type, attrib.size, 887 attrib.size.Value());
821 attrib.offset);
822 } 888 }
823 889
824 for (u32 index = 0; index < static_cast<u32>(Maxwell::NumVertexArrays); ++index) { 890 for (std::size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
825 const auto& vertex_array = regs.vertex_array[index]; 891 const auto& vertex_array = regs.vertex_array[index];
826 if (!vertex_array.IsEnabled()) { 892 if (!vertex_array.IsEnabled()) {
893 vertex_input.SetBinding(index, false, 0, 0);
827 continue; 894 continue;
828 } 895 }
896 vertex_input.SetBinding(
897 index, true, vertex_array.stride,
898 regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
829 899
830 const GPUVAddr start{vertex_array.StartAddress()}; 900 const GPUVAddr start{vertex_array.StartAddress()};
831 const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()}; 901 const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
832 902
833 ASSERT(end > start); 903 ASSERT(end >= start);
834 const std::size_t size{end - start + 1}; 904 const std::size_t size{end - start};
905 if (size == 0) {
906 buffer_bindings.AddVertexBinding(DefaultBuffer(), 0);
907 continue;
908 }
835 const auto [buffer, offset] = buffer_cache.UploadMemory(start, size); 909 const auto [buffer, offset] = buffer_cache.UploadMemory(start, size);
836
837 vertex_input.bindings[vertex_input.num_bindings++] = FixedPipelineState::VertexBinding(
838 index, vertex_array.stride,
839 regs.instanced_arrays.IsInstancingEnabled(index) ? vertex_array.divisor : 0);
840 buffer_bindings.AddVertexBinding(buffer, offset); 910 buffer_bindings.AddVertexBinding(buffer, offset);
841 } 911 }
842} 912}
843 913
844void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params, 914void RasterizerVulkan::SetupIndexBuffer(BufferBindings& buffer_bindings, DrawParameters& params,
845 bool is_indexed) { 915 bool is_indexed) {
916 if (params.num_vertices == 0) {
917 return;
918 }
846 const auto& regs = system.GPU().Maxwell3D().regs; 919 const auto& regs = system.GPU().Maxwell3D().regs;
847 switch (regs.draw.topology) { 920 switch (regs.draw.topology) {
848 case Maxwell::PrimitiveTopology::Quads: { 921 case Maxwell::PrimitiveTopology::Quads: {
@@ -918,7 +991,7 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::
918 MICROPROFILE_SCOPE(Vulkan_Textures); 991 MICROPROFILE_SCOPE(Vulkan_Textures);
919 const auto& gpu = system.GPU().Maxwell3D(); 992 const auto& gpu = system.GPU().Maxwell3D();
920 for (const auto& entry : entries.samplers) { 993 for (const auto& entry : entries.samplers) {
921 for (std::size_t i = 0; i < entry.Size(); ++i) { 994 for (std::size_t i = 0; i < entry.size; ++i) {
922 const auto texture = GetTextureInfo(gpu, entry, stage, i); 995 const auto texture = GetTextureInfo(gpu, entry, stage, i);
923 SetupTexture(texture, entry); 996 SetupTexture(texture, entry);
924 } 997 }
@@ -970,7 +1043,7 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
970 MICROPROFILE_SCOPE(Vulkan_Textures); 1043 MICROPROFILE_SCOPE(Vulkan_Textures);
971 const auto& gpu = system.GPU().KeplerCompute(); 1044 const auto& gpu = system.GPU().KeplerCompute();
972 for (const auto& entry : entries.samplers) { 1045 for (const auto& entry : entries.samplers) {
973 for (std::size_t i = 0; i < entry.Size(); ++i) { 1046 for (std::size_t i = 0; i < entry.size; ++i) {
974 const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i); 1047 const auto texture = GetTextureInfo(gpu, entry, ComputeShaderIndex, i);
975 SetupTexture(texture, entry); 1048 SetupTexture(texture, entry);
976 } 1049 }
@@ -990,8 +1063,7 @@ void RasterizerVulkan::SetupConstBuffer(const ConstBufferEntry& entry,
990 const Tegra::Engines::ConstBufferInfo& buffer) { 1063 const Tegra::Engines::ConstBufferInfo& buffer) {
991 if (!buffer.enabled) { 1064 if (!buffer.enabled) {
992 // Set values to zero to unbind buffers 1065 // Set values to zero to unbind buffers
993 update_descriptor_queue.AddBuffer(buffer_cache.GetEmptyBuffer(sizeof(float)), 0, 1066 update_descriptor_queue.AddBuffer(DefaultBuffer(), 0, DEFAULT_BUFFER_SIZE);
994 sizeof(float));
995 return; 1067 return;
996 } 1068 }
997 1069
@@ -1014,7 +1086,9 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAdd
1014 if (size == 0) { 1086 if (size == 0) {
1015 // Sometimes global memory pointers don't have a proper size. Upload a dummy entry 1087 // Sometimes global memory pointers don't have a proper size. Upload a dummy entry
1016 // because Vulkan doesn't like empty buffers. 1088 // because Vulkan doesn't like empty buffers.
1017 constexpr std::size_t dummy_size = 4; 1089 // Note: Do *not* use DefaultBuffer() here, storage buffers can be written breaking the
1090 // default buffer.
1091 static constexpr std::size_t dummy_size = 4;
1018 const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size); 1092 const auto buffer = buffer_cache.GetEmptyBuffer(dummy_size);
1019 update_descriptor_queue.AddBuffer(buffer, 0, dummy_size); 1093 update_descriptor_queue.AddBuffer(buffer, 0, dummy_size);
1020 return; 1094 return;
@@ -1051,7 +1125,7 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& textu
1051void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) { 1125void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
1052 auto view = texture_cache.GetImageSurface(tic, entry); 1126 auto view = texture_cache.GetImageSurface(tic, entry);
1053 1127
1054 if (entry.IsWritten()) { 1128 if (entry.is_written) {
1055 view->MarkAsModified(texture_cache.Tick()); 1129 view->MarkAsModified(texture_cache.Tick());
1056 } 1130 }
1057 1131
@@ -1179,7 +1253,7 @@ std::size_t RasterizerVulkan::CalculateVertexArraysSize() const {
1179 const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()}; 1253 const GPUVAddr end{regs.vertex_array_limit[index].LimitAddress()};
1180 DEBUG_ASSERT(end >= start); 1254 DEBUG_ASSERT(end >= start);
1181 1255
1182 size += (end - start + 1) * regs.vertex_array[index].enable; 1256 size += (end - start) * regs.vertex_array[index].enable;
1183 } 1257 }
1184 return size; 1258 return size;
1185} 1259}
@@ -1202,28 +1276,54 @@ std::size_t RasterizerVulkan::CalculateConstBufferSize(
1202} 1276}
1203 1277
1204RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const { 1278RenderPassParams RasterizerVulkan::GetRenderPassParams(Texceptions texceptions) const {
1205 using namespace VideoCore::Surface;
1206
1207 const auto& regs = system.GPU().Maxwell3D().regs; 1279 const auto& regs = system.GPU().Maxwell3D().regs;
1208 RenderPassParams renderpass_params; 1280 const std::size_t num_attachments = static_cast<std::size_t>(regs.rt_control.count);
1209 1281
1210 for (std::size_t rt = 0; rt < static_cast<std::size_t>(regs.rt_control.count); ++rt) { 1282 RenderPassParams params;
1283 params.color_formats = {};
1284 std::size_t color_texceptions = 0;
1285
1286 std::size_t index = 0;
1287 for (std::size_t rt = 0; rt < num_attachments; ++rt) {
1211 const auto& rendertarget = regs.rt[rt]; 1288 const auto& rendertarget = regs.rt[rt];
1212 if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) { 1289 if (rendertarget.Address() == 0 || rendertarget.format == Tegra::RenderTargetFormat::NONE) {
1213 continue; 1290 continue;
1214 } 1291 }
1215 renderpass_params.color_attachments.push_back(RenderPassParams::ColorAttachment{ 1292 params.color_formats[index] = static_cast<u8>(rendertarget.format);
1216 static_cast<u32>(rt), PixelFormatFromRenderTargetFormat(rendertarget.format), 1293 color_texceptions |= (texceptions[rt] ? 1ULL : 0ULL) << index;
1217 texceptions[rt]}); 1294 ++index;
1218 } 1295 }
1296 params.num_color_attachments = static_cast<u8>(index);
1297 params.texceptions = static_cast<u8>(color_texceptions);
1298
1299 params.zeta_format = regs.zeta_enable ? static_cast<u8>(regs.zeta.format) : 0;
1300 params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
1301 return params;
1302}
1219 1303
1220 renderpass_params.has_zeta = regs.zeta_enable; 1304VkBuffer RasterizerVulkan::DefaultBuffer() {
1221 if (renderpass_params.has_zeta) { 1305 if (default_buffer) {
1222 renderpass_params.zeta_pixel_format = PixelFormatFromDepthFormat(regs.zeta.format); 1306 return *default_buffer;
1223 renderpass_params.zeta_texception = texceptions[ZETA_TEXCEPTION_INDEX];
1224 } 1307 }
1225 1308
1226 return renderpass_params; 1309 VkBufferCreateInfo ci;
1310 ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
1311 ci.pNext = nullptr;
1312 ci.flags = 0;
1313 ci.size = DEFAULT_BUFFER_SIZE;
1314 ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
1315 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
1316 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
1317 ci.queueFamilyIndexCount = 0;
1318 ci.pQueueFamilyIndices = nullptr;
1319 default_buffer = device.GetLogical().CreateBuffer(ci);
1320 default_buffer_commit = memory_manager.Commit(default_buffer, false);
1321
1322 scheduler.RequestOutsideRenderPassOperationContext();
1323 scheduler.Record([buffer = *default_buffer](vk::CommandBuffer cmdbuf) {
1324 cmdbuf.FillBuffer(buffer, 0, DEFAULT_BUFFER_SIZE, 0);
1325 });
1326 return *default_buffer;
1227} 1327}
1228 1328
1229} // namespace Vulkan 1329} // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index d9108f862..0ed0e48c6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -14,13 +14,13 @@
14#include <boost/functional/hash.hpp> 14#include <boost/functional/hash.hpp>
15 15
16#include "common/common_types.h" 16#include "common/common_types.h"
17#include "video_core/memory_manager.h"
18#include "video_core/rasterizer_accelerated.h" 17#include "video_core/rasterizer_accelerated.h"
19#include "video_core/rasterizer_interface.h" 18#include "video_core/rasterizer_interface.h"
20#include "video_core/renderer_vulkan/fixed_pipeline_state.h" 19#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
21#include "video_core/renderer_vulkan/vk_buffer_cache.h" 20#include "video_core/renderer_vulkan/vk_buffer_cache.h"
22#include "video_core/renderer_vulkan/vk_compute_pass.h" 21#include "video_core/renderer_vulkan/vk_compute_pass.h"
23#include "video_core/renderer_vulkan/vk_descriptor_pool.h" 22#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
23#include "video_core/renderer_vulkan/vk_fence_manager.h"
24#include "video_core/renderer_vulkan/vk_memory_manager.h" 24#include "video_core/renderer_vulkan/vk_memory_manager.h"
25#include "video_core/renderer_vulkan/vk_pipeline_cache.h" 25#include "video_core/renderer_vulkan/vk_pipeline_cache.h"
26#include "video_core/renderer_vulkan/vk_query_cache.h" 26#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -118,8 +118,15 @@ public:
118 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override; 118 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
119 void FlushAll() override; 119 void FlushAll() override;
120 void FlushRegion(VAddr addr, u64 size) override; 120 void FlushRegion(VAddr addr, u64 size) override;
121 bool MustFlushRegion(VAddr addr, u64 size) override;
121 void InvalidateRegion(VAddr addr, u64 size) override; 122 void InvalidateRegion(VAddr addr, u64 size) override;
123 void OnCPUWrite(VAddr addr, u64 size) override;
124 void SyncGuestHost() override;
125 void SignalSemaphore(GPUVAddr addr, u32 value) override;
126 void SignalSyncPoint(u32 value) override;
127 void ReleaseFences() override;
122 void FlushAndInvalidateRegion(VAddr addr, u64 size) override; 128 void FlushAndInvalidateRegion(VAddr addr, u64 size) override;
129 void WaitForIdle() override;
123 void FlushCommands() override; 130 void FlushCommands() override;
124 void TickFrame() override; 131 void TickFrame() override;
125 bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src, 132 bool AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Regs::Surface& src,
@@ -148,6 +155,7 @@ private:
148 using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>; 155 using Texceptions = std::bitset<Maxwell::NumRenderTargets + 1>;
149 156
150 static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8; 157 static constexpr std::size_t ZETA_TEXCEPTION_INDEX = 8;
158 static constexpr VkDeviceSize DEFAULT_BUFFER_SIZE = 4 * sizeof(float);
151 159
152 void FlushWork(); 160 void FlushWork();
153 161
@@ -240,6 +248,8 @@ private:
240 248
241 RenderPassParams GetRenderPassParams(Texceptions texceptions) const; 249 RenderPassParams GetRenderPassParams(Texceptions texceptions) const;
242 250
251 VkBuffer DefaultBuffer();
252
243 Core::System& system; 253 Core::System& system;
244 Core::Frontend::EmuWindow& render_window; 254 Core::Frontend::EmuWindow& render_window;
245 VKScreenInfo& screen_info; 255 VKScreenInfo& screen_info;
@@ -261,8 +271,13 @@ private:
261 VKPipelineCache pipeline_cache; 271 VKPipelineCache pipeline_cache;
262 VKBufferCache buffer_cache; 272 VKBufferCache buffer_cache;
263 VKSamplerCache sampler_cache; 273 VKSamplerCache sampler_cache;
274 VKFenceManager fence_manager;
264 VKQueryCache query_cache; 275 VKQueryCache query_cache;
265 276
277 vk::Buffer default_buffer;
278 VKMemoryCommit default_buffer_commit;
279 vk::Event wfi_event;
280
266 std::array<View, Maxwell::NumRenderTargets> color_attachments; 281 std::array<View, Maxwell::NumRenderTargets> color_attachments;
267 View zeta_attachment; 282 View zeta_attachment;
268 283
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
index 4e5286a69..3f71d005e 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.cpp
@@ -2,9 +2,11 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <cstring>
5#include <memory> 6#include <memory>
6#include <vector> 7#include <vector>
7 8
9#include "common/cityhash.h"
8#include "video_core/engines/maxwell_3d.h" 10#include "video_core/engines/maxwell_3d.h"
9#include "video_core/renderer_vulkan/maxwell_to_vk.h" 11#include "video_core/renderer_vulkan/maxwell_to_vk.h"
10#include "video_core/renderer_vulkan/vk_device.h" 12#include "video_core/renderer_vulkan/vk_device.h"
@@ -13,6 +15,15 @@
13 15
14namespace Vulkan { 16namespace Vulkan {
15 17
18std::size_t RenderPassParams::Hash() const noexcept {
19 const u64 hash = Common::CityHash64(reinterpret_cast<const char*>(this), sizeof *this);
20 return static_cast<std::size_t>(hash);
21}
22
23bool RenderPassParams::operator==(const RenderPassParams& rhs) const noexcept {
24 return std::memcmp(&rhs, this, sizeof *this) == 0;
25}
26
16VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {} 27VKRenderPassCache::VKRenderPassCache(const VKDevice& device) : device{device} {}
17 28
18VKRenderPassCache::~VKRenderPassCache() = default; 29VKRenderPassCache::~VKRenderPassCache() = default;
@@ -27,20 +38,22 @@ VkRenderPass VKRenderPassCache::GetRenderPass(const RenderPassParams& params) {
27} 38}
28 39
29vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const { 40vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& params) const {
41 using namespace VideoCore::Surface;
30 std::vector<VkAttachmentDescription> descriptors; 42 std::vector<VkAttachmentDescription> descriptors;
31 std::vector<VkAttachmentReference> color_references; 43 std::vector<VkAttachmentReference> color_references;
32 44
33 for (std::size_t rt = 0; rt < params.color_attachments.size(); ++rt) { 45 const std::size_t num_attachments = static_cast<std::size_t>(params.num_color_attachments);
34 const auto attachment = params.color_attachments[rt]; 46 for (std::size_t rt = 0; rt < num_attachments; ++rt) {
35 const auto format = 47 const auto guest_format = static_cast<Tegra::RenderTargetFormat>(params.color_formats[rt]);
36 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, attachment.pixel_format); 48 const PixelFormat pixel_format = PixelFormatFromRenderTargetFormat(guest_format);
49 const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
37 ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}", 50 ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
38 static_cast<u32>(attachment.pixel_format)); 51 static_cast<int>(pixel_format));
39 52
40 // TODO(Rodrigo): Add eMayAlias when it's needed. 53 // TODO(Rodrigo): Add MAY_ALIAS_BIT when it's needed.
41 const auto color_layout = attachment.is_texception 54 const VkImageLayout color_layout = ((params.texceptions >> rt) & 1) != 0
42 ? VK_IMAGE_LAYOUT_GENERAL 55 ? VK_IMAGE_LAYOUT_GENERAL
43 : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; 56 : VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
44 VkAttachmentDescription& descriptor = descriptors.emplace_back(); 57 VkAttachmentDescription& descriptor = descriptors.emplace_back();
45 descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; 58 descriptor.flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
46 descriptor.format = format.format; 59 descriptor.format = format.format;
@@ -58,15 +71,17 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
58 } 71 }
59 72
60 VkAttachmentReference zeta_attachment_ref; 73 VkAttachmentReference zeta_attachment_ref;
61 if (params.has_zeta) { 74 const bool has_zeta = params.zeta_format != 0;
62 const auto format = 75 if (has_zeta) {
63 MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, params.zeta_pixel_format); 76 const auto guest_format = static_cast<Tegra::DepthFormat>(params.zeta_format);
77 const PixelFormat pixel_format = PixelFormatFromDepthFormat(guest_format);
78 const auto format = MaxwellToVK::SurfaceFormat(device, FormatType::Optimal, pixel_format);
64 ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}", 79 ASSERT_MSG(format.attachable, "Trying to attach a non-attachable format with format={}",
65 static_cast<u32>(params.zeta_pixel_format)); 80 static_cast<int>(pixel_format));
66 81
67 const auto zeta_layout = params.zeta_texception 82 const VkImageLayout zeta_layout = params.zeta_texception != 0
68 ? VK_IMAGE_LAYOUT_GENERAL 83 ? VK_IMAGE_LAYOUT_GENERAL
69 : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; 84 : VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
70 VkAttachmentDescription& descriptor = descriptors.emplace_back(); 85 VkAttachmentDescription& descriptor = descriptors.emplace_back();
71 descriptor.flags = 0; 86 descriptor.flags = 0;
72 descriptor.format = format.format; 87 descriptor.format = format.format;
@@ -78,7 +93,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
78 descriptor.initialLayout = zeta_layout; 93 descriptor.initialLayout = zeta_layout;
79 descriptor.finalLayout = zeta_layout; 94 descriptor.finalLayout = zeta_layout;
80 95
81 zeta_attachment_ref.attachment = static_cast<u32>(params.color_attachments.size()); 96 zeta_attachment_ref.attachment = static_cast<u32>(num_attachments);
82 zeta_attachment_ref.layout = zeta_layout; 97 zeta_attachment_ref.layout = zeta_layout;
83 } 98 }
84 99
@@ -90,7 +105,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
90 subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size()); 105 subpass_description.colorAttachmentCount = static_cast<u32>(color_references.size());
91 subpass_description.pColorAttachments = color_references.data(); 106 subpass_description.pColorAttachments = color_references.data();
92 subpass_description.pResolveAttachments = nullptr; 107 subpass_description.pResolveAttachments = nullptr;
93 subpass_description.pDepthStencilAttachment = params.has_zeta ? &zeta_attachment_ref : nullptr; 108 subpass_description.pDepthStencilAttachment = has_zeta ? &zeta_attachment_ref : nullptr;
94 subpass_description.preserveAttachmentCount = 0; 109 subpass_description.preserveAttachmentCount = 0;
95 subpass_description.pPreserveAttachments = nullptr; 110 subpass_description.pPreserveAttachments = nullptr;
96 111
@@ -101,7 +116,7 @@ vk::RenderPass VKRenderPassCache::CreateRenderPass(const RenderPassParams& param
101 stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; 116 stage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
102 } 117 }
103 118
104 if (params.has_zeta) { 119 if (has_zeta) {
105 access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | 120 access |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
106 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; 121 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
107 stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT; 122 stage |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
diff --git a/src/video_core/renderer_vulkan/vk_renderpass_cache.h b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
index 921b6efb5..8b0fec720 100644
--- a/src/video_core/renderer_vulkan/vk_renderpass_cache.h
+++ b/src/video_core/renderer_vulkan/vk_renderpass_cache.h
@@ -4,8 +4,7 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <memory> 7#include <type_traits>
8#include <tuple>
9#include <unordered_map> 8#include <unordered_map>
10 9
11#include <boost/container/static_vector.hpp> 10#include <boost/container/static_vector.hpp>
@@ -19,51 +18,25 @@ namespace Vulkan {
19 18
20class VKDevice; 19class VKDevice;
21 20
22// TODO(Rodrigo): Optimize this structure for faster hashing
23
24struct RenderPassParams { 21struct RenderPassParams {
25 struct ColorAttachment { 22 std::array<u8, Tegra::Engines::Maxwell3D::Regs::NumRenderTargets> color_formats;
26 u32 index = 0; 23 u8 num_color_attachments;
27 VideoCore::Surface::PixelFormat pixel_format = VideoCore::Surface::PixelFormat::Invalid; 24 u8 texceptions;
28 bool is_texception = false; 25
29 26 u8 zeta_format;
30 std::size_t Hash() const noexcept { 27 u8 zeta_texception;
31 return static_cast<std::size_t>(pixel_format) | 28
32 static_cast<std::size_t>(is_texception) << 6 | 29 std::size_t Hash() const noexcept;
33 static_cast<std::size_t>(index) << 7; 30
34 } 31 bool operator==(const RenderPassParams& rhs) const noexcept;
35
36 bool operator==(const ColorAttachment& rhs) const noexcept {
37 return std::tie(index, pixel_format, is_texception) ==
38 std::tie(rhs.index, rhs.pixel_format, rhs.is_texception);
39 }
40 };
41
42 boost::container::static_vector<ColorAttachment,
43 Tegra::Engines::Maxwell3D::Regs::NumRenderTargets>
44 color_attachments{};
45 // TODO(Rodrigo): Unify has_zeta into zeta_pixel_format and zeta_component_type.
46 VideoCore::Surface::PixelFormat zeta_pixel_format = VideoCore::Surface::PixelFormat::Invalid;
47 bool has_zeta = false;
48 bool zeta_texception = false;
49
50 std::size_t Hash() const noexcept {
51 std::size_t hash = 0;
52 for (const auto& rt : color_attachments) {
53 boost::hash_combine(hash, rt.Hash());
54 }
55 boost::hash_combine(hash, zeta_pixel_format);
56 boost::hash_combine(hash, has_zeta);
57 boost::hash_combine(hash, zeta_texception);
58 return hash;
59 }
60 32
61 bool operator==(const RenderPassParams& rhs) const { 33 bool operator!=(const RenderPassParams& rhs) const noexcept {
62 return std::tie(color_attachments, zeta_pixel_format, has_zeta, zeta_texception) == 34 return !operator==(rhs);
63 std::tie(rhs.color_attachments, rhs.zeta_pixel_format, rhs.has_zeta,
64 rhs.zeta_texception);
65 } 35 }
66}; 36};
37static_assert(std::has_unique_object_representations_v<RenderPassParams>);
38static_assert(std::is_trivially_copyable_v<RenderPassParams>);
39static_assert(std::is_trivially_constructible_v<RenderPassParams>);
67 40
68} // namespace Vulkan 41} // namespace Vulkan
69 42
diff --git a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
index 07bbcf520..2687d8d95 100644
--- a/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_sampler_cache.cpp
@@ -2,11 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <cstring>
6#include <optional>
7#include <unordered_map> 5#include <unordered_map>
8 6
9#include "common/assert.h"
10#include "video_core/renderer_vulkan/maxwell_to_vk.h" 7#include "video_core/renderer_vulkan/maxwell_to_vk.h"
11#include "video_core/renderer_vulkan/vk_sampler_cache.h" 8#include "video_core/renderer_vulkan/vk_sampler_cache.h"
12#include "video_core/renderer_vulkan/wrapper.h" 9#include "video_core/renderer_vulkan/wrapper.h"
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 900f551b3..82ec9180e 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -8,7 +8,6 @@
8#include <thread> 8#include <thread>
9#include <utility> 9#include <utility>
10 10
11#include "common/assert.h"
12#include "common/microprofile.h" 11#include "common/microprofile.h"
13#include "video_core/renderer_vulkan/vk_device.h" 12#include "video_core/renderer_vulkan/vk_device.h"
14#include "video_core/renderer_vulkan/vk_query_cache.h" 13#include "video_core/renderer_vulkan/vk_query_cache.h"
@@ -166,7 +165,15 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
166 submit_info.pCommandBuffers = current_cmdbuf.address(); 165 submit_info.pCommandBuffers = current_cmdbuf.address();
167 submit_info.signalSemaphoreCount = semaphore ? 1 : 0; 166 submit_info.signalSemaphoreCount = semaphore ? 1 : 0;
168 submit_info.pSignalSemaphores = &semaphore; 167 submit_info.pSignalSemaphores = &semaphore;
169 device.GetGraphicsQueue().Submit(submit_info, *current_fence); 168 switch (const VkResult result = device.GetGraphicsQueue().Submit(submit_info, *current_fence)) {
169 case VK_SUCCESS:
170 break;
171 case VK_ERROR_DEVICE_LOST:
172 device.ReportLoss();
173 [[fallthrough]];
174 default:
175 vk::Check(result);
176 }
170} 177}
171 178
172void VKScheduler::AllocateNewContext() { 179void VKScheduler::AllocateNewContext() {
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 82a8adc69..970a65566 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -7,7 +7,6 @@
7#include <atomic> 7#include <atomic>
8#include <condition_variable> 8#include <condition_variable>
9#include <memory> 9#include <memory>
10#include <optional>
11#include <stack> 10#include <stack>
12#include <thread> 11#include <thread>
13#include <utility> 12#include <utility>
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
index aaa138f52..18678968c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.cpp
@@ -103,8 +103,8 @@ struct GenericVaryingDescription {
103}; 103};
104 104
105spv::Dim GetSamplerDim(const Sampler& sampler) { 105spv::Dim GetSamplerDim(const Sampler& sampler) {
106 ASSERT(!sampler.IsBuffer()); 106 ASSERT(!sampler.is_buffer);
107 switch (sampler.GetType()) { 107 switch (sampler.type) {
108 case Tegra::Shader::TextureType::Texture1D: 108 case Tegra::Shader::TextureType::Texture1D:
109 return spv::Dim::Dim1D; 109 return spv::Dim::Dim1D;
110 case Tegra::Shader::TextureType::Texture2D: 110 case Tegra::Shader::TextureType::Texture2D:
@@ -114,13 +114,13 @@ spv::Dim GetSamplerDim(const Sampler& sampler) {
114 case Tegra::Shader::TextureType::TextureCube: 114 case Tegra::Shader::TextureType::TextureCube:
115 return spv::Dim::Cube; 115 return spv::Dim::Cube;
116 default: 116 default:
117 UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<u32>(sampler.GetType())); 117 UNIMPLEMENTED_MSG("Unimplemented sampler type={}", static_cast<int>(sampler.type));
118 return spv::Dim::Dim2D; 118 return spv::Dim::Dim2D;
119 } 119 }
120} 120}
121 121
122std::pair<spv::Dim, bool> GetImageDim(const Image& image) { 122std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
123 switch (image.GetType()) { 123 switch (image.type) {
124 case Tegra::Shader::ImageType::Texture1D: 124 case Tegra::Shader::ImageType::Texture1D:
125 return {spv::Dim::Dim1D, false}; 125 return {spv::Dim::Dim1D, false};
126 case Tegra::Shader::ImageType::TextureBuffer: 126 case Tegra::Shader::ImageType::TextureBuffer:
@@ -134,7 +134,7 @@ std::pair<spv::Dim, bool> GetImageDim(const Image& image) {
134 case Tegra::Shader::ImageType::Texture3D: 134 case Tegra::Shader::ImageType::Texture3D:
135 return {spv::Dim::Dim3D, false}; 135 return {spv::Dim::Dim3D, false};
136 default: 136 default:
137 UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<u32>(image.GetType())); 137 UNIMPLEMENTED_MSG("Unimplemented image type={}", static_cast<int>(image.type));
138 return {spv::Dim::Dim2D, false}; 138 return {spv::Dim::Dim2D, false};
139 } 139 }
140} 140}
@@ -879,11 +879,11 @@ private:
879 879
880 u32 DeclareTexelBuffers(u32 binding) { 880 u32 DeclareTexelBuffers(u32 binding) {
881 for (const auto& sampler : ir.GetSamplers()) { 881 for (const auto& sampler : ir.GetSamplers()) {
882 if (!sampler.IsBuffer()) { 882 if (!sampler.is_buffer) {
883 continue; 883 continue;
884 } 884 }
885 ASSERT(!sampler.IsArray()); 885 ASSERT(!sampler.is_array);
886 ASSERT(!sampler.IsShadow()); 886 ASSERT(!sampler.is_shadow);
887 887
888 constexpr auto dim = spv::Dim::Buffer; 888 constexpr auto dim = spv::Dim::Buffer;
889 constexpr int depth = 0; 889 constexpr int depth = 0;
@@ -894,23 +894,23 @@ private:
894 const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format); 894 const Id image_type = TypeImage(t_float, dim, depth, arrayed, ms, sampled, format);
895 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); 895 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
896 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 896 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
897 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); 897 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
898 Decorate(id, spv::Decoration::Binding, binding++); 898 Decorate(id, spv::Decoration::Binding, binding++);
899 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 899 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
900 900
901 texel_buffers.emplace(sampler.GetIndex(), TexelBuffer{image_type, id}); 901 texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
902 } 902 }
903 return binding; 903 return binding;
904 } 904 }
905 905
906 u32 DeclareSamplers(u32 binding) { 906 u32 DeclareSamplers(u32 binding) {
907 for (const auto& sampler : ir.GetSamplers()) { 907 for (const auto& sampler : ir.GetSamplers()) {
908 if (sampler.IsBuffer()) { 908 if (sampler.is_buffer) {
909 continue; 909 continue;
910 } 910 }
911 const auto dim = GetSamplerDim(sampler); 911 const auto dim = GetSamplerDim(sampler);
912 const int depth = sampler.IsShadow() ? 1 : 0; 912 const int depth = sampler.is_shadow ? 1 : 0;
913 const int arrayed = sampler.IsArray() ? 1 : 0; 913 const int arrayed = sampler.is_array ? 1 : 0;
914 constexpr bool ms = false; 914 constexpr bool ms = false;
915 constexpr int sampled = 1; 915 constexpr int sampled = 1;
916 constexpr auto format = spv::ImageFormat::Unknown; 916 constexpr auto format = spv::ImageFormat::Unknown;
@@ -918,17 +918,17 @@ private:
918 const Id sampler_type = TypeSampledImage(image_type); 918 const Id sampler_type = TypeSampledImage(image_type);
919 const Id sampler_pointer_type = 919 const Id sampler_pointer_type =
920 TypePointer(spv::StorageClass::UniformConstant, sampler_type); 920 TypePointer(spv::StorageClass::UniformConstant, sampler_type);
921 const Id type = sampler.IsIndexed() 921 const Id type = sampler.is_indexed
922 ? TypeArray(sampler_type, Constant(t_uint, sampler.Size())) 922 ? TypeArray(sampler_type, Constant(t_uint, sampler.size))
923 : sampler_type; 923 : sampler_type;
924 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type); 924 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, type);
925 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 925 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
926 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.GetIndex()))); 926 AddGlobalVariable(Name(id, fmt::format("sampler_{}", sampler.index)));
927 Decorate(id, spv::Decoration::Binding, binding++); 927 Decorate(id, spv::Decoration::Binding, binding++);
928 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 928 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
929 929
930 sampled_images.emplace(sampler.GetIndex(), SampledImage{image_type, sampler_type, 930 sampled_images.emplace(
931 sampler_pointer_type, id}); 931 sampler.index, SampledImage{image_type, sampler_type, sampler_pointer_type, id});
932 } 932 }
933 return binding; 933 return binding;
934 } 934 }
@@ -943,17 +943,17 @@ private:
943 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {}); 943 const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
944 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type); 944 const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
945 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant); 945 const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
946 AddGlobalVariable(Name(id, fmt::format("image_{}", image.GetIndex()))); 946 AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
947 947
948 Decorate(id, spv::Decoration::Binding, binding++); 948 Decorate(id, spv::Decoration::Binding, binding++);
949 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET); 949 Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
950 if (image.IsRead() && !image.IsWritten()) { 950 if (image.is_read && !image.is_written) {
951 Decorate(id, spv::Decoration::NonWritable); 951 Decorate(id, spv::Decoration::NonWritable);
952 } else if (image.IsWritten() && !image.IsRead()) { 952 } else if (image.is_written && !image.is_read) {
953 Decorate(id, spv::Decoration::NonReadable); 953 Decorate(id, spv::Decoration::NonReadable);
954 } 954 }
955 955
956 images.emplace(static_cast<u32>(image.GetIndex()), StorageImage{image_type, id}); 956 images.emplace(image.index, StorageImage{image_type, id});
957 } 957 }
958 return binding; 958 return binding;
959 } 959 }
@@ -1584,6 +1584,15 @@ private:
1584 return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat}; 1584 return {OpCompositeConstruct(t_half, low, high), Type::HalfFloat};
1585 } 1585 }
1586 1586
1587 Expression LogicalAddCarry(Operation operation) {
1588 const Id op_a = AsUint(Visit(operation[0]));
1589 const Id op_b = AsUint(Visit(operation[1]));
1590
1591 const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
1592 const Id carry = OpCompositeExtract(t_uint, result, 1);
1593 return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
1594 }
1595
1587 Expression LogicalAssign(Operation operation) { 1596 Expression LogicalAssign(Operation operation) {
1588 const Node& dest = operation[0]; 1597 const Node& dest = operation[0];
1589 const Node& src = operation[1]; 1598 const Node& src = operation[1];
@@ -1611,11 +1620,11 @@ private:
1611 1620
1612 Id GetTextureSampler(Operation operation) { 1621 Id GetTextureSampler(Operation operation) {
1613 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1622 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1614 ASSERT(!meta.sampler.IsBuffer()); 1623 ASSERT(!meta.sampler.is_buffer);
1615 1624
1616 const auto& entry = sampled_images.at(meta.sampler.GetIndex()); 1625 const auto& entry = sampled_images.at(meta.sampler.index);
1617 Id sampler = entry.variable; 1626 Id sampler = entry.variable;
1618 if (meta.sampler.IsIndexed()) { 1627 if (meta.sampler.is_indexed) {
1619 const Id index = AsInt(Visit(meta.index)); 1628 const Id index = AsInt(Visit(meta.index));
1620 sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index); 1629 sampler = OpAccessChain(entry.sampler_pointer_type, sampler, index);
1621 } 1630 }
@@ -1624,8 +1633,8 @@ private:
1624 1633
1625 Id GetTextureImage(Operation operation) { 1634 Id GetTextureImage(Operation operation) {
1626 const auto& meta = std::get<MetaTexture>(operation.GetMeta()); 1635 const auto& meta = std::get<MetaTexture>(operation.GetMeta());
1627 const u32 index = meta.sampler.GetIndex(); 1636 const u32 index = meta.sampler.index;
1628 if (meta.sampler.IsBuffer()) { 1637 if (meta.sampler.is_buffer) {
1629 const auto& entry = texel_buffers.at(index); 1638 const auto& entry = texel_buffers.at(index);
1630 return OpLoad(entry.image_type, entry.image); 1639 return OpLoad(entry.image_type, entry.image);
1631 } else { 1640 } else {
@@ -1636,7 +1645,7 @@ private:
1636 1645
1637 Id GetImage(Operation operation) { 1646 Id GetImage(Operation operation) {
1638 const auto& meta = std::get<MetaImage>(operation.GetMeta()); 1647 const auto& meta = std::get<MetaImage>(operation.GetMeta());
1639 const auto entry = images.at(meta.image.GetIndex()); 1648 const auto entry = images.at(meta.image.index);
1640 return OpLoad(entry.image_type, entry.image); 1649 return OpLoad(entry.image_type, entry.image);
1641 } 1650 }
1642 1651
@@ -1652,7 +1661,7 @@ private:
1652 } 1661 }
1653 if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) { 1662 if (const auto meta = std::get_if<MetaTexture>(&operation.GetMeta())) {
1654 // Add array coordinate for textures 1663 // Add array coordinate for textures
1655 if (meta->sampler.IsArray()) { 1664 if (meta->sampler.is_array) {
1656 Id array = AsInt(Visit(meta->array)); 1665 Id array = AsInt(Visit(meta->array));
1657 if (type == Type::Float) { 1666 if (type == Type::Float) {
1658 array = OpConvertSToF(t_float, array); 1667 array = OpConvertSToF(t_float, array);
@@ -1758,7 +1767,7 @@ private:
1758 operands.push_back(GetOffsetCoordinates(operation)); 1767 operands.push_back(GetOffsetCoordinates(operation));
1759 } 1768 }
1760 1769
1761 if (meta.sampler.IsShadow()) { 1770 if (meta.sampler.is_shadow) {
1762 const Id dref = AsFloat(Visit(meta.depth_compare)); 1771 const Id dref = AsFloat(Visit(meta.depth_compare));
1763 return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands), 1772 return {OpImageSampleDrefExplicitLod(t_float, sampler, coords, dref, mask, operands),
1764 Type::Float}; 1773 Type::Float};
@@ -1773,7 +1782,7 @@ private:
1773 1782
1774 const Id coords = GetCoordinates(operation, Type::Float); 1783 const Id coords = GetCoordinates(operation, Type::Float);
1775 Id texture{}; 1784 Id texture{};
1776 if (meta.sampler.IsShadow()) { 1785 if (meta.sampler.is_shadow) {
1777 texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords, 1786 texture = OpImageDrefGather(t_float4, GetTextureSampler(operation), coords,
1778 AsFloat(Visit(meta.depth_compare))); 1787 AsFloat(Visit(meta.depth_compare)));
1779 } else { 1788 } else {
@@ -1800,8 +1809,8 @@ private:
1800 } 1809 }
1801 1810
1802 const Id lod = AsUint(Visit(operation[0])); 1811 const Id lod = AsUint(Visit(operation[0]));
1803 const std::size_t coords_count = [&]() { 1812 const std::size_t coords_count = [&meta] {
1804 switch (const auto type = meta.sampler.GetType(); type) { 1813 switch (const auto type = meta.sampler.type) {
1805 case Tegra::Shader::TextureType::Texture1D: 1814 case Tegra::Shader::TextureType::Texture1D:
1806 return 1; 1815 return 1;
1807 case Tegra::Shader::TextureType::Texture2D: 1816 case Tegra::Shader::TextureType::Texture2D:
@@ -1810,7 +1819,7 @@ private:
1810 case Tegra::Shader::TextureType::Texture3D: 1819 case Tegra::Shader::TextureType::Texture3D:
1811 return 3; 1820 return 3;
1812 default: 1821 default:
1813 UNREACHABLE_MSG("Invalid texture type={}", static_cast<u32>(type)); 1822 UNREACHABLE_MSG("Invalid texture type={}", static_cast<int>(type));
1814 return 2; 1823 return 2;
1815 } 1824 }
1816 }(); 1825 }();
@@ -1853,7 +1862,7 @@ private:
1853 const Id image = GetTextureImage(operation); 1862 const Id image = GetTextureImage(operation);
1854 const Id coords = GetCoordinates(operation, Type::Int); 1863 const Id coords = GetCoordinates(operation, Type::Int);
1855 Id fetch; 1864 Id fetch;
1856 if (meta.lod && !meta.sampler.IsBuffer()) { 1865 if (meta.lod && !meta.sampler.is_buffer) {
1857 fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod, 1866 fetch = OpImageFetch(t_float4, image, coords, spv::ImageOperandsMask::Lod,
1858 AsInt(Visit(meta.lod))); 1867 AsInt(Visit(meta.lod)));
1859 } else { 1868 } else {
@@ -2518,6 +2527,8 @@ private:
2518 &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>, 2527 &SPIRVDecompiler::Binary<&Module::OpINotEqual, Type::Bool, Type::Uint>,
2519 &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>, 2528 &SPIRVDecompiler::Binary<&Module::OpUGreaterThanEqual, Type::Bool, Type::Uint>,
2520 2529
2530 &SPIRVDecompiler::LogicalAddCarry,
2531
2521 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>, 2532 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThan, Type::Bool2, Type::HalfFloat>,
2522 &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>, 2533 &SPIRVDecompiler::Binary<&Module::OpFOrdEqual, Type::Bool2, Type::HalfFloat>,
2523 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>, 2534 &SPIRVDecompiler::Binary<&Module::OpFOrdLessThanEqual, Type::Bool2, Type::HalfFloat>,
@@ -2969,7 +2980,7 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
2969 entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written); 2980 entries.global_buffers.emplace_back(base.cbuf_index, base.cbuf_offset, usage.is_written);
2970 } 2981 }
2971 for (const auto& sampler : ir.GetSamplers()) { 2982 for (const auto& sampler : ir.GetSamplers()) {
2972 if (sampler.IsBuffer()) { 2983 if (sampler.is_buffer) {
2973 entries.texel_buffers.emplace_back(sampler); 2984 entries.texel_buffers.emplace_back(sampler);
2974 } else { 2985 } else {
2975 entries.samplers.emplace_back(sampler); 2986 entries.samplers.emplace_back(sampler);
diff --git a/src/video_core/renderer_vulkan/vk_shader_decompiler.h b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
index ffea4709e..f4c05ac3c 100644
--- a/src/video_core/renderer_vulkan/vk_shader_decompiler.h
+++ b/src/video_core/renderer_vulkan/vk_shader_decompiler.h
@@ -5,11 +5,7 @@
5#pragma once 5#pragma once
6 6
7#include <array> 7#include <array>
8#include <bitset>
9#include <memory>
10#include <set> 8#include <set>
11#include <type_traits>
12#include <utility>
13#include <vector> 9#include <vector>
14 10
15#include "common/common_types.h" 11#include "common/common_types.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.cpp b/src/video_core/renderer_vulkan/vk_shader_util.cpp
index 784839327..112df9c71 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.cpp
+++ b/src/video_core/renderer_vulkan/vk_shader_util.cpp
@@ -4,8 +4,7 @@
4 4
5#include <cstring> 5#include <cstring>
6#include <memory> 6#include <memory>
7#include <vector> 7
8#include "common/alignment.h"
9#include "common/assert.h" 8#include "common/assert.h"
10#include "common/common_types.h" 9#include "common/common_types.h"
11#include "video_core/renderer_vulkan/vk_device.h" 10#include "video_core/renderer_vulkan/vk_device.h"
diff --git a/src/video_core/renderer_vulkan/vk_shader_util.h b/src/video_core/renderer_vulkan/vk_shader_util.h
index be38d6697..d1d3f3cae 100644
--- a/src/video_core/renderer_vulkan/vk_shader_util.h
+++ b/src/video_core/renderer_vulkan/vk_shader_util.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <vector>
8#include "common/common_types.h" 7#include "common/common_types.h"
9#include "video_core/renderer_vulkan/wrapper.h" 8#include "video_core/renderer_vulkan/wrapper.h"
10 9
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 94d954d7a..45c180221 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -39,8 +39,7 @@ VKStagingBufferPool::StagingBuffer& VKStagingBufferPool::StagingBuffer::operator
39 39
40VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager, 40VKStagingBufferPool::VKStagingBufferPool(const VKDevice& device, VKMemoryManager& memory_manager,
41 VKScheduler& scheduler) 41 VKScheduler& scheduler)
42 : device{device}, memory_manager{memory_manager}, scheduler{scheduler}, 42 : device{device}, memory_manager{memory_manager}, scheduler{scheduler} {}
43 is_device_integrated{device.IsIntegrated()} {}
44 43
45VKStagingBufferPool::~VKStagingBufferPool() = default; 44VKStagingBufferPool::~VKStagingBufferPool() = default;
46 45
@@ -56,9 +55,7 @@ void VKStagingBufferPool::TickFrame() {
56 current_delete_level = (current_delete_level + 1) % NumLevels; 55 current_delete_level = (current_delete_level + 1) % NumLevels;
57 56
58 ReleaseCache(true); 57 ReleaseCache(true);
59 if (!is_device_integrated) { 58 ReleaseCache(false);
60 ReleaseCache(false);
61 }
62} 59}
63 60
64VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) { 61VKBuffer* VKStagingBufferPool::TryGetReservedBuffer(std::size_t size, bool host_visible) {
@@ -81,7 +78,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
81 ci.size = 1ULL << log2; 78 ci.size = 1ULL << log2;
82 ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | 79 ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
83 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | 80 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
84 VK_BUFFER_USAGE_INDEX_BUFFER_BIT; 81 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
85 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 82 ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
86 ci.queueFamilyIndexCount = 0; 83 ci.queueFamilyIndexCount = 0;
87 ci.pQueueFamilyIndices = nullptr; 84 ci.pQueueFamilyIndices = nullptr;
@@ -95,7 +92,7 @@ VKBuffer& VKStagingBufferPool::CreateStagingBuffer(std::size_t size, bool host_v
95} 92}
96 93
97VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) { 94VKStagingBufferPool::StagingBuffersCache& VKStagingBufferPool::GetCache(bool host_visible) {
98 return is_device_integrated || host_visible ? host_staging_buffers : device_staging_buffers; 95 return host_visible ? host_staging_buffers : device_staging_buffers;
99} 96}
100 97
101void VKStagingBufferPool::ReleaseCache(bool host_visible) { 98void VKStagingBufferPool::ReleaseCache(bool host_visible) {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index a0840ff8c..3c4901437 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -5,8 +5,6 @@
5#pragma once 5#pragma once
6 6
7#include <climits> 7#include <climits>
8#include <unordered_map>
9#include <utility>
10#include <vector> 8#include <vector>
11 9
12#include "common/common_types.h" 10#include "common/common_types.h"
@@ -71,7 +69,6 @@ private:
71 const VKDevice& device; 69 const VKDevice& device;
72 VKMemoryManager& memory_manager; 70 VKMemoryManager& memory_manager;
73 VKScheduler& scheduler; 71 VKScheduler& scheduler;
74 const bool is_device_integrated;
75 72
76 StagingBuffersCache host_staging_buffers; 73 StagingBuffersCache host_staging_buffers;
77 StagingBuffersCache device_staging_buffers; 74 StagingBuffersCache device_staging_buffers;
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
index 38a93a01a..868447af2 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.cpp
@@ -3,6 +3,7 @@
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm> 5#include <algorithm>
6#include <limits>
6#include <optional> 7#include <optional>
7#include <tuple> 8#include <tuple>
8#include <vector> 9#include <vector>
@@ -22,22 +23,38 @@ namespace {
22constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000; 23constexpr u64 WATCHES_INITIAL_RESERVE = 0x4000;
23constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000; 24constexpr u64 WATCHES_RESERVE_CHUNK = 0x1000;
24 25
25constexpr u64 STREAM_BUFFER_SIZE = 256 * 1024 * 1024; 26constexpr u64 PREFERRED_STREAM_BUFFER_SIZE = 256 * 1024 * 1024;
26 27
27std::optional<u32> FindMemoryType(const VKDevice& device, u32 filter, 28/// Find a memory type with the passed requirements
28 VkMemoryPropertyFlags wanted) { 29std::optional<u32> FindMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
29 const auto properties = device.GetPhysical().GetMemoryProperties(); 30 VkMemoryPropertyFlags wanted,
30 for (u32 i = 0; i < properties.memoryTypeCount; i++) { 31 u32 filter = std::numeric_limits<u32>::max()) {
31 if (!(filter & (1 << i))) { 32 for (u32 i = 0; i < properties.memoryTypeCount; ++i) {
32 continue; 33 const auto flags = properties.memoryTypes[i].propertyFlags;
33 } 34 if ((flags & wanted) == wanted && (filter & (1U << i)) != 0) {
34 if ((properties.memoryTypes[i].propertyFlags & wanted) == wanted) {
35 return i; 35 return i;
36 } 36 }
37 } 37 }
38 return std::nullopt; 38 return std::nullopt;
39} 39}
40 40
41/// Get the preferred host visible memory type.
42u32 GetMemoryType(const VkPhysicalDeviceMemoryProperties& properties,
43 u32 filter = std::numeric_limits<u32>::max()) {
44 // Prefer device local host visible allocations. Both AMD and Nvidia now provide one.
45 // Otherwise search for a host visible allocation.
46 static constexpr auto HOST_MEMORY =
47 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
48 static constexpr auto DYNAMIC_MEMORY = HOST_MEMORY | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
49
50 std::optional preferred_type = FindMemoryType(properties, DYNAMIC_MEMORY);
51 if (!preferred_type) {
52 preferred_type = FindMemoryType(properties, HOST_MEMORY);
53 ASSERT_MSG(preferred_type, "No host visible and coherent memory type found");
54 }
55 return preferred_type.value_or(0);
56}
57
41} // Anonymous namespace 58} // Anonymous namespace
42 59
43VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler, 60VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
@@ -51,7 +68,7 @@ VKStreamBuffer::VKStreamBuffer(const VKDevice& device, VKScheduler& scheduler,
51VKStreamBuffer::~VKStreamBuffer() = default; 68VKStreamBuffer::~VKStreamBuffer() = default;
52 69
53std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) { 70std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
54 ASSERT(size <= STREAM_BUFFER_SIZE); 71 ASSERT(size <= stream_buffer_size);
55 mapped_size = size; 72 mapped_size = size;
56 73
57 if (alignment > 0) { 74 if (alignment > 0) {
@@ -61,7 +78,7 @@ std::tuple<u8*, u64, bool> VKStreamBuffer::Map(u64 size, u64 alignment) {
61 WaitPendingOperations(offset); 78 WaitPendingOperations(offset);
62 79
63 bool invalidated = false; 80 bool invalidated = false;
64 if (offset + size > STREAM_BUFFER_SIZE) { 81 if (offset + size > stream_buffer_size) {
65 // The buffer would overflow, save the amount of used watches and reset the state. 82 // The buffer would overflow, save the amount of used watches and reset the state.
66 invalidation_mark = current_watch_cursor; 83 invalidation_mark = current_watch_cursor;
67 current_watch_cursor = 0; 84 current_watch_cursor = 0;
@@ -98,40 +115,37 @@ void VKStreamBuffer::Unmap(u64 size) {
98} 115}
99 116
100void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) { 117void VKStreamBuffer::CreateBuffers(VkBufferUsageFlags usage) {
118 const auto memory_properties = device.GetPhysical().GetMemoryProperties();
119 const u32 preferred_type = GetMemoryType(memory_properties);
120 const u32 preferred_heap = memory_properties.memoryTypes[preferred_type].heapIndex;
121
 122 // Subtract some bytes from the preferred heap size to avoid running out of memory.
123 const VkDeviceSize heap_size = memory_properties.memoryHeaps[preferred_heap].size;
124 const VkDeviceSize allocable_size = heap_size - 4 * 1024 * 1024;
125
101 VkBufferCreateInfo buffer_ci; 126 VkBufferCreateInfo buffer_ci;
102 buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; 127 buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
103 buffer_ci.pNext = nullptr; 128 buffer_ci.pNext = nullptr;
104 buffer_ci.flags = 0; 129 buffer_ci.flags = 0;
105 buffer_ci.size = STREAM_BUFFER_SIZE; 130 buffer_ci.size = std::min(PREFERRED_STREAM_BUFFER_SIZE, allocable_size);
106 buffer_ci.usage = usage; 131 buffer_ci.usage = usage;
107 buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; 132 buffer_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
108 buffer_ci.queueFamilyIndexCount = 0; 133 buffer_ci.queueFamilyIndexCount = 0;
109 buffer_ci.pQueueFamilyIndices = nullptr; 134 buffer_ci.pQueueFamilyIndices = nullptr;
110 135
111 const auto& dev = device.GetLogical(); 136 buffer = device.GetLogical().CreateBuffer(buffer_ci);
112 buffer = dev.CreateBuffer(buffer_ci); 137
113 138 const auto requirements = device.GetLogical().GetBufferMemoryRequirements(*buffer);
114 const auto& dld = device.GetDispatchLoader(); 139 const u32 required_flags = requirements.memoryTypeBits;
115 const auto requirements = dev.GetBufferMemoryRequirements(*buffer); 140 stream_buffer_size = static_cast<u64>(requirements.size);
116 // Prefer device local host visible allocations (this should hit AMD's pinned memory). 141
117 auto type =
118 FindMemoryType(device, requirements.memoryTypeBits,
119 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
120 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
121 if (!type) {
122 // Otherwise search for a host visible allocation.
123 type = FindMemoryType(device, requirements.memoryTypeBits,
124 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
125 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
126 ASSERT_MSG(type, "No host visible and coherent memory type found");
127 }
128 VkMemoryAllocateInfo memory_ai; 142 VkMemoryAllocateInfo memory_ai;
129 memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; 143 memory_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
130 memory_ai.pNext = nullptr; 144 memory_ai.pNext = nullptr;
131 memory_ai.allocationSize = requirements.size; 145 memory_ai.allocationSize = requirements.size;
132 memory_ai.memoryTypeIndex = *type; 146 memory_ai.memoryTypeIndex = GetMemoryType(memory_properties, required_flags);
133 147
134 memory = dev.AllocateMemory(memory_ai); 148 memory = device.GetLogical().AllocateMemory(memory_ai);
135 buffer.BindMemory(*memory, 0); 149 buffer.BindMemory(*memory, 0);
136} 150}
137 151
diff --git a/src/video_core/renderer_vulkan/vk_stream_buffer.h b/src/video_core/renderer_vulkan/vk_stream_buffer.h
index 58ce8b973..dfddf7ad6 100644
--- a/src/video_core/renderer_vulkan/vk_stream_buffer.h
+++ b/src/video_core/renderer_vulkan/vk_stream_buffer.h
@@ -56,8 +56,9 @@ private:
56 const VKDevice& device; ///< Vulkan device manager. 56 const VKDevice& device; ///< Vulkan device manager.
57 VKScheduler& scheduler; ///< Command scheduler. 57 VKScheduler& scheduler; ///< Command scheduler.
58 58
59 vk::Buffer buffer; ///< Mapped buffer. 59 vk::Buffer buffer; ///< Mapped buffer.
60 vk::DeviceMemory memory; ///< Memory allocation. 60 vk::DeviceMemory memory; ///< Memory allocation.
61 u64 stream_buffer_size{}; ///< Stream buffer size.
61 62
62 u64 offset{}; ///< Buffer iterator. 63 u64 offset{}; ///< Buffer iterator.
63 u64 mapped_size{}; ///< Size reserved for the current copy. 64 u64 mapped_size{}; ///< Size reserved for the current copy.
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index de4c23120..55f43e61b 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -10,11 +10,9 @@
10#include <variant> 10#include <variant>
11#include <vector> 11#include <vector>
12 12
13#include "common/alignment.h"
14#include "common/assert.h" 13#include "common/assert.h"
15#include "common/common_types.h" 14#include "common/common_types.h"
16#include "core/core.h" 15#include "core/core.h"
17#include "core/memory.h"
18#include "video_core/engines/maxwell_3d.h" 16#include "video_core/engines/maxwell_3d.h"
19#include "video_core/morton.h" 17#include "video_core/morton.h"
20#include "video_core/renderer_vulkan/maxwell_to_vk.h" 18#include "video_core/renderer_vulkan/maxwell_to_vk.h"
@@ -26,7 +24,6 @@
26#include "video_core/renderer_vulkan/vk_texture_cache.h" 24#include "video_core/renderer_vulkan/vk_texture_cache.h"
27#include "video_core/renderer_vulkan/wrapper.h" 25#include "video_core/renderer_vulkan/wrapper.h"
28#include "video_core/surface.h" 26#include "video_core/surface.h"
29#include "video_core/textures/convert.h"
30 27
31namespace Vulkan { 28namespace Vulkan {
32 29
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 115595f28..f211ccb1e 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -7,19 +7,13 @@
7#include <memory> 7#include <memory>
8#include <unordered_map> 8#include <unordered_map>
9 9
10#include "common/assert.h"
11#include "common/common_types.h" 10#include "common/common_types.h"
12#include "common/logging/log.h"
13#include "common/math_util.h"
14#include "video_core/gpu.h"
15#include "video_core/rasterizer_cache.h"
16#include "video_core/renderer_vulkan/vk_image.h" 11#include "video_core/renderer_vulkan/vk_image.h"
17#include "video_core/renderer_vulkan/vk_memory_manager.h" 12#include "video_core/renderer_vulkan/vk_memory_manager.h"
18#include "video_core/renderer_vulkan/vk_scheduler.h" 13#include "video_core/renderer_vulkan/vk_scheduler.h"
19#include "video_core/renderer_vulkan/wrapper.h" 14#include "video_core/renderer_vulkan/wrapper.h"
20#include "video_core/texture_cache/surface_base.h" 15#include "video_core/texture_cache/surface_base.h"
21#include "video_core/texture_cache/texture_cache.h" 16#include "video_core/texture_cache/texture_cache.h"
22#include "video_core/textures/decoders.h"
23 17
24namespace Core { 18namespace Core {
25class System; 19class System;
diff --git a/src/video_core/renderer_vulkan/vk_update_descriptor.h b/src/video_core/renderer_vulkan/vk_update_descriptor.h
index 6ba2c9997..cc7e3dff4 100644
--- a/src/video_core/renderer_vulkan/vk_update_descriptor.h
+++ b/src/video_core/renderer_vulkan/vk_update_descriptor.h
@@ -4,7 +4,6 @@
4 4
5#pragma once 5#pragma once
6 6
7#include <type_traits>
8#include <variant> 7#include <variant>
9#include <boost/container/static_vector.hpp> 8#include <boost/container/static_vector.hpp>
10 9
diff --git a/src/video_core/renderer_vulkan/wrapper.cpp b/src/video_core/renderer_vulkan/wrapper.cpp
index 9b94dfff1..2ce9b0626 100644
--- a/src/video_core/renderer_vulkan/wrapper.cpp
+++ b/src/video_core/renderer_vulkan/wrapper.cpp
@@ -2,6 +2,7 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <algorithm>
5#include <exception> 6#include <exception>
6#include <memory> 7#include <memory>
7#include <optional> 8#include <optional>
@@ -16,6 +17,23 @@ namespace Vulkan::vk {
16 17
17namespace { 18namespace {
18 19
20void SortPhysicalDevices(std::vector<VkPhysicalDevice>& devices, const InstanceDispatch& dld) {
21 std::stable_sort(devices.begin(), devices.end(), [&](auto lhs, auto rhs) {
22 // This will call Vulkan more than needed, but these calls are cheap.
23 const auto lhs_properties = vk::PhysicalDevice(lhs, dld).GetProperties();
24 const auto rhs_properties = vk::PhysicalDevice(rhs, dld).GetProperties();
25
26 // Prefer discrete GPUs, Nvidia over AMD, AMD over Intel, Intel over the rest.
27 const bool preferred =
28 (lhs_properties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU &&
29 rhs_properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) ||
30 (lhs_properties.vendorID == 0x10DE && rhs_properties.vendorID != 0x10DE) ||
31 (lhs_properties.vendorID == 0x1002 && rhs_properties.vendorID != 0x1002) ||
32 (lhs_properties.vendorID == 0x8086 && rhs_properties.vendorID != 0x8086);
33 return !preferred;
34 });
35}
36
19template <typename T> 37template <typename T>
20bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name, 38bool Proc(T& result, const InstanceDispatch& dld, const char* proc_name,
21 VkInstance instance = nullptr) noexcept { 39 VkInstance instance = nullptr) noexcept {
@@ -61,14 +79,15 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
61 X(vkCmdPipelineBarrier); 79 X(vkCmdPipelineBarrier);
62 X(vkCmdPushConstants); 80 X(vkCmdPushConstants);
63 X(vkCmdSetBlendConstants); 81 X(vkCmdSetBlendConstants);
64 X(vkCmdSetCheckpointNV);
65 X(vkCmdSetDepthBias); 82 X(vkCmdSetDepthBias);
66 X(vkCmdSetDepthBounds); 83 X(vkCmdSetDepthBounds);
84 X(vkCmdSetEvent);
67 X(vkCmdSetScissor); 85 X(vkCmdSetScissor);
68 X(vkCmdSetStencilCompareMask); 86 X(vkCmdSetStencilCompareMask);
69 X(vkCmdSetStencilReference); 87 X(vkCmdSetStencilReference);
70 X(vkCmdSetStencilWriteMask); 88 X(vkCmdSetStencilWriteMask);
71 X(vkCmdSetViewport); 89 X(vkCmdSetViewport);
90 X(vkCmdWaitEvents);
72 X(vkCreateBuffer); 91 X(vkCreateBuffer);
73 X(vkCreateBufferView); 92 X(vkCreateBufferView);
74 X(vkCreateCommandPool); 93 X(vkCreateCommandPool);
@@ -76,6 +95,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
76 X(vkCreateDescriptorPool); 95 X(vkCreateDescriptorPool);
77 X(vkCreateDescriptorSetLayout); 96 X(vkCreateDescriptorSetLayout);
78 X(vkCreateDescriptorUpdateTemplateKHR); 97 X(vkCreateDescriptorUpdateTemplateKHR);
98 X(vkCreateEvent);
79 X(vkCreateFence); 99 X(vkCreateFence);
80 X(vkCreateFramebuffer); 100 X(vkCreateFramebuffer);
81 X(vkCreateGraphicsPipelines); 101 X(vkCreateGraphicsPipelines);
@@ -94,6 +114,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
94 X(vkDestroyDescriptorPool); 114 X(vkDestroyDescriptorPool);
95 X(vkDestroyDescriptorSetLayout); 115 X(vkDestroyDescriptorSetLayout);
96 X(vkDestroyDescriptorUpdateTemplateKHR); 116 X(vkDestroyDescriptorUpdateTemplateKHR);
117 X(vkDestroyEvent);
97 X(vkDestroyFence); 118 X(vkDestroyFence);
98 X(vkDestroyFramebuffer); 119 X(vkDestroyFramebuffer);
99 X(vkDestroyImage); 120 X(vkDestroyImage);
@@ -113,10 +134,10 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
113 X(vkFreeMemory); 134 X(vkFreeMemory);
114 X(vkGetBufferMemoryRequirements); 135 X(vkGetBufferMemoryRequirements);
115 X(vkGetDeviceQueue); 136 X(vkGetDeviceQueue);
137 X(vkGetEventStatus);
116 X(vkGetFenceStatus); 138 X(vkGetFenceStatus);
117 X(vkGetImageMemoryRequirements); 139 X(vkGetImageMemoryRequirements);
118 X(vkGetQueryPoolResults); 140 X(vkGetQueryPoolResults);
119 X(vkGetQueueCheckpointDataNV);
120 X(vkMapMemory); 141 X(vkMapMemory);
121 X(vkQueueSubmit); 142 X(vkQueueSubmit);
122 X(vkResetFences); 143 X(vkResetFences);
@@ -271,6 +292,10 @@ void Destroy(VkDevice device, VkDeviceMemory handle, const DeviceDispatch& dld)
271 dld.vkFreeMemory(device, handle, nullptr); 292 dld.vkFreeMemory(device, handle, nullptr);
272} 293}
273 294
295void Destroy(VkDevice device, VkEvent handle, const DeviceDispatch& dld) noexcept {
296 dld.vkDestroyEvent(device, handle, nullptr);
297}
298
274void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept { 299void Destroy(VkDevice device, VkFence handle, const DeviceDispatch& dld) noexcept {
275 dld.vkDestroyFence(device, handle, nullptr); 300 dld.vkDestroyFence(device, handle, nullptr);
276} 301}
@@ -383,7 +408,8 @@ std::optional<std::vector<VkPhysicalDevice>> Instance::EnumeratePhysicalDevices(
383 if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) { 408 if (dld->vkEnumeratePhysicalDevices(handle, &num, physical_devices.data()) != VK_SUCCESS) {
384 return std::nullopt; 409 return std::nullopt;
385 } 410 }
386 return physical_devices; 411 SortPhysicalDevices(physical_devices, *dld);
412 return std::make_optional(std::move(physical_devices));
387} 413}
388 414
389DebugCallback Instance::TryCreateDebugCallback( 415DebugCallback Instance::TryCreateDebugCallback(
@@ -409,17 +435,6 @@ DebugCallback Instance::TryCreateDebugCallback(
409 return DebugCallback(messenger, handle, *dld); 435 return DebugCallback(messenger, handle, *dld);
410} 436}
411 437
412std::vector<VkCheckpointDataNV> Queue::GetCheckpointDataNV(const DeviceDispatch& dld) const {
413 if (!dld.vkGetQueueCheckpointDataNV) {
414 return {};
415 }
416 u32 num;
417 dld.vkGetQueueCheckpointDataNV(queue, &num, nullptr);
418 std::vector<VkCheckpointDataNV> checkpoints(num);
419 dld.vkGetQueueCheckpointDataNV(queue, &num, checkpoints.data());
420 return checkpoints;
421}
422
423void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const { 438void Buffer::BindMemory(VkDeviceMemory memory, VkDeviceSize offset) const {
424 Check(dld->vkBindBufferMemory(owner, handle, memory, offset)); 439 Check(dld->vkBindBufferMemory(owner, handle, memory, offset));
425} 440}
@@ -469,12 +484,11 @@ std::vector<VkImage> SwapchainKHR::GetImages() const {
469} 484}
470 485
471Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci, 486Device Device::Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
472 Span<const char*> enabled_extensions, 487 Span<const char*> enabled_extensions, const void* next,
473 const VkPhysicalDeviceFeatures2& enabled_features,
474 DeviceDispatch& dld) noexcept { 488 DeviceDispatch& dld) noexcept {
475 VkDeviceCreateInfo ci; 489 VkDeviceCreateInfo ci;
476 ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; 490 ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
477 ci.pNext = &enabled_features; 491 ci.pNext = next;
478 ci.flags = 0; 492 ci.flags = 0;
479 ci.queueCreateInfoCount = queues_ci.size(); 493 ci.queueCreateInfoCount = queues_ci.size();
480 ci.pQueueCreateInfos = queues_ci.data(); 494 ci.pQueueCreateInfos = queues_ci.data();
@@ -613,6 +627,16 @@ ShaderModule Device::CreateShaderModule(const VkShaderModuleCreateInfo& ci) cons
613 return ShaderModule(object, handle, *dld); 627 return ShaderModule(object, handle, *dld);
614} 628}
615 629
630Event Device::CreateEvent() const {
631 VkEventCreateInfo ci;
632 ci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
633 ci.pNext = nullptr;
634 ci.flags = 0;
635 VkEvent object;
636 Check(dld->vkCreateEvent(handle, &ci, nullptr, &object));
637 return Event(object, handle, *dld);
638}
639
616SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const { 640SwapchainKHR Device::CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const {
617 VkSwapchainKHR object; 641 VkSwapchainKHR object;
618 Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object)); 642 Check(dld->vkCreateSwapchainKHR(handle, &ci, nullptr, &object));
diff --git a/src/video_core/renderer_vulkan/wrapper.h b/src/video_core/renderer_vulkan/wrapper.h
index fb3657819..98937a77a 100644
--- a/src/video_core/renderer_vulkan/wrapper.h
+++ b/src/video_core/renderer_vulkan/wrapper.h
@@ -197,14 +197,15 @@ struct DeviceDispatch : public InstanceDispatch {
197 PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier; 197 PFN_vkCmdPipelineBarrier vkCmdPipelineBarrier;
198 PFN_vkCmdPushConstants vkCmdPushConstants; 198 PFN_vkCmdPushConstants vkCmdPushConstants;
199 PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants; 199 PFN_vkCmdSetBlendConstants vkCmdSetBlendConstants;
200 PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
201 PFN_vkCmdSetDepthBias vkCmdSetDepthBias; 200 PFN_vkCmdSetDepthBias vkCmdSetDepthBias;
202 PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds; 201 PFN_vkCmdSetDepthBounds vkCmdSetDepthBounds;
202 PFN_vkCmdSetEvent vkCmdSetEvent;
203 PFN_vkCmdSetScissor vkCmdSetScissor; 203 PFN_vkCmdSetScissor vkCmdSetScissor;
204 PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask; 204 PFN_vkCmdSetStencilCompareMask vkCmdSetStencilCompareMask;
205 PFN_vkCmdSetStencilReference vkCmdSetStencilReference; 205 PFN_vkCmdSetStencilReference vkCmdSetStencilReference;
206 PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask; 206 PFN_vkCmdSetStencilWriteMask vkCmdSetStencilWriteMask;
207 PFN_vkCmdSetViewport vkCmdSetViewport; 207 PFN_vkCmdSetViewport vkCmdSetViewport;
208 PFN_vkCmdWaitEvents vkCmdWaitEvents;
208 PFN_vkCreateBuffer vkCreateBuffer; 209 PFN_vkCreateBuffer vkCreateBuffer;
209 PFN_vkCreateBufferView vkCreateBufferView; 210 PFN_vkCreateBufferView vkCreateBufferView;
210 PFN_vkCreateCommandPool vkCreateCommandPool; 211 PFN_vkCreateCommandPool vkCreateCommandPool;
@@ -212,6 +213,7 @@ struct DeviceDispatch : public InstanceDispatch {
212 PFN_vkCreateDescriptorPool vkCreateDescriptorPool; 213 PFN_vkCreateDescriptorPool vkCreateDescriptorPool;
213 PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout; 214 PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayout;
214 PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR; 215 PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
216 PFN_vkCreateEvent vkCreateEvent;
215 PFN_vkCreateFence vkCreateFence; 217 PFN_vkCreateFence vkCreateFence;
216 PFN_vkCreateFramebuffer vkCreateFramebuffer; 218 PFN_vkCreateFramebuffer vkCreateFramebuffer;
217 PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines; 219 PFN_vkCreateGraphicsPipelines vkCreateGraphicsPipelines;
@@ -230,6 +232,7 @@ struct DeviceDispatch : public InstanceDispatch {
230 PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool; 232 PFN_vkDestroyDescriptorPool vkDestroyDescriptorPool;
231 PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout; 233 PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayout;
232 PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR; 234 PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
235 PFN_vkDestroyEvent vkDestroyEvent;
233 PFN_vkDestroyFence vkDestroyFence; 236 PFN_vkDestroyFence vkDestroyFence;
234 PFN_vkDestroyFramebuffer vkDestroyFramebuffer; 237 PFN_vkDestroyFramebuffer vkDestroyFramebuffer;
235 PFN_vkDestroyImage vkDestroyImage; 238 PFN_vkDestroyImage vkDestroyImage;
@@ -249,10 +252,10 @@ struct DeviceDispatch : public InstanceDispatch {
249 PFN_vkFreeMemory vkFreeMemory; 252 PFN_vkFreeMemory vkFreeMemory;
250 PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; 253 PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
251 PFN_vkGetDeviceQueue vkGetDeviceQueue; 254 PFN_vkGetDeviceQueue vkGetDeviceQueue;
255 PFN_vkGetEventStatus vkGetEventStatus;
252 PFN_vkGetFenceStatus vkGetFenceStatus; 256 PFN_vkGetFenceStatus vkGetFenceStatus;
253 PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; 257 PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
254 PFN_vkGetQueryPoolResults vkGetQueryPoolResults; 258 PFN_vkGetQueryPoolResults vkGetQueryPoolResults;
255 PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
256 PFN_vkMapMemory vkMapMemory; 259 PFN_vkMapMemory vkMapMemory;
257 PFN_vkQueueSubmit vkQueueSubmit; 260 PFN_vkQueueSubmit vkQueueSubmit;
258 PFN_vkResetFences vkResetFences; 261 PFN_vkResetFences vkResetFences;
@@ -281,6 +284,7 @@ void Destroy(VkDevice, VkDescriptorPool, const DeviceDispatch&) noexcept;
281void Destroy(VkDevice, VkDescriptorSetLayout, const DeviceDispatch&) noexcept; 284void Destroy(VkDevice, VkDescriptorSetLayout, const DeviceDispatch&) noexcept;
282void Destroy(VkDevice, VkDescriptorUpdateTemplateKHR, const DeviceDispatch&) noexcept; 285void Destroy(VkDevice, VkDescriptorUpdateTemplateKHR, const DeviceDispatch&) noexcept;
283void Destroy(VkDevice, VkDeviceMemory, const DeviceDispatch&) noexcept; 286void Destroy(VkDevice, VkDeviceMemory, const DeviceDispatch&) noexcept;
287void Destroy(VkDevice, VkEvent, const DeviceDispatch&) noexcept;
284void Destroy(VkDevice, VkFence, const DeviceDispatch&) noexcept; 288void Destroy(VkDevice, VkFence, const DeviceDispatch&) noexcept;
285void Destroy(VkDevice, VkFramebuffer, const DeviceDispatch&) noexcept; 289void Destroy(VkDevice, VkFramebuffer, const DeviceDispatch&) noexcept;
286void Destroy(VkDevice, VkImage, const DeviceDispatch&) noexcept; 290void Destroy(VkDevice, VkImage, const DeviceDispatch&) noexcept;
@@ -567,12 +571,8 @@ public:
567 /// Construct a queue handle. 571 /// Construct a queue handle.
568 constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {} 572 constexpr Queue(VkQueue queue, const DeviceDispatch& dld) noexcept : queue{queue}, dld{&dld} {}
569 573
570 /// Returns the checkpoint data. 574 VkResult Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const noexcept {
571 /// @note Returns an empty vector when the function pointer is not present. 575 return dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence);
572 std::vector<VkCheckpointDataNV> GetCheckpointDataNV(const DeviceDispatch& dld) const;
573
574 void Submit(Span<VkSubmitInfo> submit_infos, VkFence fence) const {
575 Check(dld->vkQueueSubmit(queue, submit_infos.size(), submit_infos.data(), fence));
576 } 576 }
577 577
578 VkResult Present(const VkPresentInfoKHR& present_info) const noexcept { 578 VkResult Present(const VkPresentInfoKHR& present_info) const noexcept {
@@ -654,13 +654,21 @@ public:
654 std::vector<VkImage> GetImages() const; 654 std::vector<VkImage> GetImages() const;
655}; 655};
656 656
657class Event : public Handle<VkEvent, VkDevice, DeviceDispatch> {
658 using Handle<VkEvent, VkDevice, DeviceDispatch>::Handle;
659
660public:
661 VkResult GetStatus() const noexcept {
662 return dld->vkGetEventStatus(owner, handle);
663 }
664};
665
657class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> { 666class Device : public Handle<VkDevice, NoOwner, DeviceDispatch> {
658 using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle; 667 using Handle<VkDevice, NoOwner, DeviceDispatch>::Handle;
659 668
660public: 669public:
661 static Device Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci, 670 static Device Create(VkPhysicalDevice physical_device, Span<VkDeviceQueueCreateInfo> queues_ci,
662 Span<const char*> enabled_extensions, 671 Span<const char*> enabled_extensions, const void* next,
663 const VkPhysicalDeviceFeatures2& enabled_features,
664 DeviceDispatch& dld) noexcept; 672 DeviceDispatch& dld) noexcept;
665 673
666 Queue GetQueue(u32 family_index) const noexcept; 674 Queue GetQueue(u32 family_index) const noexcept;
@@ -702,6 +710,8 @@ public:
702 710
703 ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const; 711 ShaderModule CreateShaderModule(const VkShaderModuleCreateInfo& ci) const;
704 712
713 Event CreateEvent() const;
714
705 SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const; 715 SwapchainKHR CreateSwapchainKHR(const VkSwapchainCreateInfoKHR& ci) const;
706 716
707 DeviceMemory TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept; 717 DeviceMemory TryAllocateMemory(const VkMemoryAllocateInfo& ai) const noexcept;
@@ -734,18 +744,11 @@ public:
734 dld->vkResetQueryPoolEXT(handle, query_pool, first, count); 744 dld->vkResetQueryPoolEXT(handle, query_pool, first, count);
735 } 745 }
736 746
737 void GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size, 747 VkResult GetQueryResults(VkQueryPool query_pool, u32 first, u32 count, std::size_t data_size,
738 void* data, VkDeviceSize stride, VkQueryResultFlags flags) const { 748 void* data, VkDeviceSize stride, VkQueryResultFlags flags) const
739 Check(dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride, 749 noexcept {
740 flags)); 750 return dld->vkGetQueryPoolResults(handle, query_pool, first, count, data_size, data, stride,
741 } 751 flags);
742
743 template <typename T>
744 T GetQueryResult(VkQueryPool query_pool, u32 first, VkQueryResultFlags flags) const {
745 static_assert(std::is_trivially_copyable_v<T>);
746 T value;
747 GetQueryResults(query_pool, first, 1, sizeof(T), &value, sizeof(T), flags);
748 return value;
749 } 752 }
750}; 753};
751 754
@@ -920,10 +923,6 @@ public:
920 dld->vkCmdPushConstants(handle, layout, flags, offset, size, values); 923 dld->vkCmdPushConstants(handle, layout, flags, offset, size, values);
921 } 924 }
922 925
923 void SetCheckpointNV(const void* checkpoint_marker) const noexcept {
924 dld->vkCmdSetCheckpointNV(handle, checkpoint_marker);
925 }
926
927 void SetViewport(u32 first, Span<VkViewport> viewports) const noexcept { 926 void SetViewport(u32 first, Span<VkViewport> viewports) const noexcept {
928 dld->vkCmdSetViewport(handle, first, viewports.size(), viewports.data()); 927 dld->vkCmdSetViewport(handle, first, viewports.size(), viewports.data());
929 } 928 }
@@ -956,6 +955,19 @@ public:
956 dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds); 955 dld->vkCmdSetDepthBounds(handle, min_depth_bounds, max_depth_bounds);
957 } 956 }
958 957
958 void SetEvent(VkEvent event, VkPipelineStageFlags stage_flags) const noexcept {
959 dld->vkCmdSetEvent(handle, event, stage_flags);
960 }
961
962 void WaitEvents(Span<VkEvent> events, VkPipelineStageFlags src_stage_mask,
963 VkPipelineStageFlags dst_stage_mask, Span<VkMemoryBarrier> memory_barriers,
964 Span<VkBufferMemoryBarrier> buffer_barriers,
965 Span<VkImageMemoryBarrier> image_barriers) const noexcept {
966 dld->vkCmdWaitEvents(handle, events.size(), events.data(), src_stage_mask, dst_stage_mask,
967 memory_barriers.size(), memory_barriers.data(), buffer_barriers.size(),
968 buffer_barriers.data(), image_barriers.size(), image_barriers.data());
969 }
970
959 void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers, 971 void BindTransformFeedbackBuffersEXT(u32 first, u32 count, const VkBuffer* buffers,
960 const VkDeviceSize* offsets, 972 const VkDeviceSize* offsets,
961 const VkDeviceSize* sizes) const noexcept { 973 const VkDeviceSize* sizes) const noexcept {
diff --git a/src/video_core/shader/control_flow.cpp b/src/video_core/shader/control_flow.cpp
index 6d313963a..8d86020f6 100644
--- a/src/video_core/shader/control_flow.cpp
+++ b/src/video_core/shader/control_flow.cpp
@@ -13,6 +13,7 @@
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "video_core/shader/ast.h" 14#include "video_core/shader/ast.h"
15#include "video_core/shader/control_flow.h" 15#include "video_core/shader/control_flow.h"
16#include "video_core/shader/memory_util.h"
16#include "video_core/shader/registry.h" 17#include "video_core/shader/registry.h"
17#include "video_core/shader/shader_ir.h" 18#include "video_core/shader/shader_ir.h"
18 19
@@ -115,17 +116,6 @@ Pred GetPredicate(u32 index, bool negated) {
115 return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL)); 116 return static_cast<Pred>(static_cast<u64>(index) + (negated ? 8ULL : 0ULL));
116} 117}
117 118
118/**
119 * Returns whether the instruction at the specified offset is a 'sched' instruction.
120 * Sched instructions always appear before a sequence of 3 instructions.
121 */
122constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
123 constexpr u32 SchedPeriod = 4;
124 u32 absolute_offset = offset - main_offset;
125
126 return (absolute_offset % SchedPeriod) == 0;
127}
128
129enum class ParseResult : u32 { 119enum class ParseResult : u32 {
130 ControlCaught, 120 ControlCaught,
131 BlockEnd, 121 BlockEnd,
@@ -587,8 +577,6 @@ bool TryQuery(CFGRebuildState& state) {
587 return true; 577 return true;
588} 578}
589 579
590} // Anonymous namespace
591
592void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) { 580void InsertBranch(ASTManager& mm, const BlockBranchInfo& branch_info) {
593 const auto get_expr = ([&](const Condition& cond) -> Expr { 581 const auto get_expr = ([&](const Condition& cond) -> Expr {
594 Expr result{}; 582 Expr result{};
@@ -655,6 +643,8 @@ void DecompileShader(CFGRebuildState& state) {
655 state.manager->Decompile(); 643 state.manager->Decompile();
656} 644}
657 645
646} // Anonymous namespace
647
658std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address, 648std::unique_ptr<ShaderCharacteristics> ScanFlow(const ProgramCode& program_code, u32 start_address,
659 const CompilerSettings& settings, 649 const CompilerSettings& settings,
660 Registry& registry) { 650 Registry& registry) {
diff --git a/src/video_core/shader/decode.cpp b/src/video_core/shader/decode.cpp
index 87ac9ac6c..a75a5cc63 100644
--- a/src/video_core/shader/decode.cpp
+++ b/src/video_core/shader/decode.cpp
@@ -13,6 +13,7 @@
13#include "video_core/engines/shader_bytecode.h" 13#include "video_core/engines/shader_bytecode.h"
14#include "video_core/engines/shader_header.h" 14#include "video_core/engines/shader_header.h"
15#include "video_core/shader/control_flow.h" 15#include "video_core/shader/control_flow.h"
16#include "video_core/shader/memory_util.h"
16#include "video_core/shader/node_helper.h" 17#include "video_core/shader/node_helper.h"
17#include "video_core/shader/shader_ir.h" 18#include "video_core/shader/shader_ir.h"
18 19
@@ -23,17 +24,6 @@ using Tegra::Shader::OpCode;
23 24
24namespace { 25namespace {
25 26
26/**
27 * Returns whether the instruction at the specified offset is a 'sched' instruction.
28 * Sched instructions always appear before a sequence of 3 instructions.
29 */
30constexpr bool IsSchedInstruction(u32 offset, u32 main_offset) {
31 constexpr u32 SchedPeriod = 4;
32 u32 absolute_offset = offset - main_offset;
33
34 return (absolute_offset % SchedPeriod) == 0;
35}
36
37void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver, 27void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
38 const std::list<Sampler>& used_samplers) { 28 const std::list<Sampler>& used_samplers) {
39 if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) { 29 if (gpu_driver.IsTextureHandlerSizeKnown() || used_samplers.size() <= 1) {
@@ -42,11 +32,11 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
42 u32 count{}; 32 u32 count{};
43 std::vector<u32> bound_offsets; 33 std::vector<u32> bound_offsets;
44 for (const auto& sampler : used_samplers) { 34 for (const auto& sampler : used_samplers) {
45 if (sampler.IsBindless()) { 35 if (sampler.is_bindless) {
46 continue; 36 continue;
47 } 37 }
48 ++count; 38 ++count;
49 bound_offsets.emplace_back(sampler.GetOffset()); 39 bound_offsets.emplace_back(sampler.offset);
50 } 40 }
51 if (count > 1) { 41 if (count > 1) {
52 gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets)); 42 gpu_driver.DeduceTextureHandlerSize(std::move(bound_offsets));
@@ -56,14 +46,14 @@ void DeduceTextureHandlerSize(VideoCore::GuestDriverProfile& gpu_driver,
56std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce, 46std::optional<u32> TryDeduceSamplerSize(const Sampler& sampler_to_deduce,
57 VideoCore::GuestDriverProfile& gpu_driver, 47 VideoCore::GuestDriverProfile& gpu_driver,
58 const std::list<Sampler>& used_samplers) { 48 const std::list<Sampler>& used_samplers) {
59 const u32 base_offset = sampler_to_deduce.GetOffset(); 49 const u32 base_offset = sampler_to_deduce.offset;
60 u32 max_offset{std::numeric_limits<u32>::max()}; 50 u32 max_offset{std::numeric_limits<u32>::max()};
61 for (const auto& sampler : used_samplers) { 51 for (const auto& sampler : used_samplers) {
62 if (sampler.IsBindless()) { 52 if (sampler.is_bindless) {
63 continue; 53 continue;
64 } 54 }
65 if (sampler.GetOffset() > base_offset) { 55 if (sampler.offset > base_offset) {
66 max_offset = std::min(sampler.GetOffset(), max_offset); 56 max_offset = std::min(sampler.offset, max_offset);
67 } 57 }
68 } 58 }
69 if (max_offset == std::numeric_limits<u32>::max()) { 59 if (max_offset == std::numeric_limits<u32>::max()) {
@@ -363,14 +353,14 @@ void ShaderIR::PostDecode() {
363 return; 353 return;
364 } 354 }
365 for (auto& sampler : used_samplers) { 355 for (auto& sampler : used_samplers) {
366 if (!sampler.IsIndexed()) { 356 if (!sampler.is_indexed) {
367 continue; 357 continue;
368 } 358 }
369 if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) { 359 if (const auto size = TryDeduceSamplerSize(sampler, gpu_driver, used_samplers)) {
370 sampler.SetSize(*size); 360 sampler.size = *size;
371 } else { 361 } else {
372 LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler"); 362 LOG_CRITICAL(HW_GPU, "Failed to deduce size of indexed sampler");
373 sampler.SetSize(1); 363 sampler.size = 1;
374 } 364 }
375 } 365 }
376} 366}
diff --git a/src/video_core/shader/decode/arithmetic_half.cpp b/src/video_core/shader/decode/arithmetic_half.cpp
index ee7d9a29d..a276aee44 100644
--- a/src/video_core/shader/decode/arithmetic_half.cpp
+++ b/src/video_core/shader/decode/arithmetic_half.cpp
@@ -19,22 +19,46 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
19 const Instruction instr = {program_code[pc]}; 19 const Instruction instr = {program_code[pc]};
20 const auto opcode = OpCode::Decode(instr); 20 const auto opcode = OpCode::Decode(instr);
21 21
22 if (opcode->get().GetId() == OpCode::Id::HADD2_C || 22 bool negate_a = false;
23 opcode->get().GetId() == OpCode::Id::HADD2_R) { 23 bool negate_b = false;
24 bool absolute_a = false;
25 bool absolute_b = false;
26
27 switch (opcode->get().GetId()) {
28 case OpCode::Id::HADD2_R:
24 if (instr.alu_half.ftz == 0) { 29 if (instr.alu_half.ftz == 0) {
25 LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName()); 30 LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
26 } 31 }
32 negate_a = ((instr.value >> 43) & 1) != 0;
33 negate_b = ((instr.value >> 31) & 1) != 0;
34 absolute_a = ((instr.value >> 44) & 1) != 0;
35 absolute_b = ((instr.value >> 30) & 1) != 0;
36 break;
37 case OpCode::Id::HADD2_C:
38 if (instr.alu_half.ftz == 0) {
39 LOG_DEBUG(HW_GPU, "{} without FTZ is not implemented", opcode->get().GetName());
40 }
41 negate_a = ((instr.value >> 43) & 1) != 0;
42 negate_b = ((instr.value >> 56) & 1) != 0;
43 absolute_a = ((instr.value >> 44) & 1) != 0;
44 absolute_b = ((instr.value >> 54) & 1) != 0;
45 break;
46 case OpCode::Id::HMUL2_R:
47 negate_a = ((instr.value >> 43) & 1) != 0;
48 absolute_a = ((instr.value >> 44) & 1) != 0;
49 absolute_b = ((instr.value >> 30) & 1) != 0;
50 break;
51 case OpCode::Id::HMUL2_C:
52 negate_b = ((instr.value >> 31) & 1) != 0;
53 absolute_a = ((instr.value >> 44) & 1) != 0;
54 absolute_b = ((instr.value >> 54) & 1) != 0;
55 break;
27 } 56 }
28 57
29 const bool negate_a =
30 opcode->get().GetId() != OpCode::Id::HMUL2_R && instr.alu_half.negate_a != 0;
31 const bool negate_b =
32 opcode->get().GetId() != OpCode::Id::HMUL2_C && instr.alu_half.negate_b != 0;
33
34 Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a); 58 Node op_a = UnpackHalfFloat(GetRegister(instr.gpr8), instr.alu_half.type_a);
35 op_a = GetOperandAbsNegHalf(op_a, instr.alu_half.abs_a, negate_a); 59 op_a = GetOperandAbsNegHalf(op_a, absolute_a, negate_a);
36 60
37 auto [type_b, op_b] = [&]() -> std::tuple<HalfType, Node> { 61 auto [type_b, op_b] = [this, instr, opcode]() -> std::pair<HalfType, Node> {
38 switch (opcode->get().GetId()) { 62 switch (opcode->get().GetId()) {
39 case OpCode::Id::HADD2_C: 63 case OpCode::Id::HADD2_C:
40 case OpCode::Id::HMUL2_C: 64 case OpCode::Id::HMUL2_C:
@@ -48,17 +72,16 @@ u32 ShaderIR::DecodeArithmeticHalf(NodeBlock& bb, u32 pc) {
48 } 72 }
49 }(); 73 }();
50 op_b = UnpackHalfFloat(op_b, type_b); 74 op_b = UnpackHalfFloat(op_b, type_b);
51 // redeclaration to avoid a bug in clang with reusing local bindings in lambdas 75 op_b = GetOperandAbsNegHalf(op_b, absolute_b, negate_b);
52 Node op_b_alt = GetOperandAbsNegHalf(op_b, instr.alu_half.abs_b, negate_b);
53 76
54 Node value = [&]() { 77 Node value = [this, opcode, op_a, op_b = op_b] {
55 switch (opcode->get().GetId()) { 78 switch (opcode->get().GetId()) {
56 case OpCode::Id::HADD2_C: 79 case OpCode::Id::HADD2_C:
57 case OpCode::Id::HADD2_R: 80 case OpCode::Id::HADD2_R:
58 return Operation(OperationCode::HAdd, PRECISE, op_a, op_b_alt); 81 return Operation(OperationCode::HAdd, PRECISE, op_a, op_b);
59 case OpCode::Id::HMUL2_C: 82 case OpCode::Id::HMUL2_C:
60 case OpCode::Id::HMUL2_R: 83 case OpCode::Id::HMUL2_R:
61 return Operation(OperationCode::HMul, PRECISE, op_a, op_b_alt); 84 return Operation(OperationCode::HMul, PRECISE, op_a, op_b);
62 default: 85 default:
63 UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName()); 86 UNIMPLEMENTED_MSG("Unhandled half float instruction: {}", opcode->get().GetName());
64 return Immediate(0); 87 return Immediate(0);
diff --git a/src/video_core/shader/decode/arithmetic_integer.cpp b/src/video_core/shader/decode/arithmetic_integer.cpp
index 0f4c3103a..a041519b7 100644
--- a/src/video_core/shader/decode/arithmetic_integer.cpp
+++ b/src/video_core/shader/decode/arithmetic_integer.cpp
@@ -35,15 +35,38 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
35 case OpCode::Id::IADD_C: 35 case OpCode::Id::IADD_C:
36 case OpCode::Id::IADD_R: 36 case OpCode::Id::IADD_R:
37 case OpCode::Id::IADD_IMM: { 37 case OpCode::Id::IADD_IMM: {
38 UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD saturation not implemented"); 38 UNIMPLEMENTED_IF_MSG(instr.alu.saturate_d, "IADD.SAT");
39 UNIMPLEMENTED_IF_MSG(instr.iadd.x && instr.generates_cc, "IADD.X Rd.CC");
39 40
40 op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true); 41 op_a = GetOperandAbsNegInteger(op_a, false, instr.alu_integer.negate_a, true);
41 op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true); 42 op_b = GetOperandAbsNegInteger(op_b, false, instr.alu_integer.negate_b, true);
42 43
43 const Node value = Operation(OperationCode::IAdd, PRECISE, op_a, op_b); 44 Node value = Operation(OperationCode::UAdd, op_a, op_b);
44 45
45 SetInternalFlagsFromInteger(bb, value, instr.generates_cc); 46 if (instr.iadd.x) {
46 SetRegister(bb, instr.gpr0, value); 47 Node carry = GetInternalFlag(InternalFlag::Carry);
48 Node x = Operation(OperationCode::Select, std::move(carry), Immediate(1), Immediate(0));
49 value = Operation(OperationCode::UAdd, std::move(value), std::move(x));
50 }
51
52 if (instr.generates_cc) {
53 const Node i0 = Immediate(0);
54
55 Node zero = Operation(OperationCode::LogicalIEqual, value, i0);
56 Node sign = Operation(OperationCode::LogicalILessThan, value, i0);
57 Node carry = Operation(OperationCode::LogicalAddCarry, op_a, op_b);
58
59 Node pos_a = Operation(OperationCode::LogicalIGreaterThan, op_a, i0);
60 Node pos_b = Operation(OperationCode::LogicalIGreaterThan, op_b, i0);
61 Node pos = Operation(OperationCode::LogicalAnd, std::move(pos_a), std::move(pos_b));
62 Node overflow = Operation(OperationCode::LogicalAnd, pos, sign);
63
64 SetInternalFlag(bb, InternalFlag::Zero, std::move(zero));
65 SetInternalFlag(bb, InternalFlag::Sign, std::move(sign));
66 SetInternalFlag(bb, InternalFlag::Carry, std::move(carry));
67 SetInternalFlag(bb, InternalFlag::Overflow, std::move(overflow));
68 }
69 SetRegister(bb, instr.gpr0, std::move(value));
47 break; 70 break;
48 } 71 }
49 case OpCode::Id::IADD3_C: 72 case OpCode::Id::IADD3_C:
@@ -249,8 +272,8 @@ u32 ShaderIR::DecodeArithmeticInteger(NodeBlock& bb, u32 pc) {
249 } 272 }
250 case OpCode::Id::LEA_IMM: { 273 case OpCode::Id::LEA_IMM: {
251 const bool neg = instr.lea.imm.neg != 0; 274 const bool neg = instr.lea.imm.neg != 0;
252 return {Immediate(static_cast<u32>(instr.lea.imm.entry_a)), 275 return {GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true),
253 GetOperandAbsNegInteger(GetRegister(instr.gpr8), false, neg, true), 276 Immediate(static_cast<u32>(instr.lea.imm.entry_a)),
254 Immediate(static_cast<u32>(instr.lea.imm.entry_b))}; 277 Immediate(static_cast<u32>(instr.lea.imm.entry_b))};
255 } 278 }
256 case OpCode::Id::LEA_RZ: { 279 case OpCode::Id::LEA_RZ: {
diff --git a/src/video_core/shader/decode/image.cpp b/src/video_core/shader/decode/image.cpp
index 85ee9aa5e..60b6ad72a 100644
--- a/src/video_core/shader/decode/image.cpp
+++ b/src/video_core/shader/decode/image.cpp
@@ -485,11 +485,10 @@ u32 ShaderIR::DecodeImage(NodeBlock& bb, u32 pc) {
485Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) { 485Image& ShaderIR::GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type) {
486 const auto offset = static_cast<u32>(image.index.Value()); 486 const auto offset = static_cast<u32>(image.index.Value());
487 487
488 const auto it = 488 const auto it = std::find_if(std::begin(used_images), std::end(used_images),
489 std::find_if(std::begin(used_images), std::end(used_images), 489 [offset](const Image& entry) { return entry.offset == offset; });
490 [offset](const Image& entry) { return entry.GetOffset() == offset; });
491 if (it != std::end(used_images)) { 490 if (it != std::end(used_images)) {
492 ASSERT(!it->IsBindless() && it->GetType() == it->GetType()); 491 ASSERT(!it->is_bindless && it->type == type);
493 return *it; 492 return *it;
494 } 493 }
495 494
@@ -505,13 +504,12 @@ Image& ShaderIR::GetBindlessImage(Tegra::Shader::Register reg, Tegra::Shader::Im
505 const auto buffer = std::get<1>(result); 504 const auto buffer = std::get<1>(result);
506 const auto offset = std::get<2>(result); 505 const auto offset = std::get<2>(result);
507 506
508 const auto it = 507 const auto it = std::find_if(std::begin(used_images), std::end(used_images),
509 std::find_if(std::begin(used_images), std::end(used_images), 508 [buffer, offset](const Image& entry) {
510 [buffer = buffer, offset = offset](const Image& entry) { 509 return entry.buffer == buffer && entry.offset == offset;
511 return entry.GetBuffer() == buffer && entry.GetOffset() == offset; 510 });
512 });
513 if (it != std::end(used_images)) { 511 if (it != std::end(used_images)) {
514 ASSERT(it->IsBindless() && it->GetType() == it->GetType()); 512 ASSERT(it->is_bindless && it->type == type);
515 return *it; 513 return *it;
516 } 514 }
517 515
diff --git a/src/video_core/shader/decode/memory.cpp b/src/video_core/shader/decode/memory.cpp
index 8112ead3e..9392f065b 100644
--- a/src/video_core/shader/decode/memory.cpp
+++ b/src/video_core/shader/decode/memory.cpp
@@ -479,7 +479,7 @@ std::tuple<Node, Node, GlobalMemoryBase> ShaderIR::TrackGlobalMemory(NodeBlock&
479 bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset))); 479 bb.push_back(Comment(fmt::format("Base address is c[0x{:x}][0x{:x}]", index, offset)));
480 480
481 const GlobalMemoryBase descriptor{index, offset}; 481 const GlobalMemoryBase descriptor{index, offset};
482 const auto& [entry, is_new] = used_global_memory.try_emplace(descriptor); 482 const auto& entry = used_global_memory.try_emplace(descriptor).first;
483 auto& usage = entry->second; 483 auto& usage = entry->second;
484 usage.is_written |= is_write; 484 usage.is_written |= is_write;
485 usage.is_read |= is_read; 485 usage.is_read |= is_read;
diff --git a/src/video_core/shader/decode/register_set_predicate.cpp b/src/video_core/shader/decode/register_set_predicate.cpp
index 8d54cce34..6116c31aa 100644
--- a/src/video_core/shader/decode/register_set_predicate.cpp
+++ b/src/video_core/shader/decode/register_set_predicate.cpp
@@ -2,6 +2,8 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <utility>
6
5#include "common/assert.h" 7#include "common/assert.h"
6#include "common/common_types.h" 8#include "common/common_types.h"
7#include "video_core/engines/shader_bytecode.h" 9#include "video_core/engines/shader_bytecode.h"
@@ -10,20 +12,20 @@
10 12
11namespace VideoCommon::Shader { 13namespace VideoCommon::Shader {
12 14
15using std::move;
13using Tegra::Shader::Instruction; 16using Tegra::Shader::Instruction;
14using Tegra::Shader::OpCode; 17using Tegra::Shader::OpCode;
15 18
16namespace { 19namespace {
17constexpr u64 NUM_PROGRAMMABLE_PREDICATES = 7; 20constexpr u64 NUM_CONDITION_CODES = 4;
18} 21constexpr u64 NUM_PREDICATES = 7;
22} // namespace
19 23
20u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) { 24u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
21 const Instruction instr = {program_code[pc]}; 25 const Instruction instr = {program_code[pc]};
22 const auto opcode = OpCode::Decode(instr); 26 const auto opcode = OpCode::Decode(instr);
23 27
24 UNIMPLEMENTED_IF(instr.p2r_r2p.mode != Tegra::Shader::R2pMode::Pr); 28 Node apply_mask = [this, opcode, instr] {
25
26 const Node apply_mask = [&] {
27 switch (opcode->get().GetId()) { 29 switch (opcode->get().GetId()) {
28 case OpCode::Id::R2P_IMM: 30 case OpCode::Id::R2P_IMM:
29 case OpCode::Id::P2R_IMM: 31 case OpCode::Id::P2R_IMM:
@@ -34,39 +36,43 @@ u32 ShaderIR::DecodeRegisterSetPredicate(NodeBlock& bb, u32 pc) {
34 } 36 }
35 }(); 37 }();
36 38
37 const auto offset = static_cast<u32>(instr.p2r_r2p.byte) * 8; 39 const u32 offset = static_cast<u32>(instr.p2r_r2p.byte) * 8;
40
41 const bool cc = instr.p2r_r2p.mode == Tegra::Shader::R2pMode::Cc;
42 const u64 num_entries = cc ? NUM_CONDITION_CODES : NUM_PREDICATES;
43 const auto get_entry = [this, cc](u64 entry) {
44 return cc ? GetInternalFlag(static_cast<InternalFlag>(entry)) : GetPredicate(entry);
45 };
38 46
39 switch (opcode->get().GetId()) { 47 switch (opcode->get().GetId()) {
40 case OpCode::Id::R2P_IMM: { 48 case OpCode::Id::R2P_IMM: {
41 const Node mask = GetRegister(instr.gpr8); 49 Node mask = GetRegister(instr.gpr8);
42 50
43 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { 51 for (u64 entry = 0; entry < num_entries; ++entry) {
44 const auto shift = static_cast<u32>(pred); 52 const u32 shift = static_cast<u32>(entry);
45 53
46 const Node apply_compare = BitfieldExtract(apply_mask, shift, 1); 54 Node apply = BitfieldExtract(apply_mask, shift, 1);
47 const Node condition = 55 Node condition = Operation(OperationCode::LogicalUNotEqual, apply, Immediate(0));
48 Operation(OperationCode::LogicalUNotEqual, apply_compare, Immediate(0));
49 56
50 const Node value_compare = BitfieldExtract(mask, offset + shift, 1); 57 Node compare = BitfieldExtract(mask, offset + shift, 1);
51 const Node value = 58 Node value = Operation(OperationCode::LogicalUNotEqual, move(compare), Immediate(0));
52 Operation(OperationCode::LogicalUNotEqual, value_compare, Immediate(0));
53 59
54 const Node code = Operation(OperationCode::LogicalAssign, GetPredicate(pred), value); 60 Node code = Operation(OperationCode::LogicalAssign, get_entry(entry), move(value));
55 bb.push_back(Conditional(condition, {code})); 61 bb.push_back(Conditional(condition, {move(code)}));
56 } 62 }
57 break; 63 break;
58 } 64 }
59 case OpCode::Id::P2R_IMM: { 65 case OpCode::Id::P2R_IMM: {
60 Node value = Immediate(0); 66 Node value = Immediate(0);
61 for (u64 pred = 0; pred < NUM_PROGRAMMABLE_PREDICATES; ++pred) { 67 for (u64 entry = 0; entry < num_entries; ++entry) {
62 Node bit = Operation(OperationCode::Select, GetPredicate(pred), Immediate(1U << pred), 68 Node bit = Operation(OperationCode::Select, get_entry(entry), Immediate(1U << entry),
63 Immediate(0)); 69 Immediate(0));
64 value = Operation(OperationCode::UBitwiseOr, std::move(value), std::move(bit)); 70 value = Operation(OperationCode::UBitwiseOr, move(value), move(bit));
65 } 71 }
66 value = Operation(OperationCode::UBitwiseAnd, std::move(value), apply_mask); 72 value = Operation(OperationCode::UBitwiseAnd, move(value), apply_mask);
67 value = BitfieldInsert(GetRegister(instr.gpr8), std::move(value), offset, 8); 73 value = BitfieldInsert(GetRegister(instr.gpr8), move(value), offset, 8);
68 74
69 SetRegister(bb, instr.gpr0, std::move(value)); 75 SetRegister(bb, instr.gpr0, move(value));
70 break; 76 break;
71 } 77 }
72 default: 78 default:
diff --git a/src/video_core/shader/decode/texture.cpp b/src/video_core/shader/decode/texture.cpp
index 6c4a1358b..8f0bb996e 100644
--- a/src/video_core/shader/decode/texture.cpp
+++ b/src/video_core/shader/decode/texture.cpp
@@ -139,15 +139,15 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
139 } 139 }
140 const Node component = Immediate(static_cast<u32>(instr.tld4s.component)); 140 const Node component = Immediate(static_cast<u32>(instr.tld4s.component));
141 141
142 const SamplerInfo info{TextureType::Texture2D, false, is_depth_compare}; 142 SamplerInfo info;
143 const Sampler& sampler = *GetSampler(instr.sampler, info); 143 info.is_shadow = is_depth_compare;
144 const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
144 145
145 Node4 values; 146 Node4 values;
146 for (u32 element = 0; element < values.size(); ++element) { 147 for (u32 element = 0; element < values.size(); ++element) {
147 auto coords_copy = coords; 148 MetaTexture meta{*sampler, {}, depth_compare, aoffi, {}, {},
148 MetaTexture meta{sampler, {}, depth_compare, aoffi, {}, {}, 149 {}, {}, component, element, {}};
149 {}, {}, component, element, {}}; 150 values[element] = Operation(OperationCode::TextureGather, meta, coords);
150 values[element] = Operation(OperationCode::TextureGather, meta, std::move(coords_copy));
151 } 151 }
152 152
153 if (instr.tld4s.fp16_flag) { 153 if (instr.tld4s.fp16_flag) {
@@ -165,19 +165,20 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
165 "AOFFI is not implemented"); 165 "AOFFI is not implemented");
166 166
167 const bool is_array = instr.txd.is_array != 0; 167 const bool is_array = instr.txd.is_array != 0;
168 u64 base_reg = instr.gpr8.Value();
169 const auto derivate_reg = instr.gpr20.Value(); 168 const auto derivate_reg = instr.gpr20.Value();
170 const auto texture_type = instr.txd.texture_type.Value(); 169 const auto texture_type = instr.txd.texture_type.Value();
171 const auto coord_count = GetCoordCount(texture_type); 170 const auto coord_count = GetCoordCount(texture_type);
172 Node index_var{}; 171 u64 base_reg = instr.gpr8.Value();
173 const Sampler* sampler = 172 Node index_var;
174 is_bindless ? GetBindlessSampler(base_reg, index_var, {{texture_type, is_array, false}}) 173 SamplerInfo info;
175 : GetSampler(instr.sampler, {{texture_type, is_array, false}}); 174 info.type = texture_type;
175 info.is_array = is_array;
176 const std::optional<Sampler> sampler = is_bindless
177 ? GetBindlessSampler(base_reg, info, index_var)
178 : GetSampler(instr.sampler, info);
176 Node4 values; 179 Node4 values;
177 if (sampler == nullptr) { 180 if (!sampler) {
178 for (u32 element = 0; element < values.size(); ++element) { 181 std::generate(values.begin(), values.end(), [this] { return Immediate(0); });
179 values[element] = Immediate(0);
180 }
181 WriteTexInstructionFloat(bb, instr, values); 182 WriteTexInstructionFloat(bb, instr, values);
182 break; 183 break;
183 } 184 }
@@ -215,14 +216,12 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
215 is_bindless = true; 216 is_bindless = true;
216 [[fallthrough]]; 217 [[fallthrough]];
217 case OpCode::Id::TXQ: { 218 case OpCode::Id::TXQ: {
218 // TODO: The new commits on the texture refactor, change the way samplers work. 219 Node index_var;
219 // Sadly, not all texture instructions specify the type of texture their sampler 220 const std::optional<Sampler> sampler = is_bindless
220 // uses. This must be fixed at a later instance. 221 ? GetBindlessSampler(instr.gpr8, {}, index_var)
221 Node index_var{}; 222 : GetSampler(instr.sampler, {});
222 const Sampler* sampler = 223
223 is_bindless ? GetBindlessSampler(instr.gpr8, index_var) : GetSampler(instr.sampler); 224 if (!sampler) {
224
225 if (sampler == nullptr) {
226 u32 indexer = 0; 225 u32 indexer = 0;
227 for (u32 element = 0; element < 4; ++element) { 226 for (u32 element = 0; element < 4; ++element) {
228 if (!instr.txq.IsComponentEnabled(element)) { 227 if (!instr.txq.IsComponentEnabled(element)) {
@@ -268,13 +267,17 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
268 UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV), 267 UNIMPLEMENTED_IF_MSG(instr.tmml.UsesMiscMode(Tegra::Shader::TextureMiscMode::NDV),
269 "NDV is not implemented"); 268 "NDV is not implemented");
270 269
271 auto texture_type = instr.tmml.texture_type.Value(); 270 const auto texture_type = instr.tmml.texture_type.Value();
272 const bool is_array = instr.tmml.array != 0; 271 const bool is_array = instr.tmml.array != 0;
273 Node index_var{}; 272 SamplerInfo info;
274 const Sampler* sampler = 273 info.type = texture_type;
275 is_bindless ? GetBindlessSampler(instr.gpr20, index_var) : GetSampler(instr.sampler); 274 info.is_array = is_array;
276 275 Node index_var;
277 if (sampler == nullptr) { 276 const std::optional<Sampler> sampler =
277 is_bindless ? GetBindlessSampler(instr.gpr20, info, index_var)
278 : GetSampler(instr.sampler, info);
279
280 if (!sampler) {
278 u32 indexer = 0; 281 u32 indexer = 0;
279 for (u32 element = 0; element < 2; ++element) { 282 for (u32 element = 0; element < 2; ++element) {
280 if (!instr.tmml.IsComponentEnabled(element)) { 283 if (!instr.tmml.IsComponentEnabled(element)) {
@@ -301,12 +304,11 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
301 coords.push_back(GetRegister(instr.gpr8.Value() + 1)); 304 coords.push_back(GetRegister(instr.gpr8.Value() + 1));
302 break; 305 break;
303 default: 306 default:
304 UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<u32>(texture_type)); 307 UNIMPLEMENTED_MSG("Unhandled texture type {}", static_cast<int>(texture_type));
305 308
306 // Fallback to interpreting as a 2D texture for now 309 // Fallback to interpreting as a 2D texture for now
307 coords.push_back(GetRegister(instr.gpr8.Value() + 0)); 310 coords.push_back(GetRegister(instr.gpr8.Value() + 0));
308 coords.push_back(GetRegister(instr.gpr8.Value() + 1)); 311 coords.push_back(GetRegister(instr.gpr8.Value() + 1));
309 texture_type = TextureType::Texture2D;
310 } 312 }
311 u32 indexer = 0; 313 u32 indexer = 0;
312 for (u32 element = 0; element < 2; ++element) { 314 for (u32 element = 0; element < 2; ++element) {
@@ -355,98 +357,103 @@ u32 ShaderIR::DecodeTexture(NodeBlock& bb, u32 pc) {
355 return pc; 357 return pc;
356} 358}
357 359
358ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset, 360ShaderIR::SamplerInfo ShaderIR::GetSamplerInfo(SamplerInfo info, u32 offset,
359 std::optional<u32> buffer) { 361 std::optional<u32> buffer) {
360 if (sampler_info) { 362 if (info.IsComplete()) {
361 return *sampler_info; 363 return info;
362 } 364 }
363 const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset) 365 const auto sampler = buffer ? registry.ObtainBindlessSampler(*buffer, offset)
364 : registry.ObtainBoundSampler(offset); 366 : registry.ObtainBoundSampler(offset);
365 if (!sampler) { 367 if (!sampler) {
366 LOG_WARNING(HW_GPU, "Unknown sampler info"); 368 LOG_WARNING(HW_GPU, "Unknown sampler info");
367 return SamplerInfo{TextureType::Texture2D, false, false, false}; 369 info.type = info.type.value_or(Tegra::Shader::TextureType::Texture2D);
368 } 370 info.is_array = info.is_array.value_or(false);
369 return SamplerInfo{sampler->texture_type, sampler->is_array != 0, sampler->is_shadow != 0, 371 info.is_shadow = info.is_shadow.value_or(false);
370 sampler->is_buffer != 0}; 372 info.is_buffer = info.is_buffer.value_or(false);
373 return info;
374 }
375 info.type = info.type.value_or(sampler->texture_type);
376 info.is_array = info.is_array.value_or(sampler->is_array != 0);
377 info.is_shadow = info.is_shadow.value_or(sampler->is_shadow != 0);
378 info.is_buffer = info.is_buffer.value_or(sampler->is_buffer != 0);
379 return info;
371} 380}
372 381
373const Sampler* ShaderIR::GetSampler(const Tegra::Shader::Sampler& sampler, 382std::optional<Sampler> ShaderIR::GetSampler(Tegra::Shader::Sampler sampler,
374 std::optional<SamplerInfo> sampler_info) { 383 SamplerInfo sampler_info) {
375 const auto offset = static_cast<u32>(sampler.index.Value()); 384 const auto offset = static_cast<u32>(sampler.index.Value());
376 const auto info = GetSamplerInfo(sampler_info, offset); 385 const auto info = GetSamplerInfo(sampler_info, offset);
377 386
378 // If this sampler has already been used, return the existing mapping. 387 // If this sampler has already been used, return the existing mapping.
379 const auto it = 388 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
380 std::find_if(used_samplers.begin(), used_samplers.end(), 389 [offset](const Sampler& entry) { return entry.offset == offset; });
381 [offset](const Sampler& entry) { return entry.GetOffset() == offset; });
382 if (it != used_samplers.end()) { 390 if (it != used_samplers.end()) {
383 ASSERT(!it->IsBindless() && it->GetType() == info.type && it->IsArray() == info.is_array && 391 ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
384 it->IsShadow() == info.is_shadow && it->IsBuffer() == info.is_buffer); 392 it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer);
385 return &*it; 393 return *it;
386 } 394 }
387 395
388 // Otherwise create a new mapping for this sampler 396 // Otherwise create a new mapping for this sampler
389 const auto next_index = static_cast<u32>(used_samplers.size()); 397 const auto next_index = static_cast<u32>(used_samplers.size());
390 return &used_samplers.emplace_back(next_index, offset, info.type, info.is_array, info.is_shadow, 398 return used_samplers.emplace_back(next_index, offset, *info.type, *info.is_array,
391 info.is_buffer, false); 399 *info.is_shadow, *info.is_buffer, false);
392} 400}
393 401
394const Sampler* ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var, 402std::optional<Sampler> ShaderIR::GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
395 std::optional<SamplerInfo> sampler_info) { 403 Node& index_var) {
396 const Node sampler_register = GetRegister(reg); 404 const Node sampler_register = GetRegister(reg);
397 const auto [base_node, tracked_sampler_info] = 405 const auto [base_node, tracked_sampler_info] =
398 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size())); 406 TrackBindlessSampler(sampler_register, global_code, static_cast<s64>(global_code.size()));
399 ASSERT(base_node != nullptr); 407 ASSERT(base_node != nullptr);
400 if (base_node == nullptr) { 408 if (base_node == nullptr) {
401 return nullptr; 409 return std::nullopt;
402 } 410 }
403 411
404 if (const auto bindless_sampler_info = 412 if (const auto bindless_sampler_info =
405 std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) { 413 std::get_if<BindlessSamplerNode>(&*tracked_sampler_info)) {
406 const u32 buffer = bindless_sampler_info->GetIndex(); 414 const u32 buffer = bindless_sampler_info->GetIndex();
407 const u32 offset = bindless_sampler_info->GetOffset(); 415 const u32 offset = bindless_sampler_info->GetOffset();
408 const auto info = GetSamplerInfo(sampler_info, offset, buffer); 416 info = GetSamplerInfo(info, offset, buffer);
409 417
410 // If this sampler has already been used, return the existing mapping. 418 // If this sampler has already been used, return the existing mapping.
411 const auto it = 419 const auto it = std::find_if(used_samplers.begin(), used_samplers.end(),
412 std::find_if(used_samplers.begin(), used_samplers.end(), 420 [buffer = buffer, offset = offset](const Sampler& entry) {
413 [buffer = buffer, offset = offset](const Sampler& entry) { 421 return entry.buffer == buffer && entry.offset == offset;
414 return entry.GetBuffer() == buffer && entry.GetOffset() == offset; 422 });
415 });
416 if (it != used_samplers.end()) { 423 if (it != used_samplers.end()) {
417 ASSERT(it->IsBindless() && it->GetType() == info.type && 424 ASSERT(it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
418 it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow); 425 it->is_shadow == info.is_shadow);
419 return &*it; 426 return *it;
420 } 427 }
421 428
422 // Otherwise create a new mapping for this sampler 429 // Otherwise create a new mapping for this sampler
423 const auto next_index = static_cast<u32>(used_samplers.size()); 430 const auto next_index = static_cast<u32>(used_samplers.size());
424 return &used_samplers.emplace_back(next_index, offset, buffer, info.type, info.is_array, 431 return used_samplers.emplace_back(next_index, offset, buffer, *info.type, *info.is_array,
425 info.is_shadow, info.is_buffer, false); 432 *info.is_shadow, *info.is_buffer, false);
426 } else if (const auto array_sampler_info = 433 }
427 std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) { 434 if (const auto array_sampler_info = std::get_if<ArraySamplerNode>(&*tracked_sampler_info)) {
428 const u32 base_offset = array_sampler_info->GetBaseOffset() / 4; 435 const u32 base_offset = array_sampler_info->GetBaseOffset() / 4;
429 index_var = GetCustomVariable(array_sampler_info->GetIndexVar()); 436 index_var = GetCustomVariable(array_sampler_info->GetIndexVar());
430 const auto info = GetSamplerInfo(sampler_info, base_offset); 437 info = GetSamplerInfo(info, base_offset);
431 438
432 // If this sampler has already been used, return the existing mapping. 439 // If this sampler has already been used, return the existing mapping.
433 const auto it = std::find_if( 440 const auto it = std::find_if(
434 used_samplers.begin(), used_samplers.end(), 441 used_samplers.begin(), used_samplers.end(),
435 [base_offset](const Sampler& entry) { return entry.GetOffset() == base_offset; }); 442 [base_offset](const Sampler& entry) { return entry.offset == base_offset; });
436 if (it != used_samplers.end()) { 443 if (it != used_samplers.end()) {
437 ASSERT(!it->IsBindless() && it->GetType() == info.type && 444 ASSERT(!it->is_bindless && it->type == info.type && it->is_array == info.is_array &&
438 it->IsArray() == info.is_array && it->IsShadow() == info.is_shadow && 445 it->is_shadow == info.is_shadow && it->is_buffer == info.is_buffer &&
439 it->IsBuffer() == info.is_buffer && it->IsIndexed()); 446 it->is_indexed);
440 return &*it; 447 return *it;
441 } 448 }
442 449
443 uses_indexed_samplers = true; 450 uses_indexed_samplers = true;
444 // Otherwise create a new mapping for this sampler 451 // Otherwise create a new mapping for this sampler
445 const auto next_index = static_cast<u32>(used_samplers.size()); 452 const auto next_index = static_cast<u32>(used_samplers.size());
446 return &used_samplers.emplace_back(next_index, base_offset, info.type, info.is_array, 453 return used_samplers.emplace_back(next_index, base_offset, *info.type, *info.is_array,
447 info.is_shadow, info.is_buffer, true); 454 *info.is_shadow, *info.is_buffer, true);
448 } 455 }
449 return nullptr; 456 return std::nullopt;
450} 457}
451 458
452void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) { 459void ShaderIR::WriteTexInstructionFloat(NodeBlock& bb, Instruction instr, const Node4& components) {
@@ -531,10 +538,16 @@ Node4 ShaderIR::GetTextureCode(Instruction instr, TextureType texture_type,
531 ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow, 538 ASSERT_MSG(texture_type != TextureType::Texture3D || !is_array || !is_shadow,
532 "Illegal texture type"); 539 "Illegal texture type");
533 540
534 const SamplerInfo info{texture_type, is_array, is_shadow, false}; 541 SamplerInfo info;
542 info.type = texture_type;
543 info.is_array = is_array;
544 info.is_shadow = is_shadow;
545 info.is_buffer = false;
546
535 Node index_var; 547 Node index_var;
536 const Sampler* sampler = is_bindless ? GetBindlessSampler(*bindless_reg, index_var, info) 548 const std::optional<Sampler> sampler = is_bindless
537 : GetSampler(instr.sampler, info); 549 ? GetBindlessSampler(*bindless_reg, info, index_var)
550 : GetSampler(instr.sampler, info);
538 if (!sampler) { 551 if (!sampler) {
539 return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)}; 552 return {Immediate(0), Immediate(0), Immediate(0), Immediate(0)};
540 } 553 }
@@ -593,8 +606,9 @@ Node4 ShaderIR::GetTexCode(Instruction instr, TextureType texture_type,
593 ++parameter_register; 606 ++parameter_register;
594 } 607 }
595 608
596 const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( 609 const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
597 texture_type, depth_compare, is_array, lod_bias_enabled, 4, 5); 610 lod_bias_enabled, 4, 5);
611 const auto coord_count = std::get<0>(coord_counts);
598 // If enabled arrays index is always stored in the gpr8 field 612 // If enabled arrays index is always stored in the gpr8 field
599 const u64 array_register = instr.gpr8.Value(); 613 const u64 array_register = instr.gpr8.Value();
600 // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used 614 // First coordinate index is the gpr8 or gpr8 + 1 when arrays are used
@@ -632,8 +646,10 @@ Node4 ShaderIR::GetTexsCode(Instruction instr, TextureType texture_type,
632 const bool lod_bias_enabled = 646 const bool lod_bias_enabled =
633 (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ); 647 (process_mode != TextureProcessMode::None && process_mode != TextureProcessMode::LZ);
634 648
635 const auto [coord_count, total_coord_count] = ValidateAndGetCoordinateElement( 649 const auto coord_counts = ValidateAndGetCoordinateElement(texture_type, depth_compare, is_array,
636 texture_type, depth_compare, is_array, lod_bias_enabled, 4, 4); 650 lod_bias_enabled, 4, 4);
651 const auto coord_count = std::get<0>(coord_counts);
652
637 // If enabled arrays index is always stored in the gpr8 field 653 // If enabled arrays index is always stored in the gpr8 field
638 const u64 array_register = instr.gpr8.Value(); 654 const u64 array_register = instr.gpr8.Value();
639 // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used 655 // First coordinate index is stored in gpr8 field or (gpr8 + 1) when arrays are used
@@ -682,12 +698,17 @@ Node4 ShaderIR::GetTld4Code(Instruction instr, TextureType texture_type, bool de
682 698
683 u64 parameter_register = instr.gpr20.Value(); 699 u64 parameter_register = instr.gpr20.Value();
684 700
685 const SamplerInfo info{texture_type, is_array, depth_compare, false}; 701 SamplerInfo info;
686 Node index_var{}; 702 info.type = texture_type;
687 const Sampler* sampler = is_bindless ? GetBindlessSampler(parameter_register++, index_var, info) 703 info.is_array = is_array;
688 : GetSampler(instr.sampler, info); 704 info.is_shadow = depth_compare;
705
706 Node index_var;
707 const std::optional<Sampler> sampler =
708 is_bindless ? GetBindlessSampler(parameter_register++, info, index_var)
709 : GetSampler(instr.sampler, info);
689 Node4 values; 710 Node4 values;
690 if (sampler == nullptr) { 711 if (!sampler) {
691 for (u32 element = 0; element < values.size(); ++element) { 712 for (u32 element = 0; element < values.size(); ++element) {
692 values[element] = Immediate(0); 713 values[element] = Immediate(0);
693 } 714 }
@@ -742,12 +763,12 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
742 // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr}; 763 // const Node aoffi_register{is_aoffi ? GetRegister(gpr20_cursor++) : nullptr};
743 // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr}; 764 // const Node multisample{is_multisample ? GetRegister(gpr20_cursor++) : nullptr};
744 765
745 const auto& sampler = *GetSampler(instr.sampler); 766 const std::optional<Sampler> sampler = GetSampler(instr.sampler, {});
746 767
747 Node4 values; 768 Node4 values;
748 for (u32 element = 0; element < values.size(); ++element) { 769 for (u32 element = 0; element < values.size(); ++element) {
749 auto coords_copy = coords; 770 auto coords_copy = coords;
750 MetaTexture meta{sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}}; 771 MetaTexture meta{*sampler, array_register, {}, {}, {}, {}, {}, lod, {}, element, {}};
751 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 772 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
752 } 773 }
753 774
@@ -755,7 +776,11 @@ Node4 ShaderIR::GetTldCode(Tegra::Shader::Instruction instr) {
755} 776}
756 777
757Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) { 778Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is_array) {
758 const Sampler& sampler = *GetSampler(instr.sampler); 779 SamplerInfo info;
780 info.type = texture_type;
781 info.is_array = is_array;
782 info.is_shadow = false;
783 const std::optional<Sampler> sampler = GetSampler(instr.sampler, info);
759 784
760 const std::size_t type_coord_count = GetCoordCount(texture_type); 785 const std::size_t type_coord_count = GetCoordCount(texture_type);
761 const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL; 786 const bool lod_enabled = instr.tlds.GetTextureProcessMode() == TextureProcessMode::LL;
@@ -783,7 +808,7 @@ Node4 ShaderIR::GetTldsCode(Instruction instr, TextureType texture_type, bool is
783 Node4 values; 808 Node4 values;
784 for (u32 element = 0; element < values.size(); ++element) { 809 for (u32 element = 0; element < values.size(); ++element) {
785 auto coords_copy = coords; 810 auto coords_copy = coords;
786 MetaTexture meta{sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}}; 811 MetaTexture meta{*sampler, array, {}, {}, {}, {}, {}, lod, {}, element, {}};
787 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy)); 812 values[element] = Operation(OperationCode::TexelFetch, meta, std::move(coords_copy));
788 } 813 }
789 return values; 814 return values;
diff --git a/src/video_core/shader/memory_util.cpp b/src/video_core/shader/memory_util.cpp
new file mode 100644
index 000000000..074f21691
--- /dev/null
+++ b/src/video_core/shader/memory_util.cpp
@@ -0,0 +1,77 @@
1// Copyright 2020 yuzu Emulator Project
2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included.
4
5#include <algorithm>
6#include <cstddef>
7
8#include <boost/container_hash/hash.hpp>
9
10#include "common/common_types.h"
11#include "core/core.h"
12#include "video_core/engines/maxwell_3d.h"
13#include "video_core/memory_manager.h"
14#include "video_core/shader/memory_util.h"
15#include "video_core/shader/shader_ir.h"
16
17namespace VideoCommon::Shader {
18
19GPUVAddr GetShaderAddress(Core::System& system,
20 Tegra::Engines::Maxwell3D::Regs::ShaderProgram program) {
21 const auto& gpu{system.GPU().Maxwell3D()};
22 const auto& shader_config{gpu.regs.shader_config[static_cast<std::size_t>(program)]};
23 return gpu.regs.code_address.CodeAddress() + shader_config.offset;
24}
25
bool IsSchedInstruction(std::size_t offset, std::size_t main_offset) {
    // Relative to the program entry point, every fourth instruction slot
    // carries scheduling information instead of an executable instruction.
    constexpr std::size_t sched_period = 4;
    const std::size_t relative_offset = offset - main_offset;
    return (relative_offset % sched_period) == 0;
}
32
33std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute) {
34 // This is the encoded version of BRA that jumps to itself. All Nvidia
35 // shaders end with one.
36 static constexpr u64 SELF_JUMPING_BRANCH = 0xE2400FFFFF07000FULL;
37 static constexpr u64 MASK = 0xFFFFFFFFFF7FFFFFULL;
38
39 const std::size_t start_offset = is_compute ? KERNEL_MAIN_OFFSET : STAGE_MAIN_OFFSET;
40 std::size_t offset = start_offset;
41 while (offset < program.size()) {
42 const u64 instruction = program[offset];
43 if (!IsSchedInstruction(offset, start_offset)) {
44 if ((instruction & MASK) == SELF_JUMPING_BRANCH) {
45 // End on Maxwell's "nop" instruction
46 break;
47 }
48 if (instruction == 0) {
49 break;
50 }
51 }
52 ++offset;
53 }
54 // The last instruction is included in the program size
55 return std::min(offset + 1, program.size());
56}
57
58ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
59 const u8* host_ptr, bool is_compute) {
60 ProgramCode code(VideoCommon::Shader::MAX_PROGRAM_LENGTH);
61 ASSERT_OR_EXECUTE(host_ptr != nullptr, { return code; });
62 memory_manager.ReadBlockUnsafe(gpu_addr, code.data(), code.size() * sizeof(u64));
63 code.resize(CalculateProgramSize(code, is_compute));
64 return code;
65}
66
67u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
68 const ProgramCode& code_b) {
69 u64 unique_identifier = boost::hash_value(code);
70 if (is_a) {
71 // VertexA programs include two programs
72 boost::hash_combine(unique_identifier, boost::hash_value(code_b));
73 }
74 return unique_identifier;
75}
76
77} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/memory_util.h b/src/video_core/shader/memory_util.h
new file mode 100644
index 000000000..be90d24fd
--- /dev/null
+++ b/src/video_core/shader/memory_util.h
@@ -0,0 +1,47 @@
// Copyright 2020 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <cstddef>
#include <vector>

#include "common/common_types.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/engines/shader_type.h"

namespace Core {
class System;
}

namespace Tegra {
class MemoryManager;
}

namespace VideoCommon::Shader {

/// Raw shader bytecode, stored as 64-bit instruction words.
using ProgramCode = std::vector<u64>;

/// Offset (in u64 words) at which executable code begins for graphics stages.
/// NOTE(review): presumably skips a stage program header — confirm against decoder.
constexpr u32 STAGE_MAIN_OFFSET = 10;
/// Offset (in u64 words) at which executable code begins for compute kernels.
constexpr u32 KERNEL_MAIN_OFFSET = 0;

/// Gets the address for the specified shader stage program
GPUVAddr GetShaderAddress(Core::System& system,
                          Tegra::Engines::Maxwell3D::Regs::ShaderProgram program);

/// Gets if the current instruction offset is a scheduler instruction
/// (one scheduling slot appears every 4 instructions relative to main_offset)
bool IsSchedInstruction(std::size_t offset, std::size_t main_offset);

/// Calculates the size of a program stream, terminator instruction included
std::size_t CalculateProgramSize(const ProgramCode& program, bool is_compute);

/// Gets the shader program code from memory for the specified address;
/// returns an untrimmed buffer when host_ptr is null
ProgramCode GetShaderCode(Tegra::MemoryManager& memory_manager, GPUVAddr gpu_addr,
                          const u8* host_ptr, bool is_compute);

/// Hashes one (or two) program streams; code_b is folded in only when is_a is set
u64 GetUniqueIdentifier(Tegra::Engines::ShaderType shader_type, bool is_a, const ProgramCode& code,
                        const ProgramCode& code_b = {});

} // namespace VideoCommon::Shader
diff --git a/src/video_core/shader/node.h b/src/video_core/shader/node.h
index 3eee961f5..601c822d2 100644
--- a/src/video_core/shader/node.h
+++ b/src/video_core/shader/node.h
@@ -132,6 +132,8 @@ enum class OperationCode {
132 LogicalUNotEqual, /// (uint a, uint b) -> bool 132 LogicalUNotEqual, /// (uint a, uint b) -> bool
133 LogicalUGreaterEqual, /// (uint a, uint b) -> bool 133 LogicalUGreaterEqual, /// (uint a, uint b) -> bool
134 134
135 LogicalAddCarry, /// (uint a, uint b) -> bool
136
135 Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 137 Logical2HLessThan, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
136 Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 138 Logical2HEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
137 Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2 139 Logical2HLessEqual, /// (MetaHalfArithmetic, f16vec2 a, f16vec2) -> bool2
@@ -265,76 +267,30 @@ class ArraySamplerNode;
265using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>; 267using TrackSamplerData = std::variant<BindlessSamplerNode, ArraySamplerNode>;
266using TrackSampler = std::shared_ptr<TrackSamplerData>; 268using TrackSampler = std::shared_ptr<TrackSamplerData>;
267 269
268class Sampler { 270struct Sampler {
269public: 271 /// Bound samplers constructor
270 /// This constructor is for bound samplers
271 constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type, 272 constexpr explicit Sampler(u32 index, u32 offset, Tegra::Shader::TextureType type,
272 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed) 273 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
273 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow}, 274 : index{index}, offset{offset}, type{type}, is_array{is_array}, is_shadow{is_shadow},
274 is_buffer{is_buffer}, is_indexed{is_indexed} {} 275 is_buffer{is_buffer}, is_indexed{is_indexed} {}
275 276
276 /// This constructor is for bindless samplers 277 /// Bindless samplers constructor
277 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type, 278 constexpr explicit Sampler(u32 index, u32 offset, u32 buffer, Tegra::Shader::TextureType type,
278 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed) 279 bool is_array, bool is_shadow, bool is_buffer, bool is_indexed)
279 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array}, 280 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_array{is_array},
280 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {} 281 is_shadow{is_shadow}, is_buffer{is_buffer}, is_bindless{true}, is_indexed{is_indexed} {}
281 282
282 constexpr u32 GetIndex() const { 283 u32 index = 0; ///< Emulated index given for the this sampler.
283 return index; 284 u32 offset = 0; ///< Offset in the const buffer from where the sampler is being read.
284 } 285 u32 buffer = 0; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
285 286 u32 size = 1; ///< Size of the sampler.
286 constexpr u32 GetOffset() const {
287 return offset;
288 }
289
290 constexpr u32 GetBuffer() const {
291 return buffer;
292 }
293
294 constexpr Tegra::Shader::TextureType GetType() const {
295 return type;
296 }
297
298 constexpr bool IsArray() const {
299 return is_array;
300 }
301
302 constexpr bool IsShadow() const {
303 return is_shadow;
304 }
305
306 constexpr bool IsBuffer() const {
307 return is_buffer;
308 }
309
310 constexpr bool IsBindless() const {
311 return is_bindless;
312 }
313
314 constexpr bool IsIndexed() const {
315 return is_indexed;
316 }
317
318 constexpr u32 Size() const {
319 return size;
320 }
321
322 constexpr void SetSize(u32 new_size) {
323 size = new_size;
324 }
325
326private:
327 u32 index{}; ///< Emulated index given for the this sampler.
328 u32 offset{}; ///< Offset in the const buffer from where the sampler is being read.
329 u32 buffer{}; ///< Buffer where the bindless sampler is being read (unused on bound samplers).
330 u32 size{1}; ///< Size of the sampler.
331 287
332 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc) 288 Tegra::Shader::TextureType type{}; ///< The type used to sample this texture (Texture2D, etc)
333 bool is_array{}; ///< Whether the texture is being sampled as an array texture or not. 289 bool is_array = false; ///< Whether the texture is being sampled as an array texture or not.
334 bool is_shadow{}; ///< Whether the texture is being sampled as a depth texture or not. 290 bool is_shadow = false; ///< Whether the texture is being sampled as a depth texture or not.
335 bool is_buffer{}; ///< Whether the texture is a texture buffer without sampler. 291 bool is_buffer = false; ///< Whether the texture is a texture buffer without sampler.
336 bool is_bindless{}; ///< Whether this sampler belongs to a bindless texture or not. 292 bool is_bindless = false; ///< Whether this sampler belongs to a bindless texture or not.
337 bool is_indexed{}; ///< Whether this sampler is an indexed array of textures. 293 bool is_indexed = false; ///< Whether this sampler is an indexed array of textures.
338}; 294};
339 295
340/// Represents a tracked bindless sampler into a direct const buffer 296/// Represents a tracked bindless sampler into a direct const buffer
@@ -379,13 +335,13 @@ private:
379 u32 offset; 335 u32 offset;
380}; 336};
381 337
382class Image final { 338struct Image {
383public: 339public:
384 /// This constructor is for bound images 340 /// Bound images constructor
385 constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type) 341 constexpr explicit Image(u32 index, u32 offset, Tegra::Shader::ImageType type)
386 : index{index}, offset{offset}, type{type} {} 342 : index{index}, offset{offset}, type{type} {}
387 343
388 /// This constructor is for bindless samplers 344 /// Bindless samplers constructor
389 constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type) 345 constexpr explicit Image(u32 index, u32 offset, u32 buffer, Tegra::Shader::ImageType type)
390 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {} 346 : index{index}, offset{offset}, buffer{buffer}, type{type}, is_bindless{true} {}
391 347
@@ -403,53 +359,20 @@ public:
403 is_atomic = true; 359 is_atomic = true;
404 } 360 }
405 361
406 constexpr u32 GetIndex() const { 362 u32 index = 0;
407 return index; 363 u32 offset = 0;
408 } 364 u32 buffer = 0;
409
410 constexpr u32 GetOffset() const {
411 return offset;
412 }
413
414 constexpr u32 GetBuffer() const {
415 return buffer;
416 }
417
418 constexpr Tegra::Shader::ImageType GetType() const {
419 return type;
420 }
421
422 constexpr bool IsBindless() const {
423 return is_bindless;
424 }
425
426 constexpr bool IsWritten() const {
427 return is_written;
428 }
429
430 constexpr bool IsRead() const {
431 return is_read;
432 }
433
434 constexpr bool IsAtomic() const {
435 return is_atomic;
436 }
437
438private:
439 u32 index{};
440 u32 offset{};
441 u32 buffer{};
442 365
443 Tegra::Shader::ImageType type{}; 366 Tegra::Shader::ImageType type{};
444 bool is_bindless{}; 367 bool is_bindless = false;
445 bool is_written{}; 368 bool is_written = false;
446 bool is_read{}; 369 bool is_read = false;
447 bool is_atomic{}; 370 bool is_atomic = false;
448}; 371};
449 372
450struct GlobalMemoryBase { 373struct GlobalMemoryBase {
451 u32 cbuf_index{}; 374 u32 cbuf_index = 0;
452 u32 cbuf_offset{}; 375 u32 cbuf_offset = 0;
453 376
454 bool operator<(const GlobalMemoryBase& rhs) const { 377 bool operator<(const GlobalMemoryBase& rhs) const {
455 return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset); 378 return std::tie(cbuf_index, cbuf_offset) < std::tie(rhs.cbuf_index, rhs.cbuf_offset);
@@ -463,7 +386,7 @@ struct MetaArithmetic {
463 386
464/// Parameters describing a texture sampler 387/// Parameters describing a texture sampler
465struct MetaTexture { 388struct MetaTexture {
466 const Sampler& sampler; 389 Sampler sampler;
467 Node array; 390 Node array;
468 Node depth_compare; 391 Node depth_compare;
469 std::vector<Node> aoffi; 392 std::vector<Node> aoffi;
diff --git a/src/video_core/shader/shader_ir.h b/src/video_core/shader/shader_ir.h
index c6e7bdf50..15ae152f2 100644
--- a/src/video_core/shader/shader_ir.h
+++ b/src/video_core/shader/shader_ir.h
@@ -18,6 +18,7 @@
18#include "video_core/engines/shader_header.h" 18#include "video_core/engines/shader_header.h"
19#include "video_core/shader/ast.h" 19#include "video_core/shader/ast.h"
20#include "video_core/shader/compiler_settings.h" 20#include "video_core/shader/compiler_settings.h"
21#include "video_core/shader/memory_util.h"
21#include "video_core/shader/node.h" 22#include "video_core/shader/node.h"
22#include "video_core/shader/registry.h" 23#include "video_core/shader/registry.h"
23 24
@@ -25,16 +26,13 @@ namespace VideoCommon::Shader {
25 26
26struct ShaderBlock; 27struct ShaderBlock;
27 28
28using ProgramCode = std::vector<u64>;
29
30constexpr u32 MAX_PROGRAM_LENGTH = 0x1000; 29constexpr u32 MAX_PROGRAM_LENGTH = 0x1000;
31 30
32class ConstBuffer { 31struct ConstBuffer {
33public: 32 constexpr explicit ConstBuffer(u32 max_offset, bool is_indirect)
34 explicit ConstBuffer(u32 max_offset, bool is_indirect)
35 : max_offset{max_offset}, is_indirect{is_indirect} {} 33 : max_offset{max_offset}, is_indirect{is_indirect} {}
36 34
37 ConstBuffer() = default; 35 constexpr ConstBuffer() = default;
38 36
39 void MarkAsUsed(u64 offset) { 37 void MarkAsUsed(u64 offset) {
40 max_offset = std::max(max_offset, static_cast<u32>(offset)); 38 max_offset = std::max(max_offset, static_cast<u32>(offset));
@@ -57,8 +55,8 @@ public:
57 } 55 }
58 56
59private: 57private:
60 u32 max_offset{}; 58 u32 max_offset = 0;
61 bool is_indirect{}; 59 bool is_indirect = false;
62}; 60};
63 61
64struct GlobalMemoryUsage { 62struct GlobalMemoryUsage {
@@ -192,10 +190,14 @@ private:
192 friend class ASTDecoder; 190 friend class ASTDecoder;
193 191
194 struct SamplerInfo { 192 struct SamplerInfo {
195 Tegra::Shader::TextureType type; 193 std::optional<Tegra::Shader::TextureType> type;
196 bool is_array; 194 std::optional<bool> is_array;
197 bool is_shadow; 195 std::optional<bool> is_shadow;
198 bool is_buffer; 196 std::optional<bool> is_buffer;
197
198 constexpr bool IsComplete() const noexcept {
199 return type && is_array && is_shadow && is_buffer;
200 }
199 }; 201 };
200 202
201 void Decode(); 203 void Decode();
@@ -328,16 +330,15 @@ private:
328 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation); 330 OperationCode GetPredicateCombiner(Tegra::Shader::PredOperation operation);
329 331
330 /// Queries the missing sampler info from the execution context. 332 /// Queries the missing sampler info from the execution context.
331 SamplerInfo GetSamplerInfo(std::optional<SamplerInfo> sampler_info, u32 offset, 333 SamplerInfo GetSamplerInfo(SamplerInfo info, u32 offset,
332 std::optional<u32> buffer = std::nullopt); 334 std::optional<u32> buffer = std::nullopt);
333 335
334 /// Accesses a texture sampler 336 /// Accesses a texture sampler.
335 const Sampler* GetSampler(const Tegra::Shader::Sampler& sampler, 337 std::optional<Sampler> GetSampler(Tegra::Shader::Sampler sampler, SamplerInfo info);
336 std::optional<SamplerInfo> sampler_info = std::nullopt);
337 338
338 /// Accesses a texture sampler for a bindless texture. 339 /// Accesses a texture sampler for a bindless texture.
339 const Sampler* GetBindlessSampler(Tegra::Shader::Register reg, Node& index_var, 340 std::optional<Sampler> GetBindlessSampler(Tegra::Shader::Register reg, SamplerInfo info,
340 std::optional<SamplerInfo> sampler_info = std::nullopt); 341 Node& index_var);
341 342
342 /// Accesses an image. 343 /// Accesses an image.
343 Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type); 344 Image& GetImage(Tegra::Shader::Image image, Tegra::Shader::ImageType type);
diff --git a/src/video_core/shader/track.cpp b/src/video_core/shader/track.cpp
index 513e9bf49..eb97bfd41 100644
--- a/src/video_core/shader/track.cpp
+++ b/src/video_core/shader/track.cpp
@@ -153,21 +153,13 @@ std::tuple<Node, u32, u32> ShaderIR::TrackCbuf(Node tracked, const NodeBlock& co
153 if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) { 153 if (gpr->GetIndex() == Tegra::Shader::Register::ZeroIndex) {
154 return {}; 154 return {};
155 } 155 }
156 s64 current_cursor = cursor; 156 // Reduce the cursor in one to avoid infinite loops when the instruction sets the same
157 while (current_cursor > 0) { 157 // register that it uses as operand
158 // Reduce the cursor in one to avoid infinite loops when the instruction sets the same 158 const auto [source, new_cursor] = TrackRegister(gpr, code, cursor - 1);
159 // register that it uses as operand 159 if (!source) {
160 const auto [source, new_cursor] = TrackRegister(gpr, code, current_cursor - 1); 160 return {};
161 current_cursor = new_cursor;
162 if (!source) {
163 continue;
164 }
165 const auto [base_address, index, offset] = TrackCbuf(source, code, current_cursor);
166 if (base_address != nullptr) {
167 return {base_address, index, offset};
168 }
169 } 161 }
170 return {}; 162 return TrackCbuf(source, code, new_cursor);
171 } 163 }
172 if (const auto operation = std::get_if<OperationNode>(&*tracked)) { 164 if (const auto operation = std::get_if<OperationNode>(&*tracked)) {
173 for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) { 165 for (std::size_t i = operation->GetOperandsCount(); i > 0; --i) {
diff --git a/src/video_core/texture_cache/surface_base.h b/src/video_core/texture_cache/surface_base.h
index c5ab21f56..79e10ffbb 100644
--- a/src/video_core/texture_cache/surface_base.h
+++ b/src/video_core/texture_cache/surface_base.h
@@ -192,6 +192,22 @@ public:
192 index = index_; 192 index = index_;
193 } 193 }
194 194
195 void SetMemoryMarked(bool is_memory_marked_) {
196 is_memory_marked = is_memory_marked_;
197 }
198
199 bool IsMemoryMarked() const {
200 return is_memory_marked;
201 }
202
203 void SetSyncPending(bool is_sync_pending_) {
204 is_sync_pending = is_sync_pending_;
205 }
206
207 bool IsSyncPending() const {
208 return is_sync_pending;
209 }
210
195 void MarkAsPicked(bool is_picked_) { 211 void MarkAsPicked(bool is_picked_) {
196 is_picked = is_picked_; 212 is_picked = is_picked_;
197 } 213 }
@@ -303,6 +319,8 @@ private:
303 bool is_target{}; 319 bool is_target{};
304 bool is_registered{}; 320 bool is_registered{};
305 bool is_picked{}; 321 bool is_picked{};
322 bool is_memory_marked{};
323 bool is_sync_pending{};
306 u32 index{NO_RT}; 324 u32 index{NO_RT};
307 u64 modification_tick{}; 325 u64 modification_tick{};
308}; 326};
diff --git a/src/video_core/texture_cache/surface_params.cpp b/src/video_core/texture_cache/surface_params.cpp
index 0de499946..884fabffe 100644
--- a/src/video_core/texture_cache/surface_params.cpp
+++ b/src/video_core/texture_cache/surface_params.cpp
@@ -81,7 +81,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
81 params.pixel_format = lookup_table.GetPixelFormat( 81 params.pixel_format = lookup_table.GetPixelFormat(
82 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); 82 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
83 params.type = GetFormatType(params.pixel_format); 83 params.type = GetFormatType(params.pixel_format);
84 if (entry.IsShadow() && params.type == SurfaceType::ColorTexture) { 84 if (entry.is_shadow && params.type == SurfaceType::ColorTexture) {
85 switch (params.pixel_format) { 85 switch (params.pixel_format) {
86 case PixelFormat::R16U: 86 case PixelFormat::R16U:
87 case PixelFormat::R16F: 87 case PixelFormat::R16F:
@@ -108,7 +108,7 @@ SurfaceParams SurfaceParams::CreateForTexture(const FormatLookupTable& lookup_ta
108 params.emulated_levels = 1; 108 params.emulated_levels = 1;
109 params.is_layered = false; 109 params.is_layered = false;
110 } else { 110 } else {
111 params.target = TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); 111 params.target = TextureTypeToSurfaceTarget(entry.type, entry.is_array);
112 params.width = tic.Width(); 112 params.width = tic.Width();
113 params.height = tic.Height(); 113 params.height = tic.Height();
114 params.depth = tic.Depth(); 114 params.depth = tic.Depth();
@@ -138,7 +138,7 @@ SurfaceParams SurfaceParams::CreateForImage(const FormatLookupTable& lookup_tabl
138 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type); 138 tic.format, params.srgb_conversion, tic.r_type, tic.g_type, tic.b_type, tic.a_type);
139 params.type = GetFormatType(params.pixel_format); 139 params.type = GetFormatType(params.pixel_format);
140 params.type = GetFormatType(params.pixel_format); 140 params.type = GetFormatType(params.pixel_format);
141 params.target = ImageTypeToSurfaceTarget(entry.GetType()); 141 params.target = ImageTypeToSurfaceTarget(entry.type);
142 // TODO: on 1DBuffer we should use the tic info. 142 // TODO: on 1DBuffer we should use the tic info.
143 if (tic.IsBuffer()) { 143 if (tic.IsBuffer()) {
144 params.target = SurfaceTarget::TextureBuffer; 144 params.target = SurfaceTarget::TextureBuffer;
@@ -248,12 +248,12 @@ SurfaceParams SurfaceParams::CreateForFermiCopySurface(
248 248
249VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( 249VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
250 const VideoCommon::Shader::Sampler& entry) { 250 const VideoCommon::Shader::Sampler& entry) {
251 return TextureTypeToSurfaceTarget(entry.GetType(), entry.IsArray()); 251 return TextureTypeToSurfaceTarget(entry.type, entry.is_array);
252} 252}
253 253
254VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget( 254VideoCore::Surface::SurfaceTarget SurfaceParams::ExpectedTarget(
255 const VideoCommon::Shader::Image& entry) { 255 const VideoCommon::Shader::Image& entry) {
256 return ImageTypeToSurfaceTarget(entry.GetType()); 256 return ImageTypeToSurfaceTarget(entry.type);
257} 257}
258 258
259bool SurfaceParams::IsLayered() const { 259bool SurfaceParams::IsLayered() const {
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 69ca08fd1..d6efc34b2 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -6,6 +6,7 @@
6 6
7#include <algorithm> 7#include <algorithm>
8#include <array> 8#include <array>
9#include <list>
9#include <memory> 10#include <memory>
10#include <mutex> 11#include <mutex>
11#include <set> 12#include <set>
@@ -62,6 +63,30 @@ public:
62 } 63 }
63 } 64 }
64 65
66 void OnCPUWrite(VAddr addr, std::size_t size) {
67 std::lock_guard lock{mutex};
68
69 for (const auto& surface : GetSurfacesInRegion(addr, size)) {
70 if (surface->IsMemoryMarked()) {
71 UnmarkMemory(surface);
72 surface->SetSyncPending(true);
73 marked_for_unregister.emplace_back(surface);
74 }
75 }
76 }
77
78 void SyncGuestHost() {
79 std::lock_guard lock{mutex};
80
81 for (const auto& surface : marked_for_unregister) {
82 if (surface->IsRegistered()) {
83 surface->SetSyncPending(false);
84 Unregister(surface);
85 }
86 }
87 marked_for_unregister.clear();
88 }
89
65 /** 90 /**
66 * Guarantees that rendertargets don't unregister themselves if the 91 * Guarantees that rendertargets don't unregister themselves if the
67 * collide. Protection is currently only done on 3D slices. 92 * collide. Protection is currently only done on 3D slices.
@@ -85,10 +110,20 @@ public:
85 return a->GetModificationTick() < b->GetModificationTick(); 110 return a->GetModificationTick() < b->GetModificationTick();
86 }); 111 });
87 for (const auto& surface : surfaces) { 112 for (const auto& surface : surfaces) {
113 mutex.unlock();
88 FlushSurface(surface); 114 FlushSurface(surface);
115 mutex.lock();
89 } 116 }
90 } 117 }
91 118
119 bool MustFlushRegion(VAddr addr, std::size_t size) {
120 std::lock_guard lock{mutex};
121
122 const auto surfaces = GetSurfacesInRegion(addr, size);
123 return std::any_of(surfaces.cbegin(), surfaces.cend(),
124 [](const TSurface& surface) { return surface->IsModified(); });
125 }
126
92 TView GetTextureSurface(const Tegra::Texture::TICEntry& tic, 127 TView GetTextureSurface(const Tegra::Texture::TICEntry& tic,
93 const VideoCommon::Shader::Sampler& entry) { 128 const VideoCommon::Shader::Sampler& entry) {
94 std::lock_guard lock{mutex}; 129 std::lock_guard lock{mutex};
@@ -108,7 +143,7 @@ public:
108 } 143 }
109 144
110 const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)}; 145 const auto params{SurfaceParams::CreateForTexture(format_lookup_table, tic, entry)};
111 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); 146 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
112 if (guard_samplers) { 147 if (guard_samplers) {
113 sampled_textures.push_back(surface); 148 sampled_textures.push_back(surface);
114 } 149 }
@@ -128,7 +163,7 @@ public:
128 return GetNullSurface(SurfaceParams::ExpectedTarget(entry)); 163 return GetNullSurface(SurfaceParams::ExpectedTarget(entry));
129 } 164 }
130 const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)}; 165 const auto params{SurfaceParams::CreateForImage(format_lookup_table, tic, entry)};
131 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, false); 166 const auto [surface, view] = GetSurface(gpu_addr, *cpu_addr, params, true, false);
132 if (guard_samplers) { 167 if (guard_samplers) {
133 sampled_textures.push_back(surface); 168 sampled_textures.push_back(surface);
134 } 169 }
@@ -143,7 +178,7 @@ public:
143 return any_rt; 178 return any_rt;
144 } 179 }
145 180
146 TView GetDepthBufferSurface() { 181 TView GetDepthBufferSurface(bool preserve_contents) {
147 std::lock_guard lock{mutex}; 182 std::lock_guard lock{mutex};
148 auto& maxwell3d = system.GPU().Maxwell3D(); 183 auto& maxwell3d = system.GPU().Maxwell3D();
149 if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) { 184 if (!maxwell3d.dirty.flags[VideoCommon::Dirty::ZetaBuffer]) {
@@ -164,7 +199,7 @@ public:
164 return {}; 199 return {};
165 } 200 }
166 const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)}; 201 const auto depth_params{SurfaceParams::CreateForDepthBuffer(system)};
167 auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, true); 202 auto surface_view = GetSurface(gpu_addr, *cpu_addr, depth_params, preserve_contents, true);
168 if (depth_buffer.target) 203 if (depth_buffer.target)
169 depth_buffer.target->MarkAsRenderTarget(false, NO_RT); 204 depth_buffer.target->MarkAsRenderTarget(false, NO_RT);
170 depth_buffer.target = surface_view.first; 205 depth_buffer.target = surface_view.first;
@@ -174,7 +209,7 @@ public:
174 return surface_view.second; 209 return surface_view.second;
175 } 210 }
176 211
177 TView GetColorBufferSurface(std::size_t index) { 212 TView GetColorBufferSurface(std::size_t index, bool preserve_contents) {
178 std::lock_guard lock{mutex}; 213 std::lock_guard lock{mutex};
179 ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets); 214 ASSERT(index < Tegra::Engines::Maxwell3D::Regs::NumRenderTargets);
180 auto& maxwell3d = system.GPU().Maxwell3D(); 215 auto& maxwell3d = system.GPU().Maxwell3D();
@@ -204,10 +239,17 @@ public:
204 return {}; 239 return {};
205 } 240 }
206 241
207 auto surface_view = GetSurface(gpu_addr, *cpu_addr, 242 auto surface_view =
208 SurfaceParams::CreateForFramebuffer(system, index), true); 243 GetSurface(gpu_addr, *cpu_addr, SurfaceParams::CreateForFramebuffer(system, index),
209 if (render_targets[index].target) 244 preserve_contents, true);
210 render_targets[index].target->MarkAsRenderTarget(false, NO_RT); 245 if (render_targets[index].target) {
246 auto& surface = render_targets[index].target;
247 surface->MarkAsRenderTarget(false, NO_RT);
248 const auto& cr_params = surface->GetSurfaceParams();
249 if (!cr_params.is_tiled && Settings::values.use_asynchronous_gpu_emulation) {
250 AsyncFlushSurface(surface);
251 }
252 }
211 render_targets[index].target = surface_view.first; 253 render_targets[index].target = surface_view.first;
212 render_targets[index].view = surface_view.second; 254 render_targets[index].view = surface_view.second;
213 if (render_targets[index].target) 255 if (render_targets[index].target)
@@ -259,9 +301,9 @@ public:
259 const std::optional<VAddr> src_cpu_addr = 301 const std::optional<VAddr> src_cpu_addr =
260 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr); 302 system.GPU().MemoryManager().GpuToCpuAddress(src_gpu_addr);
261 std::pair<TSurface, TView> dst_surface = 303 std::pair<TSurface, TView> dst_surface =
262 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, false); 304 GetSurface(dst_gpu_addr, *dst_cpu_addr, dst_params, true, false);
263 std::pair<TSurface, TView> src_surface = 305 std::pair<TSurface, TView> src_surface =
264 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, false); 306 GetSurface(src_gpu_addr, *src_cpu_addr, src_params, true, false);
265 ImageBlit(src_surface.second, dst_surface.second, copy_config); 307 ImageBlit(src_surface.second, dst_surface.second, copy_config);
266 dst_surface.first->MarkAsModified(true, Tick()); 308 dst_surface.first->MarkAsModified(true, Tick());
267 } 309 }
@@ -284,6 +326,34 @@ public:
284 return ++ticks; 326 return ++ticks;
285 } 327 }
286 328
329 void CommitAsyncFlushes() {
330 committed_flushes.push_back(uncommitted_flushes);
331 uncommitted_flushes.reset();
332 }
333
334 bool HasUncommittedFlushes() const {
335 return uncommitted_flushes != nullptr;
336 }
337
338 bool ShouldWaitAsyncFlushes() const {
339 return !committed_flushes.empty() && committed_flushes.front() != nullptr;
340 }
341
342 void PopAsyncFlushes() {
343 if (committed_flushes.empty()) {
344 return;
345 }
346 auto& flush_list = committed_flushes.front();
347 if (!flush_list) {
348 committed_flushes.pop_front();
349 return;
350 }
351 for (TSurface& surface : *flush_list) {
352 FlushSurface(surface);
353 }
354 committed_flushes.pop_front();
355 }
356
287protected: 357protected:
288 explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer, 358 explicit TextureCache(Core::System& system, VideoCore::RasterizerInterface& rasterizer,
289 bool is_astc_supported) 359 bool is_astc_supported)
@@ -345,9 +415,20 @@ protected:
345 surface->SetCpuAddr(*cpu_addr); 415 surface->SetCpuAddr(*cpu_addr);
346 RegisterInnerCache(surface); 416 RegisterInnerCache(surface);
347 surface->MarkAsRegistered(true); 417 surface->MarkAsRegistered(true);
418 surface->SetMemoryMarked(true);
348 rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1); 419 rasterizer.UpdatePagesCachedCount(*cpu_addr, size, 1);
349 } 420 }
350 421
422 void UnmarkMemory(TSurface surface) {
423 if (!surface->IsMemoryMarked()) {
424 return;
425 }
426 const std::size_t size = surface->GetSizeInBytes();
427 const VAddr cpu_addr = surface->GetCpuAddr();
428 rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1);
429 surface->SetMemoryMarked(false);
430 }
431
351 void Unregister(TSurface surface) { 432 void Unregister(TSurface surface) {
352 if (guard_render_targets && surface->IsProtected()) { 433 if (guard_render_targets && surface->IsProtected()) {
353 return; 434 return;
@@ -355,9 +436,11 @@ protected:
355 if (!guard_render_targets && surface->IsRenderTarget()) { 436 if (!guard_render_targets && surface->IsRenderTarget()) {
356 ManageRenderTargetUnregister(surface); 437 ManageRenderTargetUnregister(surface);
357 } 438 }
358 const std::size_t size = surface->GetSizeInBytes(); 439 UnmarkMemory(surface);
359 const VAddr cpu_addr = surface->GetCpuAddr(); 440 if (surface->IsSyncPending()) {
360 rasterizer.UpdatePagesCachedCount(cpu_addr, size, -1); 441 marked_for_unregister.remove(surface);
442 surface->SetSyncPending(false);
443 }
361 UnregisterInnerCache(surface); 444 UnregisterInnerCache(surface);
362 surface->MarkAsRegistered(false); 445 surface->MarkAsRegistered(false);
363 ReserveSurface(surface->GetSurfaceParams(), surface); 446 ReserveSurface(surface->GetSurfaceParams(), surface);
@@ -417,7 +500,7 @@ private:
417 **/ 500 **/
418 RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params, 501 RecycleStrategy PickStrategy(std::vector<TSurface>& overlaps, const SurfaceParams& params,
419 const GPUVAddr gpu_addr, const MatchTopologyResult untopological) { 502 const GPUVAddr gpu_addr, const MatchTopologyResult untopological) {
420 if (Settings::values.use_accurate_gpu_emulation) { 503 if (Settings::IsGPULevelExtreme()) {
421 return RecycleStrategy::Flush; 504 return RecycleStrategy::Flush;
422 } 505 }
423 // 3D Textures decision 506 // 3D Textures decision
@@ -450,18 +533,22 @@ private:
450 * @param overlaps The overlapping surfaces registered in the cache. 533 * @param overlaps The overlapping surfaces registered in the cache.
451 * @param params The parameters for the new surface. 534 * @param params The parameters for the new surface.
452 * @param gpu_addr The starting address of the new surface. 535 * @param gpu_addr The starting address of the new surface.
536 * @param preserve_contents Indicates that the new surface should be loaded from memory or left
537 * blank.
453 * @param untopological Indicates to the recycler that the texture has no way to match the 538 * @param untopological Indicates to the recycler that the texture has no way to match the
454 * overlaps due to topological reasons. 539 * overlaps due to topological reasons.
455 **/ 540 **/
456 std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps, 541 std::pair<TSurface, TView> RecycleSurface(std::vector<TSurface>& overlaps,
457 const SurfaceParams& params, const GPUVAddr gpu_addr, 542 const SurfaceParams& params, const GPUVAddr gpu_addr,
543 const bool preserve_contents,
458 const MatchTopologyResult untopological) { 544 const MatchTopologyResult untopological) {
545 const bool do_load = preserve_contents && Settings::IsGPULevelExtreme();
459 for (auto& surface : overlaps) { 546 for (auto& surface : overlaps) {
460 Unregister(surface); 547 Unregister(surface);
461 } 548 }
462 switch (PickStrategy(overlaps, params, gpu_addr, untopological)) { 549 switch (PickStrategy(overlaps, params, gpu_addr, untopological)) {
463 case RecycleStrategy::Ignore: { 550 case RecycleStrategy::Ignore: {
464 return InitializeSurface(gpu_addr, params, Settings::values.use_accurate_gpu_emulation); 551 return InitializeSurface(gpu_addr, params, do_load);
465 } 552 }
466 case RecycleStrategy::Flush: { 553 case RecycleStrategy::Flush: {
467 std::sort(overlaps.begin(), overlaps.end(), 554 std::sort(overlaps.begin(), overlaps.end(),
@@ -471,7 +558,7 @@ private:
471 for (auto& surface : overlaps) { 558 for (auto& surface : overlaps) {
472 FlushSurface(surface); 559 FlushSurface(surface);
473 } 560 }
474 return InitializeSurface(gpu_addr, params); 561 return InitializeSurface(gpu_addr, params, preserve_contents);
475 } 562 }
476 case RecycleStrategy::BufferCopy: { 563 case RecycleStrategy::BufferCopy: {
477 auto new_surface = GetUncachedSurface(gpu_addr, params); 564 auto new_surface = GetUncachedSurface(gpu_addr, params);
@@ -480,7 +567,7 @@ private:
480 } 567 }
481 default: { 568 default: {
482 UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!"); 569 UNIMPLEMENTED_MSG("Unimplemented Texture Cache Recycling Strategy!");
483 return InitializeSurface(gpu_addr, params); 570 return InitializeSurface(gpu_addr, params, do_load);
484 } 571 }
485 } 572 }
486 } 573 }
@@ -509,7 +596,7 @@ private:
509 } 596 }
510 const auto& final_params = new_surface->GetSurfaceParams(); 597 const auto& final_params = new_surface->GetSurfaceParams();
511 if (cr_params.type != final_params.type) { 598 if (cr_params.type != final_params.type) {
512 if (Settings::values.use_accurate_gpu_emulation) { 599 if (Settings::IsGPULevelExtreme()) {
513 BufferCopy(current_surface, new_surface); 600 BufferCopy(current_surface, new_surface);
514 } 601 }
515 } else { 602 } else {
@@ -598,7 +685,7 @@ private:
598 if (passed_tests == 0) { 685 if (passed_tests == 0) {
599 return {}; 686 return {};
600 // In Accurate GPU all tests should pass, else we recycle 687 // In Accurate GPU all tests should pass, else we recycle
601 } else if (Settings::values.use_accurate_gpu_emulation && passed_tests != overlaps.size()) { 688 } else if (Settings::IsGPULevelExtreme() && passed_tests != overlaps.size()) {
602 return {}; 689 return {};
603 } 690 }
604 for (const auto& surface : overlaps) { 691 for (const auto& surface : overlaps) {
@@ -618,11 +705,14 @@ private:
618 * @param params The parameters on the new surface. 705 * @param params The parameters on the new surface.
619 * @param gpu_addr The starting address of the new surface. 706 * @param gpu_addr The starting address of the new surface.
620 * @param cpu_addr The starting address of the new surface on physical memory. 707 * @param cpu_addr The starting address of the new surface on physical memory.
708 * @param preserve_contents Indicates that the new surface should be loaded from memory or
709 * left blank.
621 */ 710 */
622 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps, 711 std::optional<std::pair<TSurface, TView>> Manage3DSurfaces(std::vector<TSurface>& overlaps,
623 const SurfaceParams& params, 712 const SurfaceParams& params,
624 const GPUVAddr gpu_addr, 713 const GPUVAddr gpu_addr,
625 const VAddr cpu_addr) { 714 const VAddr cpu_addr,
715 bool preserve_contents) {
626 if (params.target == SurfaceTarget::Texture3D) { 716 if (params.target == SurfaceTarget::Texture3D) {
627 bool failed = false; 717 bool failed = false;
628 if (params.num_levels > 1) { 718 if (params.num_levels > 1) {
@@ -668,11 +758,11 @@ private:
668 for (const auto& surface : overlaps) { 758 for (const auto& surface : overlaps) {
669 if (!surface->MatchTarget(params.target)) { 759 if (!surface->MatchTarget(params.target)) {
670 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) { 760 if (overlaps.size() == 1 && surface->GetCpuAddr() == cpu_addr) {
671 if (Settings::values.use_accurate_gpu_emulation) { 761 if (Settings::IsGPULevelExtreme()) {
672 return std::nullopt; 762 return std::nullopt;
673 } 763 }
674 Unregister(surface); 764 Unregister(surface);
675 return InitializeSurface(gpu_addr, params); 765 return InitializeSurface(gpu_addr, params, preserve_contents);
676 } 766 }
677 return std::nullopt; 767 return std::nullopt;
678 } 768 }
@@ -683,7 +773,7 @@ private:
683 return {{surface, surface->GetMainView()}}; 773 return {{surface, surface->GetMainView()}};
684 } 774 }
685 } 775 }
686 return InitializeSurface(gpu_addr, params); 776 return InitializeSurface(gpu_addr, params, preserve_contents);
687 } 777 }
688 } 778 }
689 779
@@ -706,10 +796,13 @@ private:
706 * 796 *
707 * @param gpu_addr The starting address of the candidate surface. 797 * @param gpu_addr The starting address of the candidate surface.
708 * @param params The parameters on the candidate surface. 798 * @param params The parameters on the candidate surface.
799 * @param preserve_contents Indicates that the new surface should be loaded from memory or
800 * left blank.
709 * @param is_render Whether or not the surface is a render target. 801 * @param is_render Whether or not the surface is a render target.
710 **/ 802 **/
711 std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr, 803 std::pair<TSurface, TView> GetSurface(const GPUVAddr gpu_addr, const VAddr cpu_addr,
712 const SurfaceParams& params, bool is_render) { 804 const SurfaceParams& params, bool preserve_contents,
805 bool is_render) {
713 // Step 1 806 // Step 1
714 // Check Level 1 Cache for a fast structural match. If candidate surface 807 // Check Level 1 Cache for a fast structural match. If candidate surface
715 // matches at certain level we are pretty much done. 808 // matches at certain level we are pretty much done.
@@ -718,7 +811,8 @@ private:
718 const auto topological_result = current_surface->MatchesTopology(params); 811 const auto topological_result = current_surface->MatchesTopology(params);
719 if (topological_result != MatchTopologyResult::FullMatch) { 812 if (topological_result != MatchTopologyResult::FullMatch) {
720 std::vector<TSurface> overlaps{current_surface}; 813 std::vector<TSurface> overlaps{current_surface};
721 return RecycleSurface(overlaps, params, gpu_addr, topological_result); 814 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
815 topological_result);
722 } 816 }
723 817
724 const auto struct_result = current_surface->MatchesStructure(params); 818 const auto struct_result = current_surface->MatchesStructure(params);
@@ -743,7 +837,7 @@ private:
743 837
744 // If none are found, we are done. we just load the surface and create it. 838 // If none are found, we are done. we just load the surface and create it.
745 if (overlaps.empty()) { 839 if (overlaps.empty()) {
746 return InitializeSurface(gpu_addr, params); 840 return InitializeSurface(gpu_addr, params, preserve_contents);
747 } 841 }
748 842
749 // Step 3 843 // Step 3
@@ -753,13 +847,15 @@ private:
753 for (const auto& surface : overlaps) { 847 for (const auto& surface : overlaps) {
754 const auto topological_result = surface->MatchesTopology(params); 848 const auto topological_result = surface->MatchesTopology(params);
755 if (topological_result != MatchTopologyResult::FullMatch) { 849 if (topological_result != MatchTopologyResult::FullMatch) {
756 return RecycleSurface(overlaps, params, gpu_addr, topological_result); 850 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
851 topological_result);
757 } 852 }
758 } 853 }
759 854
760 // Check if it's a 3D texture 855 // Check if it's a 3D texture
761 if (params.block_depth > 0) { 856 if (params.block_depth > 0) {
762 auto surface = Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr); 857 auto surface =
858 Manage3DSurfaces(overlaps, params, gpu_addr, cpu_addr, preserve_contents);
763 if (surface) { 859 if (surface) {
764 return *surface; 860 return *surface;
765 } 861 }
@@ -779,7 +875,8 @@ private:
779 return *view; 875 return *view;
780 } 876 }
781 } 877 }
782 return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); 878 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
879 MatchTopologyResult::FullMatch);
783 } 880 }
784 // Now we check if the candidate is a mipmap/layer of the overlap 881 // Now we check if the candidate is a mipmap/layer of the overlap
785 std::optional<TView> view = 882 std::optional<TView> view =
@@ -803,7 +900,7 @@ private:
803 pair.first->EmplaceView(params, gpu_addr, candidate_size); 900 pair.first->EmplaceView(params, gpu_addr, candidate_size);
804 if (mirage_view) 901 if (mirage_view)
805 return {pair.first, *mirage_view}; 902 return {pair.first, *mirage_view};
806 return RecycleSurface(overlaps, params, gpu_addr, 903 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
807 MatchTopologyResult::FullMatch); 904 MatchTopologyResult::FullMatch);
808 } 905 }
809 return {current_surface, *view}; 906 return {current_surface, *view};
@@ -819,7 +916,8 @@ private:
819 } 916 }
820 } 917 }
821 // We failed all the tests, recycle the overlaps into a new texture. 918 // We failed all the tests, recycle the overlaps into a new texture.
822 return RecycleSurface(overlaps, params, gpu_addr, MatchTopologyResult::FullMatch); 919 return RecycleSurface(overlaps, params, gpu_addr, preserve_contents,
920 MatchTopologyResult::FullMatch);
823 } 921 }
824 922
825 /** 923 /**
@@ -977,10 +1075,10 @@ private:
977 } 1075 }
978 1076
979 std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params, 1077 std::pair<TSurface, TView> InitializeSurface(GPUVAddr gpu_addr, const SurfaceParams& params,
980 bool do_load = true) { 1078 bool preserve_contents) {
981 auto new_surface{GetUncachedSurface(gpu_addr, params)}; 1079 auto new_surface{GetUncachedSurface(gpu_addr, params)};
982 Register(new_surface); 1080 Register(new_surface);
983 if (do_load) { 1081 if (preserve_contents) {
984 LoadSurface(new_surface); 1082 LoadSurface(new_surface);
985 } 1083 }
986 return {new_surface, new_surface->GetMainView()}; 1084 return {new_surface, new_surface->GetMainView()};
@@ -1074,7 +1172,7 @@ private:
1074 /// Returns true the shader sampler entry is compatible with the TIC texture type. 1172 /// Returns true the shader sampler entry is compatible with the TIC texture type.
1075 static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type, 1173 static bool IsTypeCompatible(Tegra::Texture::TextureType tic_type,
1076 const VideoCommon::Shader::Sampler& entry) { 1174 const VideoCommon::Shader::Sampler& entry) {
1077 const auto shader_type = entry.GetType(); 1175 const auto shader_type = entry.type;
1078 switch (tic_type) { 1176 switch (tic_type) {
1079 case Tegra::Texture::TextureType::Texture1D: 1177 case Tegra::Texture::TextureType::Texture1D:
1080 case Tegra::Texture::TextureType::Texture1DArray: 1178 case Tegra::Texture::TextureType::Texture1DArray:
@@ -1095,7 +1193,7 @@ private:
1095 if (shader_type == Tegra::Shader::TextureType::TextureCube) { 1193 if (shader_type == Tegra::Shader::TextureType::TextureCube) {
1096 return true; 1194 return true;
1097 } 1195 }
1098 return shader_type == Tegra::Shader::TextureType::Texture2D && entry.IsArray(); 1196 return shader_type == Tegra::Shader::TextureType::Texture2D && entry.is_array;
1099 } 1197 }
1100 UNREACHABLE(); 1198 UNREACHABLE();
1101 return true; 1199 return true;
@@ -1106,6 +1204,13 @@ private:
1106 TView view; 1204 TView view;
1107 }; 1205 };
1108 1206
1207 void AsyncFlushSurface(TSurface& surface) {
1208 if (!uncommitted_flushes) {
1209 uncommitted_flushes = std::make_shared<std::list<TSurface>>();
1210 }
1211 uncommitted_flushes->push_back(surface);
1212 }
1213
1109 VideoCore::RasterizerInterface& rasterizer; 1214 VideoCore::RasterizerInterface& rasterizer;
1110 1215
1111 FormatLookupTable format_lookup_table; 1216 FormatLookupTable format_lookup_table;
@@ -1150,6 +1255,11 @@ private:
1150 std::unordered_map<u32, TSurface> invalid_cache; 1255 std::unordered_map<u32, TSurface> invalid_cache;
1151 std::vector<u8> invalid_memory; 1256 std::vector<u8> invalid_memory;
1152 1257
1258 std::list<TSurface> marked_for_unregister;
1259
1260 std::shared_ptr<std::list<TSurface>> uncommitted_flushes{};
1261 std::list<std::shared_ptr<std::list<TSurface>>> committed_flushes;
1262
1153 StagingCache staging_cache; 1263 StagingCache staging_cache;
1154 std::recursive_mutex mutex; 1264 std::recursive_mutex mutex;
1155}; 1265};
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 7df5f1452..fae8638ec 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -11,6 +11,7 @@
11#include "video_core/textures/texture.h" 11#include "video_core/textures/texture.h"
12 12
13namespace Tegra::Texture { 13namespace Tegra::Texture {
14namespace {
14 15
15/** 16/**
16 * This table represents the internal swizzle of a gob, 17 * This table represents the internal swizzle of a gob,
@@ -174,6 +175,8 @@ void SwizzledData(u8* const swizzled_data, u8* const unswizzled_data, const bool
174 } 175 }
175} 176}
176 177
178} // Anonymous namespace
179
177void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel, 180void CopySwizzledData(u32 width, u32 height, u32 depth, u32 bytes_per_pixel,
178 u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data, 181 u32 out_bytes_per_pixel, u8* const swizzled_data, u8* const unswizzled_data,
179 bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) { 182 bool unswizzle, u32 block_height, u32 block_depth, u32 width_spacing) {
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index e5eac3f3b..9f2d6d308 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -56,8 +56,7 @@ void UnswizzleSubrect(u32 subrect_width, u32 subrect_height, u32 dest_pitch, u32
56 u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height, 56 u32 bytes_per_pixel, u8* swizzled_data, u8* unswizzled_data, u32 block_height,
57 u32 offset_x, u32 offset_y); 57 u32 offset_x, u32 offset_y);
58 58
59void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y, 59void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
60 const u32 block_height, const std::size_t copy_size, const u8* source_data, 60 std::size_t copy_size, const u8* source_data, u8* swizzle_data);
61 u8* swizzle_data);
62 61
63} // namespace Tegra::Texture 62} // namespace Tegra::Texture
diff --git a/src/yuzu/applets/profile_select.cpp b/src/yuzu/applets/profile_select.cpp
index 6aff38735..4bc8ee726 100644
--- a/src/yuzu/applets/profile_select.cpp
+++ b/src/yuzu/applets/profile_select.cpp
@@ -17,6 +17,7 @@
17#include "yuzu/applets/profile_select.h" 17#include "yuzu/applets/profile_select.h"
18#include "yuzu/main.h" 18#include "yuzu/main.h"
19 19
20namespace {
20QString FormatUserEntryText(const QString& username, Common::UUID uuid) { 21QString FormatUserEntryText(const QString& username, Common::UUID uuid) {
21 return QtProfileSelectionDialog::tr( 22 return QtProfileSelectionDialog::tr(
22 "%1\n%2", "%1 is the profile username, %2 is the formatted UUID (e.g. " 23 "%1\n%2", "%1 is the profile username, %2 is the formatted UUID (e.g. "
@@ -41,6 +42,7 @@ QPixmap GetIcon(Common::UUID uuid) {
41 42
42 return icon.scaled(64, 64, Qt::IgnoreAspectRatio, Qt::SmoothTransformation); 43 return icon.scaled(64, 64, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
43} 44}
45} // Anonymous namespace
44 46
45QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent) 47QtProfileSelectionDialog::QtProfileSelectionDialog(QWidget* parent)
46 : QDialog(parent), profile_manager(std::make_unique<Service::Account::ProfileManager>()) { 48 : QDialog(parent), profile_manager(std::make_unique<Service::Account::ProfileManager>()) {
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index 946aa287a..75c6cf20b 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -12,7 +12,6 @@
12#include "input_common/main.h" 12#include "input_common/main.h"
13#include "input_common/udp/client.h" 13#include "input_common/udp/client.h"
14#include "yuzu/configuration/config.h" 14#include "yuzu/configuration/config.h"
15#include "yuzu/uisettings.h"
16 15
17Config::Config() { 16Config::Config() {
18 // TODO: Don't hardcode the path; let the frontend decide where to put the config files. 17 // TODO: Don't hardcode the path; let the frontend decide where to put the config files.
@@ -212,12 +211,13 @@ const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> Config::default
212// This must be in alphabetical order according to action name as it must have the same order as 211// This must be in alphabetical order according to action name as it must have the same order as
213// UISetting::values.shortcuts, which is alphabetically ordered. 212// UISetting::values.shortcuts, which is alphabetically ordered.
214// clang-format off 213// clang-format off
215const std::array<UISettings::Shortcut, 15> default_hotkeys{{ 214const std::array<UISettings::Shortcut, 15> Config::default_hotkeys{{
216 {QStringLiteral("Capture Screenshot"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+P"), Qt::ApplicationShortcut}}, 215 {QStringLiteral("Capture Screenshot"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+P"), Qt::ApplicationShortcut}},
216 {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}},
217 {QStringLiteral("Continue/Pause Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F4"), Qt::WindowShortcut}}, 217 {QStringLiteral("Continue/Pause Emulation"), QStringLiteral("Main Window"), {QStringLiteral("F4"), Qt::WindowShortcut}},
218 {QStringLiteral("Decrease Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("-"), Qt::ApplicationShortcut}}, 218 {QStringLiteral("Decrease Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("-"), Qt::ApplicationShortcut}},
219 {QStringLiteral("Exit yuzu"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Q"), Qt::WindowShortcut}},
220 {QStringLiteral("Exit Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("Esc"), Qt::WindowShortcut}}, 219 {QStringLiteral("Exit Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("Esc"), Qt::WindowShortcut}},
220 {QStringLiteral("Exit yuzu"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Q"), Qt::WindowShortcut}},
221 {QStringLiteral("Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("F11"), Qt::WindowShortcut}}, 221 {QStringLiteral("Fullscreen"), QStringLiteral("Main Window"), {QStringLiteral("F11"), Qt::WindowShortcut}},
222 {QStringLiteral("Increase Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("+"), Qt::ApplicationShortcut}}, 222 {QStringLiteral("Increase Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("+"), Qt::ApplicationShortcut}},
223 {QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), Qt::ApplicationShortcut}}, 223 {QStringLiteral("Load Amiibo"), QStringLiteral("Main Window"), {QStringLiteral("F2"), Qt::ApplicationShortcut}},
@@ -227,7 +227,6 @@ const std::array<UISettings::Shortcut, 15> default_hotkeys{{
227 {QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), Qt::WindowShortcut}}, 227 {QStringLiteral("Toggle Filter Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+F"), Qt::WindowShortcut}},
228 {QStringLiteral("Toggle Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Z"), Qt::ApplicationShortcut}}, 228 {QStringLiteral("Toggle Speed Limit"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+Z"), Qt::ApplicationShortcut}},
229 {QStringLiteral("Toggle Status Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+S"), Qt::WindowShortcut}}, 229 {QStringLiteral("Toggle Status Bar"), QStringLiteral("Main Window"), {QStringLiteral("Ctrl+S"), Qt::WindowShortcut}},
230 {QStringLiteral("Change Docked Mode"), QStringLiteral("Main Window"), {QStringLiteral("F10"), Qt::ApplicationShortcut}},
231}}; 230}};
232// clang-format on 231// clang-format on
233 232
@@ -532,6 +531,8 @@ void Config::ReadDebuggingValues() {
532 Settings::values.reporting_services = 531 Settings::values.reporting_services =
533 ReadSetting(QStringLiteral("reporting_services"), false).toBool(); 532 ReadSetting(QStringLiteral("reporting_services"), false).toBool();
534 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool(); 533 Settings::values.quest_flag = ReadSetting(QStringLiteral("quest_flag"), false).toBool();
534 Settings::values.disable_cpu_opt =
535 ReadSetting(QStringLiteral("disable_cpu_opt"), false).toBool();
535 536
536 qt_config->endGroup(); 537 qt_config->endGroup();
537} 538}
@@ -637,11 +638,13 @@ void Config::ReadRendererValues() {
637 Settings::values.frame_limit = ReadSetting(QStringLiteral("frame_limit"), 100).toInt(); 638 Settings::values.frame_limit = ReadSetting(QStringLiteral("frame_limit"), 100).toInt();
638 Settings::values.use_disk_shader_cache = 639 Settings::values.use_disk_shader_cache =
639 ReadSetting(QStringLiteral("use_disk_shader_cache"), true).toBool(); 640 ReadSetting(QStringLiteral("use_disk_shader_cache"), true).toBool();
640 Settings::values.use_accurate_gpu_emulation = 641 const int gpu_accuracy_level = ReadSetting(QStringLiteral("gpu_accuracy"), 0).toInt();
641 ReadSetting(QStringLiteral("use_accurate_gpu_emulation"), false).toBool(); 642 Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
642 Settings::values.use_asynchronous_gpu_emulation = 643 Settings::values.use_asynchronous_gpu_emulation =
643 ReadSetting(QStringLiteral("use_asynchronous_gpu_emulation"), false).toBool(); 644 ReadSetting(QStringLiteral("use_asynchronous_gpu_emulation"), false).toBool();
644 Settings::values.use_vsync = ReadSetting(QStringLiteral("use_vsync"), true).toBool(); 645 Settings::values.use_vsync = ReadSetting(QStringLiteral("use_vsync"), true).toBool();
646 Settings::values.use_fast_gpu_time =
647 ReadSetting(QStringLiteral("use_fast_gpu_time"), true).toBool();
645 Settings::values.force_30fps_mode = 648 Settings::values.force_30fps_mode =
646 ReadSetting(QStringLiteral("force_30fps_mode"), false).toBool(); 649 ReadSetting(QStringLiteral("force_30fps_mode"), false).toBool();
647 650
@@ -1003,6 +1006,7 @@ void Config::SaveDebuggingValues() {
1003 WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false); 1006 WriteSetting(QStringLiteral("dump_exefs"), Settings::values.dump_exefs, false);
1004 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false); 1007 WriteSetting(QStringLiteral("dump_nso"), Settings::values.dump_nso, false);
1005 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false); 1008 WriteSetting(QStringLiteral("quest_flag"), Settings::values.quest_flag, false);
1009 WriteSetting(QStringLiteral("disable_cpu_opt"), Settings::values.disable_cpu_opt, false);
1006 1010
1007 qt_config->endGroup(); 1011 qt_config->endGroup();
1008} 1012}
@@ -1079,11 +1083,12 @@ void Config::SaveRendererValues() {
1079 WriteSetting(QStringLiteral("frame_limit"), Settings::values.frame_limit, 100); 1083 WriteSetting(QStringLiteral("frame_limit"), Settings::values.frame_limit, 100);
1080 WriteSetting(QStringLiteral("use_disk_shader_cache"), Settings::values.use_disk_shader_cache, 1084 WriteSetting(QStringLiteral("use_disk_shader_cache"), Settings::values.use_disk_shader_cache,
1081 true); 1085 true);
1082 WriteSetting(QStringLiteral("use_accurate_gpu_emulation"), 1086 WriteSetting(QStringLiteral("gpu_accuracy"), static_cast<int>(Settings::values.gpu_accuracy),
1083 Settings::values.use_accurate_gpu_emulation, false); 1087 0);
1084 WriteSetting(QStringLiteral("use_asynchronous_gpu_emulation"), 1088 WriteSetting(QStringLiteral("use_asynchronous_gpu_emulation"),
1085 Settings::values.use_asynchronous_gpu_emulation, false); 1089 Settings::values.use_asynchronous_gpu_emulation, false);
1086 WriteSetting(QStringLiteral("use_vsync"), Settings::values.use_vsync, true); 1090 WriteSetting(QStringLiteral("use_vsync"), Settings::values.use_vsync, true);
1091 WriteSetting(QStringLiteral("use_fast_gpu_time"), Settings::values.use_fast_gpu_time, true);
1087 WriteSetting(QStringLiteral("force_30fps_mode"), Settings::values.force_30fps_mode, false); 1092 WriteSetting(QStringLiteral("force_30fps_mode"), Settings::values.force_30fps_mode, false);
1088 1093
1089 // Cast to double because Qt's written float values are not human-readable 1094 // Cast to double because Qt's written float values are not human-readable
diff --git a/src/yuzu/configuration/config.h b/src/yuzu/configuration/config.h
index ba6888004..5cd2a5feb 100644
--- a/src/yuzu/configuration/config.h
+++ b/src/yuzu/configuration/config.h
@@ -9,6 +9,7 @@
9#include <string> 9#include <string>
10#include <QVariant> 10#include <QVariant>
11#include "core/settings.h" 11#include "core/settings.h"
12#include "yuzu/uisettings.h"
12 13
13class QSettings; 14class QSettings;
14 15
@@ -26,6 +27,7 @@ public:
26 default_mouse_buttons; 27 default_mouse_buttons;
27 static const std::array<int, Settings::NativeKeyboard::NumKeyboardKeys> default_keyboard_keys; 28 static const std::array<int, Settings::NativeKeyboard::NumKeyboardKeys> default_keyboard_keys;
28 static const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> default_keyboard_mods; 29 static const std::array<int, Settings::NativeKeyboard::NumKeyboardMods> default_keyboard_mods;
30 static const std::array<UISettings::Shortcut, 15> default_hotkeys;
29 31
30private: 32private:
31 void ReadValues(); 33 void ReadValues();
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 9631059c7..c2026763e 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -36,6 +36,7 @@ void ConfigureDebug::SetConfiguration() {
36 ui->homebrew_args_edit->setText(QString::fromStdString(Settings::values.program_args)); 36 ui->homebrew_args_edit->setText(QString::fromStdString(Settings::values.program_args));
37 ui->reporting_services->setChecked(Settings::values.reporting_services); 37 ui->reporting_services->setChecked(Settings::values.reporting_services);
38 ui->quest_flag->setChecked(Settings::values.quest_flag); 38 ui->quest_flag->setChecked(Settings::values.quest_flag);
39 ui->disable_cpu_opt->setChecked(Settings::values.disable_cpu_opt);
39 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn()); 40 ui->enable_graphics_debugging->setEnabled(!Core::System::GetInstance().IsPoweredOn());
40 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug); 41 ui->enable_graphics_debugging->setChecked(Settings::values.renderer_debug);
41} 42}
@@ -48,6 +49,7 @@ void ConfigureDebug::ApplyConfiguration() {
48 Settings::values.program_args = ui->homebrew_args_edit->text().toStdString(); 49 Settings::values.program_args = ui->homebrew_args_edit->text().toStdString();
49 Settings::values.reporting_services = ui->reporting_services->isChecked(); 50 Settings::values.reporting_services = ui->reporting_services->isChecked();
50 Settings::values.quest_flag = ui->quest_flag->isChecked(); 51 Settings::values.quest_flag = ui->quest_flag->isChecked();
52 Settings::values.disable_cpu_opt = ui->disable_cpu_opt->isChecked();
51 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked(); 53 Settings::values.renderer_debug = ui->enable_graphics_debugging->isChecked();
52 Debugger::ToggleConsole(); 54 Debugger::ToggleConsole();
53 Log::Filter filter; 55 Log::Filter filter;
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index e028c4c80..e0d4c4a44 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -215,6 +215,13 @@
215 </property> 215 </property>
216 </widget> 216 </widget>
217 </item> 217 </item>
218 <item>
219 <widget class="QCheckBox" name="disable_cpu_opt">
220 <property name="text">
221 <string>Disable CPU JIT optimizations</string>
222 </property>
223 </widget>
224 </item>
218 </layout> 225 </layout>
219 </widget> 226 </widget>
220 </item> 227 </item>
diff --git a/src/yuzu/configuration/configure_filesystem.cpp b/src/yuzu/configuration/configure_filesystem.cpp
index 29f540eb7..835ee821c 100644
--- a/src/yuzu/configuration/configure_filesystem.cpp
+++ b/src/yuzu/configuration/configure_filesystem.cpp
@@ -138,7 +138,7 @@ void ConfigureFilesystem::SetDirectory(DirectoryTarget target, QLineEdit* edit)
138 str = QFileDialog::getOpenFileName(this, caption, QFileInfo(edit->text()).dir().path(), 138 str = QFileDialog::getOpenFileName(this, caption, QFileInfo(edit->text()).dir().path(),
139 QStringLiteral("NX Gamecard;*.xci")); 139 QStringLiteral("NX Gamecard;*.xci"));
140 } else { 140 } else {
141 str = QFileDialog::getExistingDirectory(this, caption, edit->text()); 141 str = QFileDialog::getExistingDirectory(this, caption, edit->text()) + QDir::separator();
142 } 142 }
143 143
144 if (str.isEmpty()) 144 if (str.isEmpty())
diff --git a/src/yuzu/configuration/configure_graphics_advanced.cpp b/src/yuzu/configuration/configure_graphics_advanced.cpp
index b9f429f84..5bb2ae555 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.cpp
+++ b/src/yuzu/configuration/configure_graphics_advanced.cpp
@@ -19,9 +19,10 @@ ConfigureGraphicsAdvanced::~ConfigureGraphicsAdvanced() = default;
19 19
20void ConfigureGraphicsAdvanced::SetConfiguration() { 20void ConfigureGraphicsAdvanced::SetConfiguration() {
21 const bool runtime_lock = !Core::System::GetInstance().IsPoweredOn(); 21 const bool runtime_lock = !Core::System::GetInstance().IsPoweredOn();
22 ui->use_accurate_gpu_emulation->setChecked(Settings::values.use_accurate_gpu_emulation); 22 ui->gpu_accuracy->setCurrentIndex(static_cast<int>(Settings::values.gpu_accuracy));
23 ui->use_vsync->setEnabled(runtime_lock); 23 ui->use_vsync->setEnabled(runtime_lock);
24 ui->use_vsync->setChecked(Settings::values.use_vsync); 24 ui->use_vsync->setChecked(Settings::values.use_vsync);
25 ui->use_fast_gpu_time->setChecked(Settings::values.use_fast_gpu_time);
25 ui->force_30fps_mode->setEnabled(runtime_lock); 26 ui->force_30fps_mode->setEnabled(runtime_lock);
26 ui->force_30fps_mode->setChecked(Settings::values.force_30fps_mode); 27 ui->force_30fps_mode->setChecked(Settings::values.force_30fps_mode);
27 ui->anisotropic_filtering_combobox->setEnabled(runtime_lock); 28 ui->anisotropic_filtering_combobox->setEnabled(runtime_lock);
@@ -29,8 +30,10 @@ void ConfigureGraphicsAdvanced::SetConfiguration() {
29} 30}
30 31
31void ConfigureGraphicsAdvanced::ApplyConfiguration() { 32void ConfigureGraphicsAdvanced::ApplyConfiguration() {
32 Settings::values.use_accurate_gpu_emulation = ui->use_accurate_gpu_emulation->isChecked(); 33 auto gpu_accuracy = static_cast<Settings::GPUAccuracy>(ui->gpu_accuracy->currentIndex());
34 Settings::values.gpu_accuracy = gpu_accuracy;
33 Settings::values.use_vsync = ui->use_vsync->isChecked(); 35 Settings::values.use_vsync = ui->use_vsync->isChecked();
36 Settings::values.use_fast_gpu_time = ui->use_fast_gpu_time->isChecked();
34 Settings::values.force_30fps_mode = ui->force_30fps_mode->isChecked(); 37 Settings::values.force_30fps_mode = ui->force_30fps_mode->isChecked();
35 Settings::values.max_anisotropy = ui->anisotropic_filtering_combobox->currentIndex(); 38 Settings::values.max_anisotropy = ui->anisotropic_filtering_combobox->currentIndex();
36} 39}
diff --git a/src/yuzu/configuration/configure_graphics_advanced.ui b/src/yuzu/configuration/configure_graphics_advanced.ui
index 42eec278e..770b80c50 100644
--- a/src/yuzu/configuration/configure_graphics_advanced.ui
+++ b/src/yuzu/configuration/configure_graphics_advanced.ui
@@ -23,11 +23,34 @@
23 </property> 23 </property>
24 <layout class="QVBoxLayout" name="verticalLayout_3"> 24 <layout class="QVBoxLayout" name="verticalLayout_3">
25 <item> 25 <item>
26 <widget class="QCheckBox" name="use_accurate_gpu_emulation"> 26 <layout class="QHBoxLayout" name="horizontalLayout_2">
27 <property name="text"> 27 <item>
28 <string>Use accurate GPU emulation (slow)</string> 28 <widget class="QLabel" name="label_gpu_accuracy">
29 </property> 29 <property name="text">
30 </widget> 30 <string>Accuracy Level:</string>
31 </property>
32 </widget>
33 </item>
34 <item>
35 <widget class="QComboBox" name="gpu_accuracy">
36 <item>
37 <property name="text">
38 <string notr="true">Normal</string>
39 </property>
40 </item>
41 <item>
42 <property name="text">
43 <string notr="true">High</string>
44 </property>
45 </item>
46 <item>
47 <property name="text">
48 <string notr="true">Extreme(very slow)</string>
49 </property>
50 </item>
51 </widget>
52 </item>
53 </layout>
31 </item> 54 </item>
32 <item> 55 <item>
33 <widget class="QCheckBox" name="use_vsync"> 56 <widget class="QCheckBox" name="use_vsync">
@@ -47,6 +70,13 @@
47 </widget> 70 </widget>
48 </item> 71 </item>
49 <item> 72 <item>
73 <widget class="QCheckBox" name="use_fast_gpu_time">
74 <property name="text">
75 <string>Use Fast GPU Time</string>
76 </property>
77 </widget>
78 </item>
79 <item>
50 <layout class="QHBoxLayout" name="horizontalLayout_1"> 80 <layout class="QHBoxLayout" name="horizontalLayout_1">
51 <item> 81 <item>
52 <widget class="QLabel" name="af_label"> 82 <widget class="QLabel" name="af_label">
diff --git a/src/yuzu/configuration/configure_hotkeys.cpp b/src/yuzu/configuration/configure_hotkeys.cpp
index fa9052136..6f7fd4414 100644
--- a/src/yuzu/configuration/configure_hotkeys.cpp
+++ b/src/yuzu/configuration/configure_hotkeys.cpp
@@ -2,10 +2,12 @@
2// Licensed under GPLv2 or any later version 2// Licensed under GPLv2 or any later version
3// Refer to the license.txt file included. 3// Refer to the license.txt file included.
4 4
5#include <QMenu>
5#include <QMessageBox> 6#include <QMessageBox>
6#include <QStandardItemModel> 7#include <QStandardItemModel>
7#include "core/settings.h" 8#include "core/settings.h"
8#include "ui_configure_hotkeys.h" 9#include "ui_configure_hotkeys.h"
10#include "yuzu/configuration/config.h"
9#include "yuzu/configuration/configure_hotkeys.h" 11#include "yuzu/configuration/configure_hotkeys.h"
10#include "yuzu/hotkeys.h" 12#include "yuzu/hotkeys.h"
11#include "yuzu/util/sequence_dialog/sequence_dialog.h" 13#include "yuzu/util/sequence_dialog/sequence_dialog.h"
@@ -19,6 +21,9 @@ ConfigureHotkeys::ConfigureHotkeys(QWidget* parent)
19 model->setColumnCount(3); 21 model->setColumnCount(3);
20 22
21 connect(ui->hotkey_list, &QTreeView::doubleClicked, this, &ConfigureHotkeys::Configure); 23 connect(ui->hotkey_list, &QTreeView::doubleClicked, this, &ConfigureHotkeys::Configure);
24 connect(ui->hotkey_list, &QTreeView::customContextMenuRequested, this,
25 &ConfigureHotkeys::PopupContextMenu);
26 ui->hotkey_list->setContextMenuPolicy(Qt::CustomContextMenu);
22 ui->hotkey_list->setModel(model); 27 ui->hotkey_list->setModel(model);
23 28
24 // TODO(Kloen): Make context configurable as well (hiding the column for now) 29 // TODO(Kloen): Make context configurable as well (hiding the column for now)
@@ -27,6 +32,10 @@ ConfigureHotkeys::ConfigureHotkeys(QWidget* parent)
27 ui->hotkey_list->setColumnWidth(0, 200); 32 ui->hotkey_list->setColumnWidth(0, 200);
28 ui->hotkey_list->resizeColumnToContents(1); 33 ui->hotkey_list->resizeColumnToContents(1);
29 34
35 connect(ui->button_restore_defaults, &QPushButton::clicked, this,
36 &ConfigureHotkeys::RestoreDefaults);
37 connect(ui->button_clear_all, &QPushButton::clicked, this, &ConfigureHotkeys::ClearAll);
38
30 RetranslateUI(); 39 RetranslateUI();
31} 40}
32 41
@@ -71,7 +80,6 @@ void ConfigureHotkeys::Configure(QModelIndex index) {
71 } 80 }
72 81
73 index = index.sibling(index.row(), 1); 82 index = index.sibling(index.row(), 1);
74 auto* const model = ui->hotkey_list->model();
75 const auto previous_key = model->data(index); 83 const auto previous_key = model->data(index);
76 84
77 SequenceDialog hotkey_dialog{this}; 85 SequenceDialog hotkey_dialog{this};
@@ -81,31 +89,33 @@ void ConfigureHotkeys::Configure(QModelIndex index) {
81 if (return_code == QDialog::Rejected || key_sequence.isEmpty()) { 89 if (return_code == QDialog::Rejected || key_sequence.isEmpty()) {
82 return; 90 return;
83 } 91 }
92 const auto [key_sequence_used, used_action] = IsUsedKey(key_sequence);
84 93
85 if (IsUsedKey(key_sequence) && key_sequence != QKeySequence(previous_key.toString())) { 94 if (key_sequence_used && key_sequence != QKeySequence(previous_key.toString())) {
86 QMessageBox::warning(this, tr("Conflicting Key Sequence"), 95 QMessageBox::warning(
87 tr("The entered key sequence is already assigned to another hotkey.")); 96 this, tr("Conflicting Key Sequence"),
97 tr("The entered key sequence is already assigned to: %1").arg(used_action));
88 } else { 98 } else {
89 model->setData(index, key_sequence.toString(QKeySequence::NativeText)); 99 model->setData(index, key_sequence.toString(QKeySequence::NativeText));
90 } 100 }
91} 101}
92 102
93bool ConfigureHotkeys::IsUsedKey(QKeySequence key_sequence) const { 103std::pair<bool, QString> ConfigureHotkeys::IsUsedKey(QKeySequence key_sequence) const {
94 for (int r = 0; r < model->rowCount(); r++) { 104 for (int r = 0; r < model->rowCount(); ++r) {
95 const QStandardItem* const parent = model->item(r, 0); 105 const QStandardItem* const parent = model->item(r, 0);
96 106
97 for (int r2 = 0; r2 < parent->rowCount(); r2++) { 107 for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
98 const QStandardItem* const key_seq_item = parent->child(r2, 1); 108 const QStandardItem* const key_seq_item = parent->child(r2, 1);
99 const auto key_seq_str = key_seq_item->text(); 109 const auto key_seq_str = key_seq_item->text();
100 const auto key_seq = QKeySequence::fromString(key_seq_str, QKeySequence::NativeText); 110 const auto key_seq = QKeySequence::fromString(key_seq_str, QKeySequence::NativeText);
101 111
102 if (key_sequence == key_seq) { 112 if (key_sequence == key_seq) {
103 return true; 113 return std::make_pair(true, parent->child(r2, 0)->text());
104 } 114 }
105 } 115 }
106 } 116 }
107 117
108 return false; 118 return std::make_pair(false, QString());
109} 119}
110 120
111void ConfigureHotkeys::ApplyConfiguration(HotkeyRegistry& registry) { 121void ConfigureHotkeys::ApplyConfiguration(HotkeyRegistry& registry) {
@@ -128,3 +138,55 @@ void ConfigureHotkeys::ApplyConfiguration(HotkeyRegistry& registry) {
128 138
129 registry.SaveHotkeys(); 139 registry.SaveHotkeys();
130} 140}
141
142void ConfigureHotkeys::RestoreDefaults() {
143 for (int r = 0; r < model->rowCount(); ++r) {
144 const QStandardItem* parent = model->item(r, 0);
145
146 for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
147 model->item(r, 0)->child(r2, 1)->setText(Config::default_hotkeys[r2].shortcut.first);
148 }
149 }
150}
151
152void ConfigureHotkeys::ClearAll() {
153 for (int r = 0; r < model->rowCount(); ++r) {
154 const QStandardItem* parent = model->item(r, 0);
155
156 for (int r2 = 0; r2 < parent->rowCount(); ++r2) {
157 model->item(r, 0)->child(r2, 1)->setText(tr(""));
158 }
159 }
160}
161
162void ConfigureHotkeys::PopupContextMenu(const QPoint& menu_location) {
163 QModelIndex index = ui->hotkey_list->indexAt(menu_location);
164 if (!index.parent().isValid()) {
165 return;
166 }
167
168 const auto selected = index.sibling(index.row(), 1);
169 QMenu context_menu;
170
171 QAction* restore_default = context_menu.addAction(tr("Restore Default"));
172 QAction* clear = context_menu.addAction(tr("Clear"));
173
174 connect(restore_default, &QAction::triggered, [this, selected] {
175 const QKeySequence& default_key_sequence = QKeySequence::fromString(
176 Config::default_hotkeys[selected.row()].shortcut.first, QKeySequence::NativeText);
177 const auto [key_sequence_used, used_action] = IsUsedKey(default_key_sequence);
178
179 if (key_sequence_used &&
180 default_key_sequence != QKeySequence(model->data(selected).toString())) {
181
182 QMessageBox::warning(
183 this, tr("Conflicting Key Sequence"),
184 tr("The default key sequence is already assigned to: %1").arg(used_action));
185 } else {
186 model->setData(selected, default_key_sequence.toString(QKeySequence::NativeText));
187 }
188 });
189 connect(clear, &QAction::triggered, [this, selected] { model->setData(selected, tr("")); });
190
191 context_menu.exec(ui->hotkey_list->viewport()->mapToGlobal(menu_location));
192}
diff --git a/src/yuzu/configuration/configure_hotkeys.h b/src/yuzu/configuration/configure_hotkeys.h
index 8f8c6173b..a2ec3323e 100644
--- a/src/yuzu/configuration/configure_hotkeys.h
+++ b/src/yuzu/configuration/configure_hotkeys.h
@@ -35,7 +35,11 @@ private:
35 void RetranslateUI(); 35 void RetranslateUI();
36 36
37 void Configure(QModelIndex index); 37 void Configure(QModelIndex index);
38 bool IsUsedKey(QKeySequence key_sequence) const; 38 std::pair<bool, QString> IsUsedKey(QKeySequence key_sequence) const;
39
40 void RestoreDefaults();
41 void ClearAll();
42 void PopupContextMenu(const QPoint& menu_location);
39 43
40 std::unique_ptr<Ui::ConfigureHotkeys> ui; 44 std::unique_ptr<Ui::ConfigureHotkeys> ui;
41 45
diff --git a/src/yuzu/configuration/configure_hotkeys.ui b/src/yuzu/configuration/configure_hotkeys.ui
index 0d0b70f38..6d9f861e3 100644
--- a/src/yuzu/configuration/configure_hotkeys.ui
+++ b/src/yuzu/configuration/configure_hotkeys.ui
@@ -6,8 +6,8 @@
6 <rect> 6 <rect>
7 <x>0</x> 7 <x>0</x>
8 <y>0</y> 8 <y>0</y>
9 <width>363</width> 9 <width>439</width>
10 <height>388</height> 10 <height>510</height>
11 </rect> 11 </rect>
12 </property> 12 </property>
13 <property name="windowTitle"> 13 <property name="windowTitle">
@@ -15,7 +15,7 @@
15 </property> 15 </property>
16 <layout class="QVBoxLayout" name="verticalLayout"> 16 <layout class="QVBoxLayout" name="verticalLayout">
17 <item> 17 <item>
18 <layout class="QVBoxLayout" name="verticalLayout_2"> 18 <layout class="QHBoxLayout" name="horizontalLayout">
19 <item> 19 <item>
20 <widget class="QLabel" name="label_2"> 20 <widget class="QLabel" name="label_2">
21 <property name="text"> 21 <property name="text">
@@ -24,6 +24,37 @@
24 </widget> 24 </widget>
25 </item> 25 </item>
26 <item> 26 <item>
27 <spacer name="horizontalSpacer">
28 <property name="orientation">
29 <enum>Qt::Horizontal</enum>
30 </property>
31 <property name="sizeHint" stdset="0">
32 <size>
33 <width>40</width>
34 <height>20</height>
35 </size>
36 </property>
37 </spacer>
38 </item>
39 <item>
40 <widget class="QPushButton" name="button_clear_all">
41 <property name="text">
42 <string>Clear All</string>
43 </property>
44 </widget>
45 </item>
46 <item>
47 <widget class="QPushButton" name="button_restore_defaults">
48 <property name="text">
49 <string>Restore Defaults</string>
50 </property>
51 </widget>
52 </item>
53 </layout>
54 </item>
55 <item>
56 <layout class="QVBoxLayout" name="verticalLayout_2">
57 <item>
27 <widget class="QTreeView" name="hotkey_list"> 58 <widget class="QTreeView" name="hotkey_list">
28 <property name="editTriggers"> 59 <property name="editTriggers">
29 <set>QAbstractItemView::NoEditTriggers</set> 60 <set>QAbstractItemView::NoEditTriggers</set>
@@ -39,4 +70,4 @@
39 </widget> 70 </widget>
40 <resources/> 71 <resources/>
41 <connections/> 72 <connections/>
42</ui> \ No newline at end of file 73</ui>
diff --git a/src/yuzu/configuration/configure_input_player.cpp b/src/yuzu/configuration/configure_input_player.cpp
index 15ac30f12..e4eb5594b 100644
--- a/src/yuzu/configuration/configure_input_player.cpp
+++ b/src/yuzu/configuration/configure_input_player.cpp
@@ -56,7 +56,6 @@ static void SetAnalogButton(const Common::ParamPackage& input_param,
56 if (analog_param.Get("engine", "") != "analog_from_button") { 56 if (analog_param.Get("engine", "") != "analog_from_button") {
57 analog_param = { 57 analog_param = {
58 {"engine", "analog_from_button"}, 58 {"engine", "analog_from_button"},
59 {"modifier_scale", "0.5"},
60 }; 59 };
61 } 60 }
62 analog_param.Set(button_name, input_param.Serialize()); 61 analog_param.Set(button_name, input_param.Serialize());
@@ -236,8 +235,10 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
236 widget->setVisible(false); 235 widget->setVisible(false);
237 236
238 analog_map_stick = {ui->buttonLStickAnalog, ui->buttonRStickAnalog}; 237 analog_map_stick = {ui->buttonLStickAnalog, ui->buttonRStickAnalog};
239 analog_map_deadzone = {ui->sliderLStickDeadzone, ui->sliderRStickDeadzone}; 238 analog_map_deadzone_and_modifier_slider = {ui->sliderLStickDeadzoneAndModifier,
240 analog_map_deadzone_label = {ui->labelLStickDeadzone, ui->labelRStickDeadzone}; 239 ui->sliderRStickDeadzoneAndModifier};
240 analog_map_deadzone_and_modifier_slider_label = {ui->labelLStickDeadzoneAndModifier,
241 ui->labelRStickDeadzoneAndModifier};
241 242
242 for (int button_id = 0; button_id < Settings::NativeButton::NumButtons; button_id++) { 243 for (int button_id = 0; button_id < Settings::NativeButton::NumButtons; button_id++) {
243 auto* const button = button_map[button_id]; 244 auto* const button = button_map[button_id];
@@ -328,10 +329,18 @@ ConfigureInputPlayer::ConfigureInputPlayer(QWidget* parent, std::size_t player_i
328 InputCommon::Polling::DeviceType::Analog); 329 InputCommon::Polling::DeviceType::Analog);
329 } 330 }
330 }); 331 });
331 connect(analog_map_deadzone[analog_id], &QSlider::valueChanged, [=] { 332
332 const float deadzone = analog_map_deadzone[analog_id]->value() / 100.0f; 333 connect(analog_map_deadzone_and_modifier_slider[analog_id], &QSlider::valueChanged, [=] {
333 analog_map_deadzone_label[analog_id]->setText(tr("Deadzone: %1").arg(deadzone)); 334 const float slider_value = analog_map_deadzone_and_modifier_slider[analog_id]->value();
334 analogs_param[analog_id].Set("deadzone", deadzone); 335 if (analogs_param[analog_id].Get("engine", "") == "sdl") {
336 analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
337 tr("Deadzone: %1%").arg(slider_value));
338 analogs_param[analog_id].Set("deadzone", slider_value / 100.0f);
339 } else {
340 analog_map_deadzone_and_modifier_slider_label[analog_id]->setText(
341 tr("Modifier Scale: %1%").arg(slider_value));
342 analogs_param[analog_id].Set("modifier_scale", slider_value / 100.0f);
343 }
335 }); 344 });
336 } 345 }
337 346
@@ -517,20 +526,31 @@ void ConfigureInputPlayer::UpdateButtonLabels() {
517 analog_map_stick[analog_id]->setText(tr("Set Analog Stick")); 526 analog_map_stick[analog_id]->setText(tr("Set Analog Stick"));
518 527
519 auto& param = analogs_param[analog_id]; 528 auto& param = analogs_param[analog_id];
520 auto* const analog_deadzone_slider = analog_map_deadzone[analog_id]; 529 auto* const analog_stick_slider = analog_map_deadzone_and_modifier_slider[analog_id];
521 auto* const analog_deadzone_label = analog_map_deadzone_label[analog_id]; 530 auto* const analog_stick_slider_label =
522 531 analog_map_deadzone_and_modifier_slider_label[analog_id];
523 if (param.Has("engine") && param.Get("engine", "") == "sdl") { 532
524 if (!param.Has("deadzone")) { 533 if (param.Has("engine")) {
525 param.Set("deadzone", 0.1f); 534 if (param.Get("engine", "") == "sdl") {
535 if (!param.Has("deadzone")) {
536 param.Set("deadzone", 0.1f);
537 }
538
539 analog_stick_slider->setValue(static_cast<int>(param.Get("deadzone", 0.1f) * 100));
540 if (analog_stick_slider->value() == 0) {
541 analog_stick_slider_label->setText(tr("Deadzone: 0%"));
542 }
543 } else {
544 if (!param.Has("modifier_scale")) {
545 param.Set("modifier_scale", 0.5f);
546 }
547
548 analog_stick_slider->setValue(
549 static_cast<int>(param.Get("modifier_scale", 0.5f) * 100));
550 if (analog_stick_slider->value() == 0) {
551 analog_stick_slider_label->setText(tr("Modifier Scale: 0%"));
552 }
526 } 553 }
527
528 analog_deadzone_slider->setValue(static_cast<int>(param.Get("deadzone", 0.1f) * 100));
529 analog_deadzone_slider->setVisible(true);
530 analog_deadzone_label->setVisible(true);
531 } else {
532 analog_deadzone_slider->setVisible(false);
533 analog_deadzone_label->setVisible(false);
534 } 554 }
535 } 555 }
536} 556}
diff --git a/src/yuzu/configuration/configure_input_player.h b/src/yuzu/configuration/configure_input_player.h
index 045704e47..95afa5375 100644
--- a/src/yuzu/configuration/configure_input_player.h
+++ b/src/yuzu/configuration/configure_input_player.h
@@ -97,8 +97,10 @@ private:
97 /// Analog inputs are also represented each with a single button, used to configure with an 97 /// Analog inputs are also represented each with a single button, used to configure with an
98 /// actual analog stick 98 /// actual analog stick
99 std::array<QPushButton*, Settings::NativeAnalog::NumAnalogs> analog_map_stick; 99 std::array<QPushButton*, Settings::NativeAnalog::NumAnalogs> analog_map_stick;
100 std::array<QSlider*, Settings::NativeAnalog::NumAnalogs> analog_map_deadzone; 100 std::array<QSlider*, Settings::NativeAnalog::NumAnalogs>
101 std::array<QLabel*, Settings::NativeAnalog::NumAnalogs> analog_map_deadzone_label; 101 analog_map_deadzone_and_modifier_slider;
102 std::array<QLabel*, Settings::NativeAnalog::NumAnalogs>
103 analog_map_deadzone_and_modifier_slider_label;
102 104
103 static const std::array<std::string, ANALOG_SUB_BUTTONS_NUM> analog_sub_buttons; 105 static const std::array<std::string, ANALOG_SUB_BUTTONS_NUM> analog_sub_buttons;
104 106
diff --git a/src/yuzu/configuration/configure_input_player.ui b/src/yuzu/configuration/configure_input_player.ui
index 4b37746a1..f27a77180 100644
--- a/src/yuzu/configuration/configure_input_player.ui
+++ b/src/yuzu/configuration/configure_input_player.ui
@@ -171,11 +171,11 @@
171 </layout> 171 </layout>
172 </item> 172 </item>
173 <item row="4" column="0" colspan="2"> 173 <item row="4" column="0" colspan="2">
174 <layout class="QVBoxLayout" name="sliderRStickDeadzoneVerticalLayout"> 174 <layout class="QVBoxLayout" name="sliderRStickDeadzoneAndModifierVerticalLayout">
175 <item> 175 <item>
176 <layout class="QHBoxLayout" name="sliderRStickDeadzoneHorizontalLayout"> 176 <layout class="QHBoxLayout" name="sliderRStickDeadzoneAndModifierHorizontalLayout">
177 <item> 177 <item>
178 <widget class="QLabel" name="labelRStickDeadzone"> 178 <widget class="QLabel" name="labelRStickDeadzoneAndModifier">
179 <property name="text"> 179 <property name="text">
180 <string>Deadzone: 0</string> 180 <string>Deadzone: 0</string>
181 </property> 181 </property>
@@ -187,7 +187,7 @@
187 </layout> 187 </layout>
188 </item> 188 </item>
189 <item> 189 <item>
190 <widget class="QSlider" name="sliderRStickDeadzone"> 190 <widget class="QSlider" name="sliderRStickDeadzoneAndModifier">
191 <property name="orientation"> 191 <property name="orientation">
192 <enum>Qt::Horizontal</enum> 192 <enum>Qt::Horizontal</enum>
193 </property> 193 </property>
@@ -784,14 +784,14 @@
784 </layout> 784 </layout>
785 </item> 785 </item>
786 <item row="5" column="1" colspan="2"> 786 <item row="5" column="1" colspan="2">
787 <layout class="QVBoxLayout" name="sliderLStickDeadzoneVerticalLayout"> 787 <layout class="QVBoxLayout" name="sliderLStickDeadzoneAndModifierVerticalLayout">
788 <property name="sizeConstraint"> 788 <property name="sizeConstraint">
789 <enum>QLayout::SetDefaultConstraint</enum> 789 <enum>QLayout::SetDefaultConstraint</enum>
790 </property> 790 </property>
791 <item> 791 <item>
792 <layout class="QHBoxLayout" name="sliderLStickDeadzoneHorizontalLayout"> 792 <layout class="QHBoxLayout" name="sliderLStickDeadzoneAndModifierHorizontalLayout">
793 <item> 793 <item>
794 <widget class="QLabel" name="labelLStickDeadzone"> 794 <widget class="QLabel" name="labelLStickDeadzoneAndModifier">
795 <property name="text"> 795 <property name="text">
796 <string>Deadzone: 0</string> 796 <string>Deadzone: 0</string>
797 </property> 797 </property>
@@ -803,7 +803,7 @@
803 </layout> 803 </layout>
804 </item> 804 </item>
805 <item> 805 <item>
806 <widget class="QSlider" name="sliderLStickDeadzone"> 806 <widget class="QSlider" name="sliderLStickDeadzoneAndModifier">
807 <property name="orientation"> 807 <property name="orientation">
808 <enum>Qt::Horizontal</enum> 808 <enum>Qt::Horizontal</enum>
809 </property> 809 </property>
diff --git a/src/yuzu/game_list_p.h b/src/yuzu/game_list_p.h
index 3e6d5a7cd..0cd0054c8 100644
--- a/src/yuzu/game_list_p.h
+++ b/src/yuzu/game_list_p.h
@@ -126,13 +126,6 @@ public:
126 126
127 return GameListItem::data(role); 127 return GameListItem::data(role);
128 } 128 }
129
130 /**
131 * Override to prevent automatic sorting.
132 */
133 bool operator<(const QStandardItem& other) const override {
134 return false;
135 }
136}; 129};
137 130
138class GameListItemCompat : public GameListItem { 131class GameListItemCompat : public GameListItem {
@@ -279,6 +272,13 @@ public:
279 return static_cast<int>(dir_type); 272 return static_cast<int>(dir_type);
280 } 273 }
281 274
275 /**
276 * Override to prevent automatic sorting between folders and the addDir button.
277 */
278 bool operator<(const QStandardItem& other) const override {
279 return false;
280 }
281
282private: 282private:
283 GameListItemType dir_type; 283 GameListItemType dir_type;
284}; 284};
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 1e76f789c..0a6839b2d 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -1325,7 +1325,9 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
1325 FileSys::VirtualFile romfs; 1325 FileSys::VirtualFile romfs;
1326 1326
1327 if (*romfs_title_id == program_id) { 1327 if (*romfs_title_id == program_id) {
1328 romfs = file; 1328 const u64 ivfc_offset = loader->ReadRomFSIVFCOffset();
1329 FileSys::PatchManager pm{program_id};
1330 romfs = pm.PatchRomFS(file, ivfc_offset, FileSys::ContentRecordType::Program);
1329 } else { 1331 } else {
1330 romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS(); 1332 romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS();
1331 } 1333 }
diff --git a/src/yuzu_cmd/config.cpp b/src/yuzu_cmd/config.cpp
index f4cd905c9..8476a5a16 100644
--- a/src/yuzu_cmd/config.cpp
+++ b/src/yuzu_cmd/config.cpp
@@ -388,12 +388,14 @@ void Config::ReadValues() {
388 static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100)); 388 static_cast<u16>(sdl2_config->GetInteger("Renderer", "frame_limit", 100));
389 Settings::values.use_disk_shader_cache = 389 Settings::values.use_disk_shader_cache =
390 sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false); 390 sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false);
391 Settings::values.use_accurate_gpu_emulation = 391 const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0);
392 sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); 392 Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
393 Settings::values.use_asynchronous_gpu_emulation = 393 Settings::values.use_asynchronous_gpu_emulation =
394 sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false); 394 sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);
395 Settings::values.use_vsync = 395 Settings::values.use_vsync =
396 static_cast<u16>(sdl2_config->GetInteger("Renderer", "use_vsync", 1)); 396 static_cast<u16>(sdl2_config->GetInteger("Renderer", "use_vsync", 1));
397 Settings::values.use_fast_gpu_time =
398 sdl2_config->GetBoolean("Renderer", "use_fast_gpu_time", true);
397 399
398 Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0)); 400 Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0));
399 Settings::values.bg_green = 401 Settings::values.bg_green =
@@ -425,6 +427,8 @@ void Config::ReadValues() {
425 Settings::values.reporting_services = 427 Settings::values.reporting_services =
426 sdl2_config->GetBoolean("Debugging", "reporting_services", false); 428 sdl2_config->GetBoolean("Debugging", "reporting_services", false);
427 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false); 429 Settings::values.quest_flag = sdl2_config->GetBoolean("Debugging", "quest_flag", false);
430 Settings::values.disable_cpu_opt =
431 sdl2_config->GetBoolean("Debugging", "disable_cpu_opt", false);
428 432
429 const auto title_list = sdl2_config->Get("AddOns", "title_ids", ""); 433 const auto title_list = sdl2_config->Get("AddOns", "title_ids", "");
430 std::stringstream ss(title_list); 434 std::stringstream ss(title_list);
diff --git a/src/yuzu_cmd/default_ini.h b/src/yuzu_cmd/default_ini.h
index d63d7a58e..60b1a62fa 100644
--- a/src/yuzu_cmd/default_ini.h
+++ b/src/yuzu_cmd/default_ini.h
@@ -146,9 +146,9 @@ frame_limit =
146# 0 (default): Off, 1 : On 146# 0 (default): Off, 1 : On
147use_disk_shader_cache = 147use_disk_shader_cache =
148 148
149# Whether to use accurate GPU emulation 149# Which gpu accuracy level to use
150# 0 (default): Off (fast), 1 : On (slow) 150# 0 (Normal), 1 (High), 2 (Extreme)
151use_accurate_gpu_emulation = 151gpu_accuracy =
152 152
153# Whether to use asynchronous GPU emulation 153# Whether to use asynchronous GPU emulation
154# 0 : Off (slow), 1 (default): On (fast) 154# 0 : Off (slow), 1 (default): On (fast)
@@ -280,6 +280,9 @@ dump_nso=false
280# Determines whether or not yuzu will report to the game that the emulated console is in Kiosk Mode 280# Determines whether or not yuzu will report to the game that the emulated console is in Kiosk Mode
281# false: Retail/Normal Mode (default), true: Kiosk Mode 281# false: Retail/Normal Mode (default), true: Kiosk Mode
282quest_flag = 282quest_flag =
283# Determines whether or not JIT CPU optimizations are enabled
284# false: Optimizations Enabled, true: Optimizations Disabled
285disable_cpu_opt =
283 286
284[WebService] 287[WebService]
285# Whether or not to enable telemetry 288# Whether or not to enable telemetry
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
index 19584360c..e5e684206 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2.cpp
@@ -181,9 +181,10 @@ void EmuWindow_SDL2::PollEvents() {
181 const u32 current_time = SDL_GetTicks(); 181 const u32 current_time = SDL_GetTicks();
182 if (current_time > last_time + 2000) { 182 if (current_time > last_time + 2000) {
183 const auto results = Core::System::GetInstance().GetAndResetPerfStats(); 183 const auto results = Core::System::GetInstance().GetAndResetPerfStats();
184 const auto title = fmt::format( 184 const auto title =
185 "yuzu {} | {}-{} | FPS: {:.0f} ({:.0%})", Common::g_build_fullname, 185 fmt::format("yuzu {} | {}-{} | FPS: {:.0f} ({:.0f}%)", Common::g_build_fullname,
186 Common::g_scm_branch, Common::g_scm_desc, results.game_fps, results.emulation_speed); 186 Common::g_scm_branch, Common::g_scm_desc, results.game_fps,
187 results.emulation_speed * 100.0);
187 SDL_SetWindowTitle(render_window, title.c_str()); 188 SDL_SetWindowTitle(render_window, title.c_str());
188 last_time = current_time; 189 last_time = current_time;
189 } 190 }
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
index f2990910e..cb8e68a39 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.cpp
@@ -29,6 +29,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen)
29 SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI); 29 SDL_WINDOW_RESIZABLE | SDL_WINDOW_ALLOW_HIGHDPI);
30 30
31 SDL_SysWMinfo wm; 31 SDL_SysWMinfo wm;
32 SDL_VERSION(&wm.version);
32 if (SDL_GetWindowWMInfo(render_window, &wm) == SDL_FALSE) { 33 if (SDL_GetWindowWMInfo(render_window, &wm) == SDL_FALSE) {
33 LOG_CRITICAL(Frontend, "Failed to get information from the window manager"); 34 LOG_CRITICAL(Frontend, "Failed to get information from the window manager");
34 std::exit(EXIT_FAILURE); 35 std::exit(EXIT_FAILURE);
@@ -70,7 +71,7 @@ EmuWindow_SDL2_VK::EmuWindow_SDL2_VK(Core::System& system, bool fullscreen)
70EmuWindow_SDL2_VK::~EmuWindow_SDL2_VK() = default; 71EmuWindow_SDL2_VK::~EmuWindow_SDL2_VK() = default;
71 72
72std::unique_ptr<Core::Frontend::GraphicsContext> EmuWindow_SDL2_VK::CreateSharedContext() const { 73std::unique_ptr<Core::Frontend::GraphicsContext> EmuWindow_SDL2_VK::CreateSharedContext() const {
73 return nullptr; 74 return std::make_unique<DummyContext>();
74} 75}
75 76
76void EmuWindow_SDL2_VK::Present() { 77void EmuWindow_SDL2_VK::Present() {
diff --git a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
index b8021ebea..77a6ca72b 100644
--- a/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
+++ b/src/yuzu_cmd/emu_window/emu_window_sdl2_vk.h
@@ -22,3 +22,5 @@ public:
22 22
23 std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override; 23 std::unique_ptr<Core::Frontend::GraphicsContext> CreateSharedContext() const override;
24}; 24};
25
26class DummyContext : public Core::Frontend::GraphicsContext {};
diff --git a/src/yuzu_tester/config.cpp b/src/yuzu_tester/config.cpp
index ee2591c8f..3be58b15d 100644
--- a/src/yuzu_tester/config.cpp
+++ b/src/yuzu_tester/config.cpp
@@ -126,10 +126,12 @@ void Config::ReadValues() {
126 Settings::values.frame_limit = 100; 126 Settings::values.frame_limit = 100;
127 Settings::values.use_disk_shader_cache = 127 Settings::values.use_disk_shader_cache =
128 sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false); 128 sdl2_config->GetBoolean("Renderer", "use_disk_shader_cache", false);
129 Settings::values.use_accurate_gpu_emulation = 129 const int gpu_accuracy_level = sdl2_config->GetInteger("Renderer", "gpu_accuracy", 0);
130 sdl2_config->GetBoolean("Renderer", "use_accurate_gpu_emulation", false); 130 Settings::values.gpu_accuracy = static_cast<Settings::GPUAccuracy>(gpu_accuracy_level);
131 Settings::values.use_asynchronous_gpu_emulation = 131 Settings::values.use_asynchronous_gpu_emulation =
132 sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false); 132 sdl2_config->GetBoolean("Renderer", "use_asynchronous_gpu_emulation", false);
133 Settings::values.use_fast_gpu_time =
134 sdl2_config->GetBoolean("Renderer", "use_fast_gpu_time", true);
133 135
134 Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0)); 136 Settings::values.bg_red = static_cast<float>(sdl2_config->GetReal("Renderer", "bg_red", 0.0));
135 Settings::values.bg_green = 137 Settings::values.bg_green =