Diffstat (limited to 'src')
-rw-r--r--  src/CMakeLists.txt  1
-rw-r--r--  src/audio_core/audio_core.cpp  2
-rw-r--r--  src/audio_core/audio_manager.cpp  17
-rw-r--r--  src/audio_core/audio_manager.h  19
-rw-r--r--  src/audio_core/renderer/adsp/audio_renderer.cpp  4
-rw-r--r--  src/audio_core/renderer/adsp/audio_renderer.h  2
-rw-r--r--  src/audio_core/renderer/system_manager.cpp  2
-rw-r--r--  src/audio_core/sink/sink_stream.cpp  9
-rw-r--r--  src/common/CMakeLists.txt  33
-rw-r--r--  src/common/address_space.cpp  10
-rw-r--r--  src/common/address_space.h  150
-rw-r--r--  src/common/address_space.inc  366
-rw-r--r--  src/common/algorithm.h  8
-rw-r--r--  src/common/hash.h  7
-rw-r--r--  src/common/input.h  27
-rw-r--r--  src/common/logging/backend.cpp  2
-rw-r--r--  src/common/multi_level_page_table.cpp  9
-rw-r--r--  src/common/multi_level_page_table.h  78
-rw-r--r--  src/common/multi_level_page_table.inc  84
-rw-r--r--  src/common/settings.h  1
-rw-r--r--  src/core/CMakeLists.txt  19
-rw-r--r--  src/core/core.cpp  15
-rw-r--r--  src/core/core.h  19
-rw-r--r--  src/core/core_timing.cpp  2
-rw-r--r--  src/core/cpu_manager.cpp  4
-rw-r--r--  src/core/debugger/debugger.cpp  2
-rw-r--r--  src/core/file_sys/program_metadata.cpp  52
-rw-r--r--  src/core/file_sys/program_metadata.h  14
-rw-r--r--  src/core/hardware_interrupt_manager.cpp  32
-rw-r--r--  src/core/hardware_interrupt_manager.h  32
-rw-r--r--  src/core/hid/emulated_controller.cpp  76
-rw-r--r--  src/core/hid/emulated_controller.h  34
-rw-r--r--  src/core/hid/input_converter.cpp  14
-rw-r--r--  src/core/hid/input_converter.h  8
-rw-r--r--  src/core/hle/kernel/k_worker_task_manager.cpp  2
-rw-r--r--  src/core/hle/kernel/kernel.cpp  2
-rw-r--r--  src/core/hle/kernel/service_thread.cpp  2
-rw-r--r--  src/core/hle/service/hid/controllers/npad.cpp  6
-rw-r--r--  src/core/hle/service/hid/controllers/palma.cpp  229
-rw-r--r--  src/core/hle/service/hid/controllers/palma.h  163
-rw-r--r--  src/core/hle/service/hid/errors.h  2
-rw-r--r--  src/core/hle/service/hid/hid.cpp  447
-rw-r--r--  src/core/hle/service/hid/hid.h  29
-rw-r--r--  src/core/hle/service/hid/irs.cpp  3
-rw-r--r--  src/core/hle/service/ldn/lan_discovery.cpp  633
-rw-r--r--  src/core/hle/service/ldn/lan_discovery.h  134
-rw-r--r--  src/core/hle/service/ldn/ldn.cpp  227
-rw-r--r--  src/core/hle/service/ldn/ldn_types.h  48
-rw-r--r--  src/core/hle/service/mii/mii_manager.cpp  161
-rw-r--r--  src/core/hle/service/mii/mii_manager.h  4
-rw-r--r--  src/core/hle/service/nfc/nfc.cpp  8
-rw-r--r--  src/core/hle/service/nfp/amiibo_crypto.cpp  79
-rw-r--r--  src/core/hle/service/nfp/amiibo_crypto.h  10
-rw-r--r--  src/core/hle/service/nfp/nfp.cpp  1093
-rw-r--r--  src/core/hle/service/nfp/nfp.h  161
-rw-r--r--  src/core/hle/service/nfp/nfp_device.cpp  681
-rw-r--r--  src/core/hle/service/nfp/nfp_device.h  101
-rw-r--r--  src/core/hle/service/nfp/nfp_result.h  24
-rw-r--r--  src/core/hle/service/nfp/nfp_types.h (renamed from src/core/hle/service/nfp/amiibo_types.h)  149
-rw-r--r--  src/core/hle/service/nfp/nfp_user.cpp  664
-rw-r--r--  src/core/hle/service/nfp/nfp_user.h  44
-rw-r--r--  src/core/hle/service/nvdrv/core/container.cpp  50
-rw-r--r--  src/core/hle/service/nvdrv/core/container.h  52
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.cpp  272
-rw-r--r--  src/core/hle/service/nvdrv/core/nvmap.h  175
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.cpp  121
-rw-r--r--  src/core/hle/service/nvdrv/core/syncpoint_manager.h  134
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdevice.h  8
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp  19
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvdisp_disp0.h  15
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp  492
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h  191
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp  363
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl.h  114
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp  25
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h  14
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp  129
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_gpu.h  54
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp  16
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec.h  6
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp  81
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h  23
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.cpp  20
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvhost_vic.h  6
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.cpp  230
-rw-r--r--  src/core/hle/service/nvdrv/devices/nvmap.h  56
-rw-r--r--  src/core/hle/service/nvdrv/nvdata.h  17
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.cpp  130
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv.h  125
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.cpp  31
-rw-r--r--  src/core/hle/service/nvdrv/nvdrv_interface.h  2
-rw-r--r--  src/core/hle/service/nvdrv/syncpoint_manager.cpp  38
-rw-r--r--  src/core/hle/service/nvdrv/syncpoint_manager.h  84
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_consumer.cpp  9
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_consumer.h  8
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.cpp  10
-rw-r--r--  src/core/hle/service/nvflinger/buffer_queue_producer.h  9
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.cpp  38
-rw-r--r--  src/core/hle/service/nvflinger/nvflinger.h  7
-rw-r--r--  src/core/hle/service/sockets/bsd.cpp  2
-rw-r--r--  src/core/hle/service/vi/display/vi_display.cpp  31
-rw-r--r--  src/core/hle/service/vi/display/vi_display.h  21
-rw-r--r--  src/core/hle/service/vi/vi.cpp  41
-rw-r--r--  src/core/hle/service/vi/vi_results.h  13
-rw-r--r--  src/core/internal_network/network.cpp  12
-rw-r--r--  src/core/internal_network/network_interface.cpp  12
-rw-r--r--  src/core/internal_network/network_interface.h  1
-rw-r--r--  src/core/internal_network/socket_proxy.cpp  8
-rw-r--r--  src/core/internal_network/sockets.h  11
-rw-r--r--  src/core/loader/loader.cpp  4
-rw-r--r--  src/core/memory.cpp  9
-rw-r--r--  src/core/memory.h  1
-rw-r--r--  src/dedicated_room/CMakeLists.txt  2
-rw-r--r--  src/dedicated_room/yuzu_room.cpp  13
-rw-r--r--  src/input_common/CMakeLists.txt  2
-rw-r--r--  src/input_common/drivers/gc_adapter.cpp  4
-rw-r--r--  src/input_common/drivers/mouse.cpp  2
-rw-r--r--  src/input_common/drivers/sdl_driver.cpp  4
-rw-r--r--  src/input_common/drivers/virtual_amiibo.cpp  101
-rw-r--r--  src/input_common/drivers/virtual_amiibo.h  61
-rw-r--r--  src/input_common/input_engine.cpp  37
-rw-r--r--  src/input_common/input_engine.h  17
-rw-r--r--  src/input_common/input_poller.cpp  64
-rw-r--r--  src/input_common/input_poller.h  10
-rw-r--r--  src/input_common/main.cpp  21
-rw-r--r--  src/input_common/main.h  7
-rw-r--r--  src/network/network.cpp  2
-rw-r--r--  src/network/room.cpp  63
-rw-r--r--  src/network/room.h  1
-rw-r--r--  src/network/room_member.cpp  57
-rw-r--r--  src/network/room_member.h  35
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm.cpp  2
-rw-r--r--  src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp  25
-rw-r--r--  src/shader_recompiler/backend/glsl/emit_glsl.cpp  2
-rw-r--r--  src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp  6
-rw-r--r--  src/shader_recompiler/ir_opt/texture_pass.cpp  98
-rw-r--r--  src/shader_recompiler/shader_info.h  4
-rw-r--r--  src/video_core/CMakeLists.txt  51
-rw-r--r--  src/video_core/buffer_cache/buffer_cache.h  166
-rw-r--r--  src/video_core/cdma_pusher.cpp  29
-rw-r--r--  src/video_core/cdma_pusher.h  18
-rw-r--r--  src/video_core/command_classes/host1x.cpp  29
-rw-r--r--  src/video_core/control/channel_state.cpp  40
-rw-r--r--  src/video_core/control/channel_state.h  68
-rw-r--r--  src/video_core/control/channel_state_cache.cpp  14
-rw-r--r--  src/video_core/control/channel_state_cache.h  101
-rw-r--r--  src/video_core/control/channel_state_cache.inc  86
-rw-r--r--  src/video_core/control/scheduler.cpp  32
-rw-r--r--  src/video_core/control/scheduler.h  37
-rw-r--r--  src/video_core/dma_pusher.cpp  26
-rw-r--r--  src/video_core/dma_pusher.h  39
-rw-r--r--  src/video_core/engines/engine_upload.cpp  46
-rw-r--r--  src/video_core/engines/engine_upload.h  6
-rw-r--r--  src/video_core/engines/kepler_compute.cpp  13
-rw-r--r--  src/video_core/engines/kepler_memory.cpp  13
-rw-r--r--  src/video_core/engines/maxwell_3d.cpp  43
-rw-r--r--  src/video_core/engines/maxwell_dma.cpp  111
-rw-r--r--  src/video_core/engines/maxwell_dma.h  6
-rw-r--r--  src/video_core/engines/puller.cpp  306
-rw-r--r--  src/video_core/engines/puller.h  177
-rw-r--r--  src/video_core/fence_manager.h  104
-rw-r--r--  src/video_core/gpu.cpp  706
-rw-r--r--  src/video_core/gpu.h  93
-rw-r--r--  src/video_core/gpu_thread.cpp  24
-rw-r--r--  src/video_core/gpu_thread.h  14
-rw-r--r--  src/video_core/host1x/codecs/codec.cpp (renamed from src/video_core/command_classes/codecs/codec.cpp)  44
-rw-r--r--  src/video_core/host1x/codecs/codec.h (renamed from src/video_core/command_classes/codecs/codec.h)  21
-rw-r--r--  src/video_core/host1x/codecs/h264.cpp (renamed from src/video_core/command_classes/codecs/h264.cpp)  17
-rw-r--r--  src/video_core/host1x/codecs/h264.h (renamed from src/video_core/command_classes/codecs/h264.h)  16
-rw-r--r--  src/video_core/host1x/codecs/vp8.cpp (renamed from src/video_core/command_classes/codecs/vp8.cpp)  12
-rw-r--r--  src/video_core/host1x/codecs/vp8.h (renamed from src/video_core/command_classes/codecs/vp8.h)  15
-rw-r--r--  src/video_core/host1x/codecs/vp9.cpp (renamed from src/video_core/command_classes/codecs/vp9.cpp)  23
-rw-r--r--  src/video_core/host1x/codecs/vp9.h (renamed from src/video_core/command_classes/codecs/vp9.h)  22
-rw-r--r--  src/video_core/host1x/codecs/vp9_types.h (renamed from src/video_core/command_classes/codecs/vp9_types.h)  1
-rw-r--r--  src/video_core/host1x/control.cpp  33
-rw-r--r--  src/video_core/host1x/control.h (renamed from src/video_core/command_classes/host1x.h)  20
-rw-r--r--  src/video_core/host1x/host1x.cpp  17
-rw-r--r--  src/video_core/host1x/host1x.h  57
-rw-r--r--  src/video_core/host1x/nvdec.cpp (renamed from src/video_core/command_classes/nvdec.cpp)  11
-rw-r--r--  src/video_core/host1x/nvdec.h (renamed from src/video_core/command_classes/nvdec.h)  14
-rw-r--r--  src/video_core/host1x/nvdec_common.h (renamed from src/video_core/command_classes/nvdec_common.h)  4
-rw-r--r--  src/video_core/host1x/sync_manager.cpp (renamed from src/video_core/command_classes/sync_manager.cpp)  13
-rw-r--r--  src/video_core/host1x/sync_manager.h (renamed from src/video_core/command_classes/sync_manager.h)  12
-rw-r--r--  src/video_core/host1x/syncpoint_manager.cpp  96
-rw-r--r--  src/video_core/host1x/syncpoint_manager.h  98
-rw-r--r--  src/video_core/host1x/vic.cpp (renamed from src/video_core/command_classes/vic.cpp)  36
-rw-r--r--  src/video_core/host1x/vic.h (renamed from src/video_core/command_classes/vic.h)  13
-rw-r--r--  src/video_core/host_shaders/astc_decoder.comp  2
-rw-r--r--  src/video_core/macro/macro.cpp  1
-rw-r--r--  src/video_core/macro/macro_hle.cpp  63
-rw-r--r--  src/video_core/macro/macro_jit_x64.cpp  62
-rw-r--r--  src/video_core/memory_manager.cpp  754
-rw-r--r--  src/video_core/memory_manager.h  174
-rw-r--r--  src/video_core/query_cache.h  22
-rw-r--r--  src/video_core/rasterizer_interface.h  20
-rw-r--r--  src/video_core/renderer_opengl/gl_buffer_cache.cpp  2
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.cpp  20
-rw-r--r--  src/video_core/renderer_opengl/gl_compute_pipeline.h  16
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.cpp  13
-rw-r--r--  src/video_core/renderer_opengl/gl_fence_manager.h  6
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.cpp  29
-rw-r--r--  src/video_core/renderer_opengl/gl_graphics_pipeline.h  16
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.cpp  5
-rw-r--r--  src/video_core/renderer_opengl/gl_query_cache.h  3
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.cpp  217
-rw-r--r--  src/video_core/renderer_opengl/gl_rasterizer.h  22
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.cpp  44
-rw-r--r--  src/video_core/renderer_opengl/gl_shader_cache.h  9
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.cpp  17
-rw-r--r--  src/video_core/renderer_opengl/gl_state_tracker.h  83
-rw-r--r--  src/video_core/renderer_opengl/maxwell_to_gl.h  4
-rw-r--r--  src/video_core/renderer_opengl/renderer_opengl.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/maxwell_to_vk.cpp  4
-rw-r--r--  src/video_core/renderer_vulkan/renderer_vulkan.cpp  8
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.cpp  13
-rw-r--r--  src/video_core/renderer_vulkan/vk_blit_screen.h  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pass.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_compute_pipeline.cpp  4
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.cpp  15
-rw-r--r--  src/video_core/renderer_vulkan/vk_fence_manager.h  6
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp  18
-rw-r--r--  src/video_core/renderer_vulkan/vk_graphics_pipeline.h  28
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.cpp  38
-rw-r--r--  src/video_core/renderer_vulkan/vk_pipeline_cache.h  6
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.cpp  7
-rw-r--r--  src/video_core/renderer_vulkan/vk_query_cache.h  5
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.cpp  123
-rw-r--r--  src/video_core/renderer_vulkan/vk_rasterizer.h  29
-rw-r--r--  src/video_core/renderer_vulkan/vk_scheduler.cpp  2
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp  84
-rw-r--r--  src/video_core/renderer_vulkan/vk_staging_buffer_pool.h  3
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.cpp  18
-rw-r--r--  src/video_core/renderer_vulkan/vk_state_tracker.h  27
-rw-r--r--  src/video_core/renderer_vulkan/vk_swapchain.cpp  15
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.cpp  32
-rw-r--r--  src/video_core/renderer_vulkan/vk_texture_cache.h  5
-rw-r--r--  src/video_core/shader_cache.cpp  33
-rw-r--r--  src/video_core/shader_cache.h  15
-rw-r--r--  src/video_core/surface.cpp  3
-rw-r--r--  src/video_core/surface.h  16
-rw-r--r--  src/video_core/texture_cache/format_lookup_table.cpp  6
-rw-r--r--  src/video_core/texture_cache/formatter.h  8
-rw-r--r--  src/video_core/texture_cache/image_base.cpp  13
-rw-r--r--  src/video_core/texture_cache/image_base.h  3
-rw-r--r--  src/video_core/texture_cache/render_targets.h  1
-rw-r--r--  src/video_core/texture_cache/texture_cache.cpp  15
-rw-r--r--  src/video_core/texture_cache/texture_cache.h  222
-rw-r--r--  src/video_core/texture_cache/texture_cache_base.h  85
-rw-r--r--  src/video_core/texture_cache/util.cpp  3
-rw-r--r--  src/video_core/textures/astc.cpp  4
-rw-r--r--  src/video_core/textures/decoders.cpp  240
-rw-r--r--  src/video_core/textures/decoders.h  33
-rw-r--r--  src/video_core/vulkan_common/vulkan_wrapper.h  20
-rw-r--r--  src/web_service/web_backend.cpp  3
-rw-r--r--  src/yuzu/applets/qt_controller.cpp  2
-rw-r--r--  src/yuzu/bootmanager.cpp  2
-rw-r--r--  src/yuzu/configuration/config.cpp  2
-rw-r--r--  src/yuzu/configuration/configure_debug.cpp  2
-rw-r--r--  src/yuzu/configuration/configure_debug.ui  10
-rw-r--r--  src/yuzu/configuration/configure_graphics.cpp  7
-rw-r--r--  src/yuzu/configuration/configure_input.cpp  7
-rw-r--r--  src/yuzu/configuration/input_profiles.cpp  2
-rw-r--r--  src/yuzu/main.cpp  92
-rw-r--r--  src/yuzu/main.h  1
-rw-r--r--  src/yuzu/main.ui  24
-rw-r--r--  src/yuzu/multiplayer/chat_room.cpp  12
-rw-r--r--  src/yuzu/multiplayer/client_room.cpp  3
-rw-r--r--  src/yuzu/multiplayer/direct_connect.cpp  2
-rw-r--r--  src/yuzu/multiplayer/direct_connect.h  1
-rw-r--r--  src/yuzu/multiplayer/host_room.cpp  1
-rw-r--r--  src/yuzu/multiplayer/host_room.h  3
-rw-r--r--  src/yuzu/multiplayer/lobby.cpp  67
-rw-r--r--  src/yuzu/multiplayer/lobby.h  8
-rw-r--r--  src/yuzu/multiplayer/lobby_p.h  18
-rw-r--r--  src/yuzu/multiplayer/message.cpp  6
-rw-r--r--  src/yuzu/multiplayer/state.cpp  80
-rw-r--r--  src/yuzu/multiplayer/state.h  14
-rw-r--r--  src/yuzu/startup_checks.cpp  91
-rw-r--r--  src/yuzu/startup_checks.h  2
-rw-r--r--  src/yuzu/uisettings.h  2
280 files changed, 11494 insertions, 5023 deletions
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 54de1dc94..3575a3cb3 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -121,6 +121,7 @@ else()
 
     if (ARCHITECTURE_x86_64)
         add_compile_options("-mcx16")
+        add_compile_options("-fwrapv")
     endif()
 
     if (APPLE AND CMAKE_CXX_COMPILER_ID STREQUAL Clang)
diff --git a/src/audio_core/audio_core.cpp b/src/audio_core/audio_core.cpp
index c845330cd..07a679c32 100644
--- a/src/audio_core/audio_core.cpp
+++ b/src/audio_core/audio_core.cpp
@@ -8,7 +8,7 @@
 
 namespace AudioCore {
 
-AudioCore::AudioCore(Core::System& system) : audio_manager{std::make_unique<AudioManager>(system)} {
+AudioCore::AudioCore(Core::System& system) : audio_manager{std::make_unique<AudioManager>()} {
     CreateSinks();
     // Must be created after the sinks
     adsp = std::make_unique<AudioRenderer::ADSP::ADSP>(system, *output_sink);
diff --git a/src/audio_core/audio_manager.cpp b/src/audio_core/audio_manager.cpp
index 2f1bba9c3..2acde668e 100644
--- a/src/audio_core/audio_manager.cpp
+++ b/src/audio_core/audio_manager.cpp
@@ -1,14 +1,13 @@
 // SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
-#include "audio_core/audio_in_manager.h"
 #include "audio_core/audio_manager.h"
-#include "audio_core/audio_out_manager.h"
 #include "core/core.h"
+#include "core/hle/service/audio/errors.h"
 
 namespace AudioCore {
 
-AudioManager::AudioManager(Core::System& system_) : system{system_} {
+AudioManager::AudioManager() {
     thread = std::jthread([this]() { ThreadFunc(); });
 }
 
@@ -27,7 +26,7 @@ Result AudioManager::SetOutManager(BufferEventFunc buffer_func) {
 
     const auto index{events.GetManagerIndex(Event::Type::AudioOutManager)};
     if (buffer_events[index] == nullptr) {
-        buffer_events[index] = buffer_func;
+        buffer_events[index] = std::move(buffer_func);
         needs_update = true;
         events.SetAudioEvent(Event::Type::AudioOutManager, true);
     }
@@ -43,7 +42,7 @@ Result AudioManager::SetInManager(BufferEventFunc buffer_func) {
 
     const auto index{events.GetManagerIndex(Event::Type::AudioInManager)};
     if (buffer_events[index] == nullptr) {
-        buffer_events[index] = buffer_func;
+        buffer_events[index] = std::move(buffer_func);
         needs_update = true;
         events.SetAudioEvent(Event::Type::AudioInManager, true);
     }
@@ -60,19 +59,21 @@ void AudioManager::ThreadFunc() {
     running = true;
 
     while (running) {
-        auto timed_out{events.Wait(l, std::chrono::seconds(2))};
+        const auto timed_out{events.Wait(l, std::chrono::seconds(2))};
 
         if (events.CheckAudioEventSet(Event::Type::Max)) {
            break;
        }
 
        for (size_t i = 0; i < buffer_events.size(); i++) {
-            if (events.CheckAudioEventSet(Event::Type(i)) || timed_out) {
+            const auto event_type = static_cast<Event::Type>(i);
+
+            if (events.CheckAudioEventSet(event_type) || timed_out) {
                if (buffer_events[i]) {
                    buffer_events[i]();
                }
            }
-            events.SetAudioEvent(Event::Type(i), false);
+            events.SetAudioEvent(event_type, false);
        }
    }
 }
diff --git a/src/audio_core/audio_manager.h b/src/audio_core/audio_manager.h
index 8cbd95e22..abf077de4 100644
--- a/src/audio_core/audio_manager.h
+++ b/src/audio_core/audio_manager.h
@@ -10,22 +10,11 @@
 #include <thread>
 
 #include "audio_core/audio_event.h"
-#include "core/hle/service/audio/errors.h"
 
-namespace Core {
-class System;
-}
+union Result;
 
 namespace AudioCore {
 
-namespace AudioOut {
-class Manager;
-}
-
-namespace AudioIn {
-class Manager;
-}
-
 /**
  * The AudioManager's main purpose is to wait for buffer events for the audio in and out managers,
  * and call an associated callback to release buffers.
@@ -43,7 +32,7 @@ class AudioManager {
     using BufferEventFunc = std::function<void()>;
 
 public:
-    explicit AudioManager(Core::System& system);
+    explicit AudioManager();
 
     /**
      * Shutdown the audio manager.
@@ -80,10 +69,6 @@ private:
      */
    void ThreadFunc();
 
-    /// Core system
-    Core::System& system;
-    /// Have sessions started palying?
-    bool sessions_started{};
    /// Is the main thread running?
    std::atomic<bool> running{};
    /// Unused
diff --git a/src/audio_core/renderer/adsp/audio_renderer.cpp b/src/audio_core/renderer/adsp/audio_renderer.cpp
index bafe4822a..d982ef630 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.cpp
+++ b/src/audio_core/renderer/adsp/audio_renderer.cpp
@@ -47,7 +47,7 @@ RenderMessage AudioRenderer_Mailbox::ADSPWaitMessage() {
     return msg;
 }
 
-CommandBuffer& AudioRenderer_Mailbox::GetCommandBuffer(const s32 session_id) {
+CommandBuffer& AudioRenderer_Mailbox::GetCommandBuffer(const u32 session_id) {
     return command_buffers[session_id];
 }
 
@@ -132,7 +132,7 @@ void AudioRenderer::CreateSinkStreams() {
 }
 
 void AudioRenderer::ThreadFunc() {
-    constexpr char name[]{"yuzu:AudioRenderer"};
+    constexpr char name[]{"AudioRenderer"};
     MicroProfileOnThreadCreate(name);
     Common::SetCurrentThreadName(name);
     Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
diff --git a/src/audio_core/renderer/adsp/audio_renderer.h b/src/audio_core/renderer/adsp/audio_renderer.h
index 02e923c84..151f38c1b 100644
--- a/src/audio_core/renderer/adsp/audio_renderer.h
+++ b/src/audio_core/renderer/adsp/audio_renderer.h
@@ -83,7 +83,7 @@ public:
      * @param session_id - The session id to get (0 or 1).
      * @return The command buffer.
      */
-    CommandBuffer& GetCommandBuffer(s32 session_id);
+    CommandBuffer& GetCommandBuffer(u32 session_id);
 
     /**
      * Set the command buffer with the given session id (0 or 1).
diff --git a/src/audio_core/renderer/system_manager.cpp b/src/audio_core/renderer/system_manager.cpp
index 9c1331e19..f66b2b890 100644
--- a/src/audio_core/renderer/system_manager.cpp
+++ b/src/audio_core/renderer/system_manager.cpp
@@ -94,7 +94,7 @@ bool SystemManager::Remove(System& system_) {
 }
 
 void SystemManager::ThreadFunc() {
-    constexpr char name[]{"yuzu:AudioRenderSystemManager"};
+    constexpr char name[]{"AudioRenderSystemManager"};
     MicroProfileOnThreadCreate(name);
     Common::SetCurrentThreadName(name);
     Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
diff --git a/src/audio_core/sink/sink_stream.cpp b/src/audio_core/sink/sink_stream.cpp
index 37fe725e4..849f862b0 100644
--- a/src/audio_core/sink/sink_stream.cpp
+++ b/src/audio_core/sink/sink_stream.cpp
@@ -214,8 +214,13 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
     // video play out without attempting to stall.
     // Can hopefully remove this later with a more complete NVDEC implementation.
     const auto nvdec_active{system.AudioCore().IsNVDECActive()};
-    if (!nvdec_active && queued_buffers > max_queue_size) {
+
+    // Core timing cannot be paused in single-core mode, so Stall ends up being called over and over
+    // and never recovers to a normal state, so just skip attempting to sync things on single-core.
+    if (system.IsMulticore() && !nvdec_active && queued_buffers > max_queue_size) {
         Stall();
+    } else if (system.IsMulticore() && queued_buffers <= max_queue_size) {
+        Unstall();
     }
 
     while (frames_written < num_frames) {
@@ -255,7 +260,7 @@ void SinkStream::ProcessAudioOutAndRender(std::span<s16> output_buffer, std::siz
     std::memcpy(&last_frame[0], &output_buffer[(frames_written - 1) * frame_size],
                 frame_size_bytes);
 
-    if (stalled && queued_buffers <= max_queue_size) {
+    if (system.IsMulticore() && queued_buffers <= max_queue_size) {
         Unstall();
     }
 }
diff --git a/src/common/CMakeLists.txt b/src/common/CMakeLists.txt
index 68436a4bc..a02696873 100644
--- a/src/common/CMakeLists.txt
+++ b/src/common/CMakeLists.txt
@@ -14,34 +14,11 @@ if (DEFINED ENV{DISPLAYVERSION})
     set(DISPLAY_VERSION $ENV{DISPLAYVERSION})
 endif ()
 
-# Pass the path to git to the GenerateSCMRev.cmake as well
-find_package(Git QUIET)
-
-add_custom_command(OUTPUT scm_rev.cpp
-    COMMAND ${CMAKE_COMMAND}
-        -DSRC_DIR=${PROJECT_SOURCE_DIR}
-        -DBUILD_REPOSITORY=${BUILD_REPOSITORY}
-        -DTITLE_BAR_FORMAT_IDLE=${TITLE_BAR_FORMAT_IDLE}
-        -DTITLE_BAR_FORMAT_RUNNING=${TITLE_BAR_FORMAT_RUNNING}
-        -DBUILD_TAG=${BUILD_TAG}
-        -DBUILD_ID=${DISPLAY_VERSION}
-        -DGIT_REF_SPEC=${GIT_REF_SPEC}
-        -DGIT_REV=${GIT_REV}
-        -DGIT_DESC=${GIT_DESC}
-        -DGIT_BRANCH=${GIT_BRANCH}
-        -DBUILD_FULLNAME=${BUILD_FULLNAME}
-        -DGIT_EXECUTABLE=${GIT_EXECUTABLE}
-        -P ${PROJECT_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake
-    DEPENDS
-        # Check that the scm_rev files haven't changed
-        "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in"
-        "${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h"
-        # technically we should regenerate if the git version changed, but its not worth the effort imo
-        "${PROJECT_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake"
-    VERBATIM
-)
+include(GenerateSCMRev)
 
 add_library(common STATIC
+    address_space.cpp
+    address_space.h
     algorithm.h
     alignment.h
     announce_multiplayer_room.h
@@ -106,6 +83,8 @@ add_library(common STATIC
     microprofile.cpp
     microprofile.h
     microprofileui.h
+    multi_level_page_table.cpp
+    multi_level_page_table.h
     nvidia_flags.cpp
     nvidia_flags.h
     page_table.cpp
@@ -117,7 +96,7 @@ add_library(common STATIC
     quaternion.h
     reader_writer_queue.h
     ring_buffer.h
-    scm_rev.cpp
+    ${CMAKE_CURRENT_BINARY_DIR}/scm_rev.cpp
     scm_rev.h
     scope_exit.h
     settings.cpp
diff --git a/src/common/address_space.cpp b/src/common/address_space.cpp
new file mode 100644
index 000000000..866e78dbe
--- /dev/null
+++ b/src/common/address_space.cpp
@@ -0,0 +1,10 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.inc"
+
+namespace Common {
+
+template class Common::FlatAllocator<u32, 0, 32>;
+
+}
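
address_space.cpp and address_space.inc below follow the explicit-instantiation pattern: the template definitions live in the .inc file, a single translation unit includes it, and that unit instantiates the one specialization (FlatAllocator<u32, 0, 32>) other translation units link against. A minimal sketch of the same pattern, with a hypothetical Table<T> template standing in for FlatAllocator:

    // table.h -- declarations only; cheap to include everywhere
    template <typename T>
    class Table {
    public:
        void Put(T value);
    };

    // table.inc -- member definitions, deliberately kept out of the header
    #include "table.h"

    template <typename T>
    void Table<T>::Put(T value) {
        // ... store the value ...
    }

    // table.cpp -- the single place the definitions are compiled
    #include "table.inc"

    template class Table<int>; // only Table<int> gains out-of-line symbols

Any other file may include table.h and use Table<int>, but instantiating Table with a new type requires adding an explicit instantiation to table.cpp.
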
diff --git a/src/common/address_space.h b/src/common/address_space.h
new file mode 100644
index 000000000..9222b2fdc
--- /dev/null
+++ b/src/common/address_space.h
@@ -0,0 +1,150 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <concepts>
+#include <functional>
+#include <mutex>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+template <typename VaType, size_t AddressSpaceBits>
+concept AddressSpaceValid = std::is_unsigned_v<VaType> && sizeof(VaType) * 8 >= AddressSpaceBits;
+
+struct EmptyStruct {};
+
+/**
+ * @brief FlatAddressSpaceMap provides a generic VA->PA mapping implementation using a sorted vector
+ */
+template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa,
+          bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo = EmptyStruct>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAddressSpaceMap {
+public:
+    /// The maximum VA that this AS can technically reach
+    static constexpr VaType VaMaximum{(1ULL << (AddressSpaceBits - 1)) +
+                                      ((1ULL << (AddressSpaceBits - 1)) - 1)};
+
+    explicit FlatAddressSpaceMap(VaType va_limit,
+                                 std::function<void(VaType, VaType)> unmap_callback = {});
+
+    FlatAddressSpaceMap() = default;
+
+    void Map(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info = {}) {
+        std::scoped_lock lock(block_mutex);
+        MapLocked(virt, phys, size, extra_info);
+    }
+
+    void Unmap(VaType virt, VaType size) {
+        std::scoped_lock lock(block_mutex);
+        UnmapLocked(virt, size);
+    }
+
+    VaType GetVALimit() const {
+        return va_limit;
+    }
+
+protected:
+    /**
+     * @brief Represents a block of memory in the AS, the physical mapping is contiguous until
+     * another block with a different phys address is hit
+     */
+    struct Block {
+        /// VA of the block
+        VaType virt{UnmappedVa};
+        /// PA of the block, will increase 1-1 with VA until a new block is encountered
+        PaType phys{UnmappedPa};
+        [[no_unique_address]] ExtraBlockInfo extra_info;
+
+        Block() = default;
+
+        Block(VaType virt_, PaType phys_, ExtraBlockInfo extra_info_)
+            : virt(virt_), phys(phys_), extra_info(extra_info_) {}
+
+        bool Valid() const {
+            return virt != UnmappedVa;
+        }
+
+        bool Mapped() const {
+            return phys != UnmappedPa;
+        }
+
+        bool Unmapped() const {
+            return phys == UnmappedPa;
+        }
+
+        bool operator<(const VaType& p_virt) const {
+            return virt < p_virt;
+        }
+    };
+
+    /**
+     * @brief Maps a PA range into the given AS region
+     * @note block_mutex MUST be locked when calling this
+     */
+    void MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info);
+
+    /**
+     * @brief Unmaps the given range and merges it with other unmapped regions
+     * @note block_mutex MUST be locked when calling this
+     */
+    void UnmapLocked(VaType virt, VaType size);
+
+    std::mutex block_mutex;
+    std::vector<Block> blocks{Block{}};
+
+    /// A soft limit on the maximum VA of the AS
+    VaType va_limit{VaMaximum};
+
+private:
+    /// Callback called when the mappings in a region have changed
+    std::function<void(VaType, VaType)> unmap_callback{};
+};
+
+/**
+ * @brief FlatAllocator specialises FlatAddressSpaceMap to work as an allocator, with an
+ * initial, fast linear pass and a subsequent slower pass that iterates until it finds a free block
+ */
+template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits>
+requires AddressSpaceValid<VaType, AddressSpaceBits>
+class FlatAllocator
+    : public FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits> {
+private:
+    using Base = FlatAddressSpaceMap<VaType, UnmappedVa, bool, false, false, AddressSpaceBits>;
+
+public:
+    explicit FlatAllocator(VaType virt_start, VaType va_limit = Base::VaMaximum);
+
+    /**
+     * @brief Allocates a region in the AS of the given size and returns its address
+     */
+    VaType Allocate(VaType size);
+
+    /**
+     * @brief Marks the given region in the AS as allocated
+     */
+    void AllocateFixed(VaType virt, VaType size);
+
+    /**
+     * @brief Frees an AS region so it can be used again
+     */
+    void Free(VaType virt, VaType size);
+
+    VaType GetVAStart() const {
+        return virt_start;
+    }
+
+private:
+    /// The base VA of the allocator, no allocations will be below this
+    VaType virt_start;
+
+    /**
+     * The end address for the initial linear allocation pass
+     * Once this reaches the AS limit the slower allocation path will be used
+     */
+    VaType current_linear_alloc_end;
+};
+} // namespace Common
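
A minimal usage sketch of the allocator introduced above, using the Common::FlatAllocator<u32, 0, 32> specialization that address_space.cpp instantiates (the sizes and addresses are illustrative):

    #include "common/address_space.h"

    Common::FlatAllocator<u32, 0, 32> allocator{0x1000}; // no allocations below 0x1000

    const u32 a = allocator.Allocate(0x2000); // linear pass: returns 0x1000
    const u32 b = allocator.Allocate(0x1000); // continues linearly: returns 0x3000
    allocator.AllocateFixed(0x10000, 0x1000); // pin a caller-chosen region
    allocator.Free(b, 0x1000);                // the freed range can be handed out again

Allocate() returns the unmapped sentinel (here 0, the UnmappedVa parameter) once the address space is exhausted, so callers are expected to check the result.
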
diff --git a/src/common/address_space.inc b/src/common/address_space.inc
new file mode 100644
index 000000000..2195dabd5
--- /dev/null
+++ b/src/common/address_space.inc
@@ -0,0 +1,366 @@
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/address_space.h"
+#include "common/assert.h"
+
+#define MAP_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType FlatAddressSpaceMap< \
+        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+#define MAP_MEMBER_CONST() \
+    template <typename VaType, VaType UnmappedVa, typename PaType, PaType UnmappedPa, \
+              bool PaContigSplit, size_t AddressSpaceBits, typename ExtraBlockInfo> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> FlatAddressSpaceMap< \
+        VaType, UnmappedVa, PaType, UnmappedPa, PaContigSplit, AddressSpaceBits, ExtraBlockInfo>
+
+#define MM_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+    FlatMemoryManager<VaType, UnmappedVa, AddressSpaceBits>
+
+#define ALLOC_MEMBER(returnType) \
+    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> returnType \
+    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+#define ALLOC_MEMBER_CONST() \
+    template <typename VaType, VaType UnmappedVa, size_t AddressSpaceBits> \
+    requires AddressSpaceValid<VaType, AddressSpaceBits> \
+    FlatAllocator<VaType, UnmappedVa, AddressSpaceBits>
+
+namespace Common {
+MAP_MEMBER_CONST()::FlatAddressSpaceMap(VaType va_limit_,
+                                        std::function<void(VaType, VaType)> unmap_callback_)
+    : va_limit{va_limit_}, unmap_callback{std::move(unmap_callback_)} {
+    if (va_limit > VaMaximum) {
+        ASSERT_MSG(false, "Invalid VA limit!");
+    }
+}
+
+MAP_MEMBER(void)::MapLocked(VaType virt, PaType phys, VaType size, ExtraBlockInfo extra_info) {
+    VaType virt_end{virt + size};
+
+    if (virt_end > va_limit) {
+        ASSERT_MSG(false,
+                   "Trying to map a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
+    }
+
+    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+    if (block_end_successor == blocks.begin()) {
+        ASSERT_MSG(false, "Trying to map a block before the VA start: virt_end: 0x{:X}", virt_end);
+    }
+
+    auto block_end_predecessor{std::prev(block_end_successor)};
+
+    if (block_end_successor != blocks.end()) {
+        // We have blocks in front of us, if one is directly in front then we don't have to add a
+        // tail
+        if (block_end_successor->virt != virt_end) {
+            PaType tailPhys{[&]() -> PaType {
+                if constexpr (!PaContigSplit) {
+                    // Always propagate unmapped regions rather than calculating offset
+                    return block_end_predecessor->phys;
+                } else {
+                    if (block_end_predecessor->Unmapped()) {
+                        // Always propagate unmapped regions rather than calculating offset
+                        return block_end_predecessor->phys;
+                    } else {
+                        return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+                    }
+                }
+            }()};
+
+            if (block_end_predecessor->virt >= virt) {
+                // If this block's start would be overlapped by the map then reuse it as a tail
+                // block
+                block_end_predecessor->virt = virt_end;
+                block_end_predecessor->phys = tailPhys;
+                block_end_predecessor->extra_info = block_end_predecessor->extra_info;
+
+                // No longer predecessor anymore
+                block_end_successor = block_end_predecessor--;
+            } else {
+                // Else insert a new one and we're done
+                blocks.insert(block_end_successor,
+                              {Block(virt, phys, extra_info),
+                               Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+                if (unmap_callback) {
+                    unmap_callback(virt, size);
+                }
+
+                return;
+            }
+        }
+    } else {
+        // block_end_predecessor will always be unmapped as blocks has to be terminated by an
+        // unmapped chunk
+        if (block_end_predecessor != blocks.begin() && block_end_predecessor->virt >= virt) {
+            // Move the unmapped block start backwards
+            block_end_predecessor->virt = virt_end;
+
+            // No longer predecessor anymore
+            block_end_successor = block_end_predecessor--;
+        } else {
+            // Else insert a new one and we're done
+            blocks.insert(block_end_successor,
+                          {Block(virt, phys, extra_info), Block(virt_end, UnmappedPa, {})});
+            if (unmap_callback) {
+                unmap_callback(virt, size);
+            }
+
+            return;
+        }
+    }
+
+    auto block_start_successor{block_end_successor};
+
+    // Walk the block vector to find the start successor as this is more efficient than another
+    // binary search in most scenarios
+    while (std::prev(block_start_successor)->virt >= virt) {
+        block_start_successor--;
+    }
+
+    // Check that the start successor is either the end block or something in between
+    if (block_start_successor->virt > virt_end) {
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+    } else if (block_start_successor->virt == virt_end) {
+        // We need to create a new block as there are none spare that we would overwrite
+        blocks.insert(block_start_successor, Block(virt, phys, extra_info));
+    } else {
+        // Erase overwritten blocks
+        if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+            blocks.erase(eraseStart, block_end_successor);
+        }
+
+        // Reuse a block that would otherwise be overwritten as a start block
+        block_start_successor->virt = virt;
+        block_start_successor->phys = phys;
+        block_start_successor->extra_info = extra_info;
+    }
+
+    if (unmap_callback) {
+        unmap_callback(virt, size);
+    }
+}
+
+MAP_MEMBER(void)::UnmapLocked(VaType virt, VaType size) {
+    VaType virt_end{virt + size};
+
+    if (virt_end > va_limit) {
+        ASSERT_MSG(false,
+                   "Trying to unmap a block past the VA limit: virt_end: 0x{:X}, va_limit: 0x{:X}",
+                   virt_end, va_limit);
+    }
+
+    auto block_end_successor{std::lower_bound(blocks.begin(), blocks.end(), virt_end)};
+    if (block_end_successor == blocks.begin()) {
+        ASSERT_MSG(false, "Trying to unmap a block before the VA start: virt_end: 0x{:X}",
+                   virt_end);
+    }
+
+    auto block_end_predecessor{std::prev(block_end_successor)};
+
+    auto walk_back_to_predecessor{[&](auto iter) {
+        while (iter->virt >= virt) {
+            iter--;
+        }
+
+        return iter;
+    }};
+
+    auto erase_blocks_with_end_unmapped{[&](auto unmappedEnd) {
+        auto block_start_predecessor{walk_back_to_predecessor(unmappedEnd)};
+        auto block_start_successor{std::next(block_start_predecessor)};
+
+        auto eraseEnd{[&]() {
+            if (block_start_predecessor->Unmapped()) {
+                // If the start predecessor is unmapped then we can erase everything in our region
+                // and be done
+                return std::next(unmappedEnd);
+            } else {
+                // Else reuse the end predecessor as the start of our unmapped region then erase all
+                // up to it
+                unmappedEnd->virt = virt;
+                return unmappedEnd;
+            }
+        }()};
+
+        // We can't have two unmapped regions after each other
+        if (eraseEnd != blocks.end() &&
+            (eraseEnd == block_start_successor ||
+             (block_start_predecessor->Unmapped() && eraseEnd->Unmapped()))) {
+            ASSERT_MSG(false, "Multiple contiguous unmapped regions are unsupported!");
+        }
+
+        blocks.erase(block_start_successor, eraseEnd);
+    }};
+
+    // We can avoid any splitting logic if these are the case
+    if (block_end_predecessor->Unmapped()) {
+        if (block_end_predecessor->virt > virt) {
+            erase_blocks_with_end_unmapped(block_end_predecessor);
+        }
+
+        if (unmap_callback) {
+            unmap_callback(virt, size);
+        }
+
+        return; // The region is unmapped, bail out early
+    } else if (block_end_successor->virt == virt_end && block_end_successor->Unmapped()) {
+        erase_blocks_with_end_unmapped(block_end_successor);
+
+        if (unmap_callback) {
+            unmap_callback(virt, size);
+        }
+
+        return; // The region is unmapped here and doesn't need splitting, bail out early
+    } else if (block_end_successor == blocks.end()) {
+        // This should never happen as the end should always follow an unmapped block
+        ASSERT_MSG(false, "Unexpected Memory Manager state!");
+    } else if (block_end_successor->virt != virt_end) {
+        // If one block is directly in front then we don't have to add a tail
+
+        // The previous block is mapped so we will need to add a tail with an offset
+        PaType tailPhys{[&]() {
+            if constexpr (PaContigSplit) {
+                return block_end_predecessor->phys + virt_end - block_end_predecessor->virt;
+            } else {
+                return block_end_predecessor->phys;
+            }
+        }()};
+
+        if (block_end_predecessor->virt >= virt) {
+            // If this block's start would be overlapped by the unmap then reuse it as a tail block
+            block_end_predecessor->virt = virt_end;
+            block_end_predecessor->phys = tailPhys;
+
+            // No longer predecessor anymore
+            block_end_successor = block_end_predecessor--;
+        } else {
+            blocks.insert(block_end_successor,
+                          {Block(virt, UnmappedPa, {}),
+                           Block(virt_end, tailPhys, block_end_predecessor->extra_info)});
+            if (unmap_callback) {
+                unmap_callback(virt, size);
+            }
+
+            // The previous block is mapped and ends before
+            return;
+        }
+    }
+
+    // Walk the block vector to find the start predecessor as this is more efficient than another
+    // binary search in most scenarios
+    auto block_start_predecessor{walk_back_to_predecessor(block_end_successor)};
+    auto block_start_successor{std::next(block_start_predecessor)};
+
+    if (block_start_successor->virt > virt_end) {
+        ASSERT_MSG(false, "Unsorted block in AS map: virt: 0x{:X}", block_start_successor->virt);
+    } else if (block_start_successor->virt == virt_end) {
+        // There are no blocks between the start and the end that would let us skip inserting a new
+        // one for head
+
+        // The previous block may be unmapped, if so we don't need to insert any unmaps after it
+        if (block_start_predecessor->Mapped()) {
+            blocks.insert(block_start_successor, Block(virt, UnmappedPa, {}));
+        }
+    } else if (block_start_predecessor->Unmapped()) {
+        // If the previous block is unmapped
+        blocks.erase(block_start_successor, block_end_predecessor);
+    } else {
+        // Erase overwritten blocks, skipping the first one as we have written the unmapped start
+        // block there
+        if (auto eraseStart{std::next(block_start_successor)}; eraseStart != block_end_successor) {
+            blocks.erase(eraseStart, block_end_successor);
+        }
+
+        // Add in the unmapped block header
+        block_start_successor->virt = virt;
+        block_start_successor->phys = UnmappedPa;
+    }
+
+    if (unmap_callback) {
+        unmap_callback(virt, size);
+    }
+}
+
+ALLOC_MEMBER_CONST()::FlatAllocator(VaType virt_start_, VaType va_limit_)
+    : Base{va_limit_}, virt_start{virt_start_}, current_linear_alloc_end{virt_start_} {}
+
+ALLOC_MEMBER(VaType)::Allocate(VaType size) {
+    std::scoped_lock lock(this->block_mutex);
+
+    VaType alloc_start{UnmappedVa};
+    VaType alloc_end{current_linear_alloc_end + size};
+
+    // Avoid searching backwards in the address space if possible
+    if (alloc_end >= current_linear_alloc_end && alloc_end <= this->va_limit) {
+        auto alloc_end_successor{
+            std::lower_bound(this->blocks.begin(), this->blocks.end(), alloc_end)};
+        if (alloc_end_successor == this->blocks.begin()) {
+            ASSERT_MSG(false, "First block in AS map is invalid!");
+        }
+
+        auto alloc_end_predecessor{std::prev(alloc_end_successor)};
+        if (alloc_end_predecessor->virt <= current_linear_alloc_end) {
+            alloc_start = current_linear_alloc_end;
+        } else {
+            // Skip over any fixed mappings in front of us
+            while (alloc_end_successor != this->blocks.end()) {
+                if (alloc_end_successor->virt - alloc_end_predecessor->virt < size ||
+                    alloc_end_predecessor->Mapped()) {
+                    alloc_start = alloc_end_predecessor->virt;
+                    break;
+                }
+
+                alloc_end_predecessor = alloc_end_successor++;
+
+                // Use the VA limit to calculate if we can fit in the final block since it has no
+                // successor
+                if (alloc_end_successor == this->blocks.end()) {
+                    alloc_end = alloc_end_predecessor->virt + size;
+
+                    if (alloc_end >= alloc_end_predecessor->virt && alloc_end <= this->va_limit) {
+                        alloc_start = alloc_end_predecessor->virt;
+                    }
+                }
+            }
+        }
+    }
+
+    if (alloc_start != UnmappedVa) {
+        current_linear_alloc_end = alloc_start + size;
+    } else { // If linear allocation overflows the AS then find a gap
+        if (this->blocks.size() <= 2) {
+            ASSERT_MSG(false, "Unexpected allocator state!");
+        }
+
+        auto search_predecessor{this->blocks.begin()};
+        auto search_successor{std::next(search_predecessor)};
+
+        while (search_successor != this->blocks.end() &&
+               (search_successor->virt - search_predecessor->virt < size ||
+                search_predecessor->Mapped())) {
+            search_predecessor = search_successor++;
+        }
+
+        if (search_successor != this->blocks.end()) {
+            alloc_start = search_predecessor->virt;
+        } else {
+            return {}; // AS is full
+        }
+    }
+
+    this->MapLocked(alloc_start, true, size, {});
+    return alloc_start;
+}
+
+ALLOC_MEMBER(void)::AllocateFixed(VaType virt, VaType size) {
+    this->Map(virt, true, size);
+}
+
+ALLOC_MEMBER(void)::Free(VaType virt, VaType size) {
+    this->Unmap(virt, size);
+}
+} // namespace Common
diff --git a/src/common/algorithm.h b/src/common/algorithm.h
index 9ddfd637b..c27c9241d 100644
--- a/src/common/algorithm.h
+++ b/src/common/algorithm.h
@@ -24,4 +24,12 @@ template <class ForwardIt, class T, class Compare = std::less<>>
     return first != last && !comp(value, *first) ? first : last;
 }
 
+template <typename T, typename Func, typename... Args>
+T FoldRight(T initial_value, Func&& func, Args&&... args) {
+    T value{initial_value};
+    const auto high_func = [&value, &func]<typename U>(U x) { value = func(value, x); };
+    (std::invoke(high_func, std::forward<Args>(args)), ...);
+    return value;
+}
+
 } // namespace Common
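
A usage sketch of the FoldRight helper added above (the call site is illustrative): despite the name, the comma fold consumes the argument pack strictly left to right, threading the accumulator through func.

    #include <algorithm>
    #include "common/algorithm.h"

    // ((((0 max 3) max 1) max 7) max 2) == 7
    const int highest =
        Common::FoldRight(0, [](int acc, int x) { return std::max(acc, x); }, 3, 1, 7, 2);
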
diff --git a/src/common/hash.h b/src/common/hash.h
index b6f3e6d6f..e8fe78b07 100644
--- a/src/common/hash.h
+++ b/src/common/hash.h
@@ -18,4 +18,11 @@ struct PairHash {
     }
 };
 
+template <typename T>
+struct IdentityHash {
+    [[nodiscard]] size_t operator()(T value) const noexcept {
+        return static_cast<size_t>(value);
+    }
+};
+
 } // namespace Common
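
IdentityHash is meant for keys that are already well distributed (pre-hashed values, unique IDs), where running them through std::hash again buys nothing. A usage sketch, with illustrative map contents:

    #include <string>
    #include <unordered_map>

    #include "common/hash.h"

    // u64 keys here are already unique identifiers, so the hash is just the key itself.
    std::unordered_map<u64, std::string, Common::IdentityHash<u64>> names;
    names[0x0123456789ABCDEFULL] = "example";
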
diff --git a/src/common/input.h b/src/common/input.h
index 825b0d650..bfa0639f5 100644
--- a/src/common/input.h
+++ b/src/common/input.h
@@ -76,6 +76,19 @@ enum class PollingError {
     Unknown,
 };
 
+// Nfc reply from the controller
+enum class NfcState {
+    Success,
+    NewAmiibo,
+    WaitingForAmiibo,
+    AmiiboRemoved,
+    NotAnAmiibo,
+    NotSupported,
+    WrongDeviceState,
+    WriteFailed,
+    Unknown,
+};
+
 // Ir camera reply from the controller
 enum class CameraError {
     None,
@@ -202,6 +215,11 @@ struct CameraStatus {
     std::vector<u8> data{};
 };
 
+struct NfcStatus {
+    NfcState state{};
+    std::vector<u8> data{};
+};
+
 // List of buttons to be passed to Qt that can be translated
 enum class ButtonNames {
     Undefined,
@@ -260,6 +278,7 @@ struct CallbackStatus {
     BatteryStatus battery_status{};
     VibrationStatus vibration_status{};
     CameraStatus camera_status{};
+    NfcStatus nfc_status{};
 };
 
 // Triggered once every input change
@@ -312,6 +331,14 @@ public:
     virtual CameraError SetCameraFormat([[maybe_unused]] CameraFormat camera_format) {
         return CameraError::NotSupported;
     }
+
+    virtual NfcState SupportsNfc() const {
+        return NfcState::NotSupported;
+    }
+
+    virtual NfcState WriteNfcData([[maybe_unused]] const std::vector<u8>& data) {
+        return NfcState::NotSupported;
+    }
 };
 
 /// An abstract class template for a factory that can create input devices.
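
A sketch of how a backend driver might opt in to the NFC hooks added above, assuming the surrounding base class is InputDevice in namespace Common::Input as elsewhere in yuzu. MyNfcDevice and nfc_backend_write are illustrative stand-ins, not part of the commit; any driver that does not override these methods keeps the NotSupported defaults shown in the diff:

    #include <vector>

    #include "common/input.h"

    class MyNfcDevice final : public Common::Input::InputDevice {
    public:
        Common::Input::NfcState SupportsNfc() const override {
            return Common::Input::NfcState::Success;
        }

        Common::Input::NfcState WriteNfcData(const std::vector<u8>& data) override {
            // Hand the raw tag bytes to the (hypothetical) backend and map the outcome.
            return nfc_backend_write(data) ? Common::Input::NfcState::Success
                                           : Common::Input::NfcState::WriteFailed;
        }
    };
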
diff --git a/src/common/logging/backend.cpp b/src/common/logging/backend.cpp
index 8ce1c2fd1..15d92505e 100644
--- a/src/common/logging/backend.cpp
+++ b/src/common/logging/backend.cpp
@@ -219,7 +219,7 @@ private:
 
     void StartBackendThread() {
         backend_thread = std::jthread([this](std::stop_token stop_token) {
-            Common::SetCurrentThreadName("yuzu:Log");
+            Common::SetCurrentThreadName("Logger");
             Entry entry;
             const auto write_logs = [this, &entry]() {
                 ForEachBackend([&entry](Backend& backend) { backend.Write(entry); });
diff --git a/src/common/multi_level_page_table.cpp b/src/common/multi_level_page_table.cpp
new file mode 100644
index 000000000..46e362f3b
--- /dev/null
+++ b/src/common/multi_level_page_table.cpp
@@ -0,0 +1,9 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "common/multi_level_page_table.inc"
+
+namespace Common {
+template class Common::MultiLevelPageTable<u64>;
+template class Common::MultiLevelPageTable<u32>;
+} // namespace Common
diff --git a/src/common/multi_level_page_table.h b/src/common/multi_level_page_table.h
new file mode 100644
index 000000000..31f6676a0
--- /dev/null
+++ b/src/common/multi_level_page_table.h
@@ -0,0 +1,78 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#pragma once
+
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+class MultiLevelPageTable final {
+public:
+    constexpr MultiLevelPageTable() = default;
+    explicit MultiLevelPageTable(std::size_t address_space_bits, std::size_t first_level_bits,
+                                 std::size_t page_bits);
+
+    ~MultiLevelPageTable() noexcept;
+
+    MultiLevelPageTable(const MultiLevelPageTable&) = delete;
+    MultiLevelPageTable& operator=(const MultiLevelPageTable&) = delete;
+
+    MultiLevelPageTable(MultiLevelPageTable&& other) noexcept
+        : address_space_bits{std::exchange(other.address_space_bits, 0)},
+          first_level_bits{std::exchange(other.first_level_bits, 0)},
+          page_bits{std::exchange(other.page_bits, 0)},
+          first_level_shift{std::exchange(other.first_level_shift, 0)},
+          first_level_chunk_size{std::exchange(other.first_level_chunk_size, 0)},
+          first_level_map{std::move(other.first_level_map)},
+          base_ptr{std::exchange(other.base_ptr, nullptr)} {}
+
+    MultiLevelPageTable& operator=(MultiLevelPageTable&& other) noexcept {
+        address_space_bits = std::exchange(other.address_space_bits, 0);
+        first_level_bits = std::exchange(other.first_level_bits, 0);
+        page_bits = std::exchange(other.page_bits, 0);
+        first_level_shift = std::exchange(other.first_level_shift, 0);
+        first_level_chunk_size = std::exchange(other.first_level_chunk_size, 0);
+        alloc_size = std::exchange(other.alloc_size, 0);
+        first_level_map = std::move(other.first_level_map);
+        base_ptr = std::exchange(other.base_ptr, nullptr);
+        return *this;
+    }
+
+    void ReserveRange(u64 start, std::size_t size);
+
+    [[nodiscard]] const BaseAddr& operator[](std::size_t index) const {
+        return base_ptr[index];
+    }
+
+    [[nodiscard]] BaseAddr& operator[](std::size_t index) {
+        return base_ptr[index];
+    }
+
+    [[nodiscard]] BaseAddr* data() {
+        return base_ptr;
+    }
+
+    [[nodiscard]] const BaseAddr* data() const {
+        return base_ptr;
+    }
+
+private:
+    void AllocateLevel(u64 level);
+
+    std::size_t address_space_bits{};
+    std::size_t first_level_bits{};
+    std::size_t page_bits{};
+    std::size_t first_level_shift{};
+    std::size_t first_level_chunk_size{};
+    std::size_t alloc_size{};
+    std::vector<void*> first_level_map{};
+    BaseAddr* base_ptr{};
+};
+
+} // namespace Common
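
A minimal usage sketch of MultiLevelPageTable (the bit widths are illustrative): a 34-bit address space with a 10-bit first level and 12-bit (4 KiB) pages reserves the whole flat table up front, then commits second-level chunks lazily, so only ranges passed through ReserveRange consume physical memory.

    #include "common/multi_level_page_table.h"

    Common::MultiLevelPageTable<u64> page_table{34, 10, 12};

    page_table.ReserveRange(0, 1ULL << 24); // commit backing for the first 16 MiB of VA
    page_table[0] = 0xCAFE;                 // entries can then be indexed like a flat array
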
diff --git a/src/common/multi_level_page_table.inc b/src/common/multi_level_page_table.inc
new file mode 100644
index 000000000..8ac506fa0
--- /dev/null
+++ b/src/common/multi_level_page_table.inc
@@ -0,0 +1,84 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sys/mman.h>
+#endif
+
+#include "common/assert.h"
+#include "common/multi_level_page_table.h"
+
+namespace Common {
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::MultiLevelPageTable(std::size_t address_space_bits_,
+                                                   std::size_t first_level_bits_,
+                                                   std::size_t page_bits_)
+    : address_space_bits{address_space_bits_},
+      first_level_bits{first_level_bits_}, page_bits{page_bits_} {
+    if (page_bits == 0) {
+        return;
+    }
+    first_level_shift = address_space_bits - first_level_bits;
+    first_level_chunk_size = (1ULL << (first_level_shift - page_bits)) * sizeof(BaseAddr);
+    alloc_size = (1ULL << (address_space_bits - page_bits)) * sizeof(BaseAddr);
+    std::size_t first_level_size = 1ULL << first_level_bits;
+    first_level_map.resize(first_level_size, nullptr);
+#ifdef _WIN32
+    void* base{VirtualAlloc(nullptr, alloc_size, MEM_RESERVE, PAGE_READWRITE)};
+#else
+    void* base{mmap(nullptr, alloc_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)};
+
+    if (base == MAP_FAILED) {
+        base = nullptr;
+    }
+#endif
+
+    ASSERT(base);
+    base_ptr = reinterpret_cast<BaseAddr*>(base);
+}
+
+template <typename BaseAddr>
+MultiLevelPageTable<BaseAddr>::~MultiLevelPageTable() noexcept {
+    if (!base_ptr) {
+        return;
+    }
+#ifdef _WIN32
+    ASSERT(VirtualFree(base_ptr, 0, MEM_RELEASE));
+#else
+    ASSERT(munmap(base_ptr, alloc_size) == 0);
+#endif
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::ReserveRange(u64 start, std::size_t size) {
+    const u64 new_start = start >> first_level_shift;
+    const u64 new_end = (start + size) >> first_level_shift;
+    for (u64 i = new_start; i <= new_end; i++) {
+        if (!first_level_map[i]) {
+            AllocateLevel(i);
+        }
+    }
+}
+
+template <typename BaseAddr>
+void MultiLevelPageTable<BaseAddr>::AllocateLevel(u64 level) {
+    void* ptr = reinterpret_cast<char*>(base_ptr) + level * first_level_chunk_size;
+#ifdef _WIN32
+    void* base{VirtualAlloc(ptr, first_level_chunk_size, MEM_COMMIT, PAGE_READWRITE)};
+#else
+    void* base{mmap(ptr, first_level_chunk_size, PROT_READ | PROT_WRITE,
+                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0)};
+
+    if (base == MAP_FAILED) {
+        base = nullptr;
+    }
+#endif
+    ASSERT(base);
+
+    first_level_map[level] = base;
+}
+
+} // namespace Common
diff --git a/src/common/settings.h b/src/common/settings.h
index 851812f28..d2452c93b 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -531,6 +531,7 @@ struct Values {
531 Setting<bool> use_auto_stub{false, "use_auto_stub"}; 531 Setting<bool> use_auto_stub{false, "use_auto_stub"};
532 Setting<bool> enable_all_controllers{false, "enable_all_controllers"}; 532 Setting<bool> enable_all_controllers{false, "enable_all_controllers"};
533 Setting<bool> create_crash_dumps{false, "create_crash_dumps"}; 533 Setting<bool> create_crash_dumps{false, "create_crash_dumps"};
534 Setting<bool> perform_vulkan_check{true, "perform_vulkan_check"};
534 535
535 // Miscellaneous 536 // Miscellaneous
536 Setting<std::string> log_filter{"*:Info", "log_filter"}; 537 Setting<std::string> log_filter{"*:Info", "log_filter"};
diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index 33cf470d5..95302c419 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -138,8 +138,6 @@ add_library(core STATIC
138 frontend/emu_window.h 138 frontend/emu_window.h
139 frontend/framebuffer_layout.cpp 139 frontend/framebuffer_layout.cpp
140 frontend/framebuffer_layout.h 140 frontend/framebuffer_layout.h
141 hardware_interrupt_manager.cpp
142 hardware_interrupt_manager.h
143 hid/emulated_console.cpp 141 hid/emulated_console.cpp
144 hid/emulated_console.h 142 hid/emulated_console.h
145 hid/emulated_controller.cpp 143 hid/emulated_controller.cpp
@@ -460,6 +458,8 @@ add_library(core STATIC
460 hle/service/hid/controllers/mouse.h 458 hle/service/hid/controllers/mouse.h
461 hle/service/hid/controllers/npad.cpp 459 hle/service/hid/controllers/npad.cpp
462 hle/service/hid/controllers/npad.h 460 hle/service/hid/controllers/npad.h
461 hle/service/hid/controllers/palma.cpp
462 hle/service/hid/controllers/palma.h
463 hle/service/hid/controllers/stubbed.cpp 463 hle/service/hid/controllers/stubbed.cpp
464 hle/service/hid/controllers/stubbed.h 464 hle/service/hid/controllers/stubbed.h
465 hle/service/hid/controllers/touchscreen.cpp 465 hle/service/hid/controllers/touchscreen.cpp
@@ -494,6 +494,8 @@ add_library(core STATIC
494 hle/service/jit/jit.h 494 hle/service/jit/jit.h
495 hle/service/lbl/lbl.cpp 495 hle/service/lbl/lbl.cpp
496 hle/service/lbl/lbl.h 496 hle/service/lbl/lbl.h
497 hle/service/ldn/lan_discovery.cpp
498 hle/service/ldn/lan_discovery.h
497 hle/service/ldn/ldn_results.h 499 hle/service/ldn/ldn_results.h
498 hle/service/ldn/ldn.cpp 500 hle/service/ldn/ldn.cpp
499 hle/service/ldn/ldn.h 501 hle/service/ldn/ldn.h
@@ -521,9 +523,12 @@ add_library(core STATIC
521 hle/service/nfc/nfc.h 523 hle/service/nfc/nfc.h
522 hle/service/nfp/amiibo_crypto.cpp 524 hle/service/nfp/amiibo_crypto.cpp
523 hle/service/nfp/amiibo_crypto.h 525 hle/service/nfp/amiibo_crypto.h
524 hle/service/nfp/amiibo_types.h
525 hle/service/nfp/nfp.cpp 526 hle/service/nfp/nfp.cpp
526 hle/service/nfp/nfp.h 527 hle/service/nfp/nfp.h
528 hle/service/nfp/nfp_device.cpp
529 hle/service/nfp/nfp_device.h
530 hle/service/nfp/nfp_result.h
531 hle/service/nfp/nfp_types.h
527 hle/service/nfp/nfp_user.cpp 532 hle/service/nfp/nfp_user.cpp
528 hle/service/nfp/nfp_user.h 533 hle/service/nfp/nfp_user.h
529 hle/service/ngct/ngct.cpp 534 hle/service/ngct/ngct.cpp
@@ -543,6 +548,12 @@ add_library(core STATIC
543 hle/service/ns/ns.h 548 hle/service/ns/ns.h
544 hle/service/ns/pdm_qry.cpp 549 hle/service/ns/pdm_qry.cpp
545 hle/service/ns/pdm_qry.h 550 hle/service/ns/pdm_qry.h
551 hle/service/nvdrv/core/container.cpp
552 hle/service/nvdrv/core/container.h
553 hle/service/nvdrv/core/nvmap.cpp
554 hle/service/nvdrv/core/nvmap.h
555 hle/service/nvdrv/core/syncpoint_manager.cpp
556 hle/service/nvdrv/core/syncpoint_manager.h
546 hle/service/nvdrv/devices/nvdevice.h 557 hle/service/nvdrv/devices/nvdevice.h
547 hle/service/nvdrv/devices/nvdisp_disp0.cpp 558 hle/service/nvdrv/devices/nvdisp_disp0.cpp
548 hle/service/nvdrv/devices/nvdisp_disp0.h 559 hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -571,8 +582,6 @@ add_library(core STATIC
571 hle/service/nvdrv/nvdrv_interface.h 582 hle/service/nvdrv/nvdrv_interface.h
572 hle/service/nvdrv/nvmemp.cpp 583 hle/service/nvdrv/nvmemp.cpp
573 hle/service/nvdrv/nvmemp.h 584 hle/service/nvdrv/nvmemp.h
574 hle/service/nvdrv/syncpoint_manager.cpp
575 hle/service/nvdrv/syncpoint_manager.h
576 hle/service/nvflinger/binder.h 585 hle/service/nvflinger/binder.h
577 hle/service/nvflinger/buffer_item.h 586 hle/service/nvflinger/buffer_item.h
578 hle/service/nvflinger/buffer_item_consumer.cpp 587 hle/service/nvflinger/buffer_item_consumer.cpp
diff --git a/src/core/core.cpp b/src/core/core.cpp
index 121092868..1deeee154 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -27,7 +27,6 @@
27#include "core/file_sys/savedata_factory.h" 27#include "core/file_sys/savedata_factory.h"
28#include "core/file_sys/vfs_concat.h" 28#include "core/file_sys/vfs_concat.h"
29#include "core/file_sys/vfs_real.h" 29#include "core/file_sys/vfs_real.h"
30#include "core/hardware_interrupt_manager.h"
31#include "core/hid/hid_core.h" 30#include "core/hid/hid_core.h"
32#include "core/hle/kernel/k_memory_manager.h" 31#include "core/hle/kernel/k_memory_manager.h"
33#include "core/hle/kernel/k_process.h" 32#include "core/hle/kernel/k_process.h"
@@ -51,6 +50,7 @@
51#include "core/telemetry_session.h" 50#include "core/telemetry_session.h"
52#include "core/tools/freezer.h" 51#include "core/tools/freezer.h"
53#include "network/network.h" 52#include "network/network.h"
53#include "video_core/host1x/host1x.h"
54#include "video_core/renderer_base.h" 54#include "video_core/renderer_base.h"
55#include "video_core/video_core.h" 55#include "video_core/video_core.h"
56 56
@@ -215,6 +215,7 @@ struct System::Impl {
215 215
216 telemetry_session = std::make_unique<Core::TelemetrySession>(); 216 telemetry_session = std::make_unique<Core::TelemetrySession>();
217 217
218 host1x_core = std::make_unique<Tegra::Host1x::Host1x>(system);
218 gpu_core = VideoCore::CreateGPU(emu_window, system); 219 gpu_core = VideoCore::CreateGPU(emu_window, system);
219 if (!gpu_core) { 220 if (!gpu_core) {
220 return SystemResultStatus::ErrorVideoCore; 221 return SystemResultStatus::ErrorVideoCore;
@@ -224,7 +225,6 @@ struct System::Impl {
224 225
225 service_manager = std::make_shared<Service::SM::ServiceManager>(kernel); 226 service_manager = std::make_shared<Service::SM::ServiceManager>(kernel);
226 services = std::make_unique<Service::Services>(service_manager, system); 227 services = std::make_unique<Service::Services>(service_manager, system);
227 interrupt_manager = std::make_unique<Hardware::InterruptManager>(system);
228 228
229 // Initialize time manager, which must happen after kernel is created 229 // Initialize time manager, which must happen after kernel is created
230 time_manager.Initialize(); 230 time_manager.Initialize();
@@ -373,6 +373,7 @@ struct System::Impl {
373 app_loader.reset(); 373 app_loader.reset();
374 audio_core.reset(); 374 audio_core.reset();
375 gpu_core.reset(); 375 gpu_core.reset();
376 host1x_core.reset();
376 perf_stats.reset(); 377 perf_stats.reset();
377 kernel.Shutdown(); 378 kernel.Shutdown();
378 memory.Reset(); 379 memory.Reset();
@@ -450,7 +451,7 @@ struct System::Impl {
450 /// AppLoader used to load the current executing application 451 /// AppLoader used to load the current executing application
451 std::unique_ptr<Loader::AppLoader> app_loader; 452 std::unique_ptr<Loader::AppLoader> app_loader;
452 std::unique_ptr<Tegra::GPU> gpu_core; 453 std::unique_ptr<Tegra::GPU> gpu_core;
453 std::unique_ptr<Hardware::InterruptManager> interrupt_manager; 454 std::unique_ptr<Tegra::Host1x::Host1x> host1x_core;
454 std::unique_ptr<Core::DeviceMemory> device_memory; 455 std::unique_ptr<Core::DeviceMemory> device_memory;
455 std::unique_ptr<AudioCore::AudioCore> audio_core; 456 std::unique_ptr<AudioCore::AudioCore> audio_core;
456 Core::Memory::Memory memory; 457 Core::Memory::Memory memory;
@@ -668,12 +669,12 @@ const Tegra::GPU& System::GPU() const {
668 return *impl->gpu_core; 669 return *impl->gpu_core;
669} 670}
670 671
671Core::Hardware::InterruptManager& System::InterruptManager() { 672Tegra::Host1x::Host1x& System::Host1x() {
672 return *impl->interrupt_manager; 673 return *impl->host1x_core;
673} 674}
674 675
675const Core::Hardware::InterruptManager& System::InterruptManager() const { 676const Tegra::Host1x::Host1x& System::Host1x() const {
676 return *impl->interrupt_manager; 677 return *impl->host1x_core;
677} 678}
678 679
679VideoCore::RendererBase& System::Renderer() { 680VideoCore::RendererBase& System::Renderer() {
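
The accessor pair replacing InterruptManager() is the only System-level change; a sketch of the migrated call shape follows. The syncpoint operation itself is deliberately left as a placeholder, since the Host1x side is introduced elsewhere in this set.

    // Sketch of a migrated call site; no Host1x member functions are named
    // here because none appear in this hunk.
    void OnGpuInterrupt(Core::System& system) {
        Tegra::Host1x::Host1x& host1x = system.Host1x();
        // ... signal the relevant syncpoint through host1x ...
        (void)host1x;
    }
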
diff --git a/src/core/core.h b/src/core/core.h
index 0ce3b1d60..7843cc8ad 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -74,6 +74,9 @@ class TimeManager;
74namespace Tegra { 74namespace Tegra {
75class DebugContext; 75class DebugContext;
76class GPU; 76class GPU;
77namespace Host1x {
78class Host1x;
79} // namespace Host1x
77} // namespace Tegra 80} // namespace Tegra
78 81
79namespace VideoCore { 82namespace VideoCore {
@@ -88,10 +91,6 @@ namespace Core::Timing {
88class CoreTiming; 91class CoreTiming;
89} 92}
90 93
91namespace Core::Hardware {
92class InterruptManager;
93}
94
95namespace Core::HID { 94namespace Core::HID {
96class HIDCore; 95class HIDCore;
97} 96}
@@ -260,6 +259,12 @@ public:
260 /// Gets an immutable reference to the GPU interface. 259 /// Gets an immutable reference to the GPU interface.
261 [[nodiscard]] const Tegra::GPU& GPU() const; 260 [[nodiscard]] const Tegra::GPU& GPU() const;
262 261
262 /// Gets a mutable reference to the Host1x interface
263 [[nodiscard]] Tegra::Host1x::Host1x& Host1x();
264
265 /// Gets an immutable reference to the Host1x interface.
266 [[nodiscard]] const Tegra::Host1x::Host1x& Host1x() const;
267
263 /// Gets a mutable reference to the renderer. 268 /// Gets a mutable reference to the renderer.
264 [[nodiscard]] VideoCore::RendererBase& Renderer(); 269 [[nodiscard]] VideoCore::RendererBase& Renderer();
265 270
@@ -296,12 +301,6 @@ public:
296 /// Provides a constant reference to the core timing instance. 301 /// Provides a constant reference to the core timing instance.
297 [[nodiscard]] const Timing::CoreTiming& CoreTiming() const; 302 [[nodiscard]] const Timing::CoreTiming& CoreTiming() const;
298 303
299 /// Provides a reference to the interrupt manager instance.
300 [[nodiscard]] Core::Hardware::InterruptManager& InterruptManager();
301
302 /// Provides a constant reference to the interrupt manager instance.
303 [[nodiscard]] const Core::Hardware::InterruptManager& InterruptManager() const;
304
305 /// Provides a reference to the kernel instance. 304 /// Provides a reference to the kernel instance.
306 [[nodiscard]] Kernel::KernelCore& Kernel(); 305 [[nodiscard]] Kernel::KernelCore& Kernel();
307 306
diff --git a/src/core/core_timing.cpp b/src/core/core_timing.cpp
index f6c4567ba..6c0fcb7b5 100644
--- a/src/core/core_timing.cpp
+++ b/src/core/core_timing.cpp
@@ -43,7 +43,7 @@ CoreTiming::CoreTiming()
43CoreTiming::~CoreTiming() = default; 43CoreTiming::~CoreTiming() = default;
44 44
45void CoreTiming::ThreadEntry(CoreTiming& instance) { 45void CoreTiming::ThreadEntry(CoreTiming& instance) {
46 constexpr char name[] = "yuzu:HostTiming"; 46 constexpr char name[] = "HostTiming";
47 MicroProfileOnThreadCreate(name); 47 MicroProfileOnThreadCreate(name);
48 Common::SetCurrentThreadName(name); 48 Common::SetCurrentThreadName(name);
49 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical); 49 Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
diff --git a/src/core/cpu_manager.cpp b/src/core/cpu_manager.cpp
index 9b1565ae1..0dd4c2196 100644
--- a/src/core/cpu_manager.cpp
+++ b/src/core/cpu_manager.cpp
@@ -189,9 +189,9 @@ void CpuManager::RunThread(std::size_t core) {
189 system.RegisterCoreThread(core); 189 system.RegisterCoreThread(core);
190 std::string name; 190 std::string name;
191 if (is_multicore) { 191 if (is_multicore) {
192 name = "yuzu:CPUCore_" + std::to_string(core); 192 name = "CPUCore_" + std::to_string(core);
193 } else { 193 } else {
194 name = "yuzu:CPUThread"; 194 name = "CPUThread";
195 } 195 }
196 MicroProfileOnThreadCreate(name.c_str()); 196 MicroProfileOnThreadCreate(name.c_str());
197 Common::SetCurrentThreadName(name.c_str()); 197 Common::SetCurrentThreadName(name.c_str());
diff --git a/src/core/debugger/debugger.cpp b/src/core/debugger/debugger.cpp
index e42bdd17d..339f971e6 100644
--- a/src/core/debugger/debugger.cpp
+++ b/src/core/debugger/debugger.cpp
@@ -140,7 +140,7 @@ private:
140 } 140 }
141 141
142 void ThreadLoop(std::stop_token stop_token) { 142 void ThreadLoop(std::stop_token stop_token) {
143 Common::SetCurrentThreadName("yuzu:Debugger"); 143 Common::SetCurrentThreadName("Debugger");
144 144
145 // Set up the client signals for new data. 145 // Set up the client signals for new data.
146 AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); }); 146 AsyncReceiveInto(signal_pipe, pipe_data, [&](auto d) { PipeData(d); });
diff --git a/src/core/file_sys/program_metadata.cpp b/src/core/file_sys/program_metadata.cpp
index e0cdf3520..08d489eab 100644
--- a/src/core/file_sys/program_metadata.cpp
+++ b/src/core/file_sys/program_metadata.cpp
@@ -33,11 +33,55 @@ Loader::ResultStatus ProgramMetadata::Load(VirtualFile file) {
33 return Loader::ResultStatus::ErrorBadACIHeader; 33 return Loader::ResultStatus::ErrorBadACIHeader;
34 } 34 }
35 35
36 if (sizeof(FileAccessControl) != file->ReadObject(&acid_file_access, acid_header.fac_offset)) { 36 // Load acid_file_access per-component instead of the entire struct, since this struct does not
37 // reflect the layout of the real data.
38 std::size_t current_offset = acid_header.fac_offset;
39 if (sizeof(FileAccessControl::version) != file->ReadBytes(&acid_file_access.version,
40 sizeof(FileAccessControl::version),
41 current_offset)) {
42 return Loader::ResultStatus::ErrorBadFileAccessControl;
43 }
44 if (sizeof(FileAccessControl::permissions) !=
45 file->ReadBytes(&acid_file_access.permissions, sizeof(FileAccessControl::permissions),
46 current_offset += sizeof(FileAccessControl::version) + 3)) {
47 return Loader::ResultStatus::ErrorBadFileAccessControl;
48 }
49 if (sizeof(FileAccessControl::unknown) !=
50 file->ReadBytes(&acid_file_access.unknown, sizeof(FileAccessControl::unknown),
51 current_offset + sizeof(FileAccessControl::permissions))) {
37 return Loader::ResultStatus::ErrorBadFileAccessControl; 52 return Loader::ResultStatus::ErrorBadFileAccessControl;
38 } 53 }
39 54
40 if (sizeof(FileAccessHeader) != file->ReadObject(&aci_file_access, aci_header.fah_offset)) { 55 // Load aci_file_access per-component instead of the entire struct, same as acid_file_access
56 current_offset = aci_header.fah_offset;
57 if (sizeof(FileAccessHeader::version) != file->ReadBytes(&aci_file_access.version,
58 sizeof(FileAccessHeader::version),
59 current_offset)) {
60 return Loader::ResultStatus::ErrorBadFileAccessHeader;
61 }
62 if (sizeof(FileAccessHeader::permissions) !=
63 file->ReadBytes(&aci_file_access.permissions, sizeof(FileAccessHeader::permissions),
64 current_offset += sizeof(FileAccessHeader::version) + 3)) {
65 return Loader::ResultStatus::ErrorBadFileAccessHeader;
66 }
67 if (sizeof(FileAccessHeader::unk_offset) !=
68 file->ReadBytes(&aci_file_access.unk_offset, sizeof(FileAccessHeader::unk_offset),
69 current_offset += sizeof(FileAccessHeader::permissions))) {
70 return Loader::ResultStatus::ErrorBadFileAccessHeader;
71 }
72 if (sizeof(FileAccessHeader::unk_size) !=
73 file->ReadBytes(&aci_file_access.unk_size, sizeof(FileAccessHeader::unk_size),
74 current_offset += sizeof(FileAccessHeader::unk_offset))) {
75 return Loader::ResultStatus::ErrorBadFileAccessHeader;
76 }
77 if (sizeof(FileAccessHeader::unk_offset_2) !=
78 file->ReadBytes(&aci_file_access.unk_offset_2, sizeof(FileAccessHeader::unk_offset_2),
79 current_offset += sizeof(FileAccessHeader::unk_size))) {
80 return Loader::ResultStatus::ErrorBadFileAccessHeader;
81 }
82 if (sizeof(FileAccessHeader::unk_size_2) !=
83 file->ReadBytes(&aci_file_access.unk_size_2, sizeof(FileAccessHeader::unk_size_2),
84 current_offset + sizeof(FileAccessHeader::unk_offset_2))) {
41 return Loader::ResultStatus::ErrorBadFileAccessHeader; 85 return Loader::ResultStatus::ErrorBadFileAccessHeader;
42 } 86 }
43 87
@@ -152,9 +196,7 @@ void ProgramMetadata::Print() const {
152 LOG_DEBUG(Service_FS, " > Is Retail: {}", acid_header.is_retail ? "YES" : "NO"); 196 LOG_DEBUG(Service_FS, " > Is Retail: {}", acid_header.is_retail ? "YES" : "NO");
153 LOG_DEBUG(Service_FS, "Title ID Min: 0x{:016X}", acid_header.title_id_min); 197 LOG_DEBUG(Service_FS, "Title ID Min: 0x{:016X}", acid_header.title_id_min);
154 LOG_DEBUG(Service_FS, "Title ID Max: 0x{:016X}", acid_header.title_id_max); 198 LOG_DEBUG(Service_FS, "Title ID Max: 0x{:016X}", acid_header.title_id_max);
155 u64_le permissions_l; // local copy to fix alignment error 199 LOG_DEBUG(Service_FS, "Filesystem Access: 0x{:016X}\n", acid_file_access.permissions);
156 std::memcpy(&permissions_l, &acid_file_access.permissions, sizeof(permissions_l));
157 LOG_DEBUG(Service_FS, "Filesystem Access: 0x{:016X}\n", permissions_l);
158 200
159 // Begin ACI0 printing (actual perms, unsigned) 201 // Begin ACI0 printing (actual perms, unsigned)
160 LOG_DEBUG(Service_FS, "Magic: {:.4}", aci_header.magic.data()); 202 LOG_DEBUG(Service_FS, "Magic: {:.4}", aci_header.magic.data());
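
The reason for the field-by-field walk above is offset drift: on disk the FAC data is packed, while the in-memory structs (see the header change below, with #pragma pack removed) are naturally aligned. A small sketch of the arithmetic, assuming the packed on-disk layout of a u8 version, 3 reserved bytes, then a u64 permissions field:

    // On-disk offsets under the packed layout.
    constexpr std::size_t version_offset = 0;
    constexpr std::size_t permissions_offset = sizeof(u8) + 3; // == 4
    static_assert(permissions_offset == 4);
    // With natural alignment (no #pragma pack), the in-memory struct puts
    // permissions at offset 8, so a single ReadObject of the whole struct
    // would read the wrong bytes; hence the
    // "current_offset += sizeof(...::version) + 3" stepping above.
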
diff --git a/src/core/file_sys/program_metadata.h b/src/core/file_sys/program_metadata.h
index e8fb4e27f..2e8960b07 100644
--- a/src/core/file_sys/program_metadata.h
+++ b/src/core/file_sys/program_metadata.h
@@ -144,20 +144,18 @@ private:
144 144
145 static_assert(sizeof(AciHeader) == 0x40, "ACI0 header structure size is wrong"); 145 static_assert(sizeof(AciHeader) == 0x40, "ACI0 header structure size is wrong");
146 146
147#pragma pack(push, 1) 147 // FileAccessControl and FileAccessHeader need to be loaded per-component: this layout
148 148 // does not reflect the real layout, which avoids binding references to misaligned addresses
149 struct FileAccessControl { 149 struct FileAccessControl {
150 u8 version; 150 u8 version;
151 INSERT_PADDING_BYTES(3); 151 // 3 padding bytes
152 u64_le permissions; 152 u64_le permissions;
153 std::array<u8, 0x20> unknown; 153 std::array<u8, 0x20> unknown;
154 }; 154 };
155 155
156 static_assert(sizeof(FileAccessControl) == 0x2C, "FS access control structure size is wrong");
157
158 struct FileAccessHeader { 156 struct FileAccessHeader {
159 u8 version; 157 u8 version;
160 INSERT_PADDING_BYTES(3); 158 // 3 padding bytes
161 u64_le permissions; 159 u64_le permissions;
162 u32_le unk_offset; 160 u32_le unk_offset;
163 u32_le unk_size; 161 u32_le unk_size;
@@ -165,10 +163,6 @@ private:
165 u32_le unk_size_2; 163 u32_le unk_size_2;
166 }; 164 };
167 165
168 static_assert(sizeof(FileAccessHeader) == 0x1C, "FS access header structure size is wrong");
169
170#pragma pack(pop)
171
172 Header npdm_header; 166 Header npdm_header;
173 AciHeader aci_header; 167 AciHeader aci_header;
174 AcidHeader acid_header; 168 AcidHeader acid_header;
diff --git a/src/core/hardware_interrupt_manager.cpp b/src/core/hardware_interrupt_manager.cpp
deleted file mode 100644
index d08cc3315..000000000
--- a/src/core/hardware_interrupt_manager.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/core.h"
5#include "core/core_timing.h"
6#include "core/hardware_interrupt_manager.h"
7#include "core/hle/service/nvdrv/nvdrv_interface.h"
8#include "core/hle/service/sm/sm.h"
9
10namespace Core::Hardware {
11
12InterruptManager::InterruptManager(Core::System& system_in) : system(system_in) {
13 gpu_interrupt_event = Core::Timing::CreateEvent(
14 "GPUInterrupt",
15 [this](std::uintptr_t message, u64 time,
16 std::chrono::nanoseconds) -> std::optional<std::chrono::nanoseconds> {
17 auto nvdrv = system.ServiceManager().GetService<Service::Nvidia::NVDRV>("nvdrv");
18 const u32 syncpt = static_cast<u32>(message >> 32);
19 const u32 value = static_cast<u32>(message);
20 nvdrv->SignalGPUInterruptSyncpt(syncpt, value);
21 return std::nullopt;
22 });
23}
24
25InterruptManager::~InterruptManager() = default;
26
27void InterruptManager::GPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
28 const u64 msg = (static_cast<u64>(syncpoint_id) << 32ULL) | value;
29 system.CoreTiming().ScheduleEvent(std::chrono::nanoseconds{10}, gpu_interrupt_event, msg);
30}
31
32} // namespace Core::Hardware
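
The deleted manager packed the syncpoint id and value into a single u64 event message; for reference, the round trip it relied on (using yuzu's u32/u64 aliases):

    // Pack/unpack round trip from the removed event handler above.
    constexpr u64 Pack(u32 syncpoint_id, u32 value) {
        return (static_cast<u64>(syncpoint_id) << 32) | value;
    }
    static_assert(Pack(0x1234, 0x5678) >> 32 == 0x1234);
    static_assert(static_cast<u32>(Pack(0x1234, 0x5678)) == 0x5678);
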
diff --git a/src/core/hardware_interrupt_manager.h b/src/core/hardware_interrupt_manager.h
deleted file mode 100644
index 5665c5918..000000000
--- a/src/core/hardware_interrupt_manager.h
+++ /dev/null
@@ -1,32 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <memory>
7
8#include "common/common_types.h"
9
10namespace Core {
11class System;
12}
13
14namespace Core::Timing {
15struct EventType;
16}
17
18namespace Core::Hardware {
19
20class InterruptManager {
21public:
22 explicit InterruptManager(Core::System& system);
23 ~InterruptManager();
24
25 void GPUInterruptSyncpt(u32 syncpoint_id, u32 value);
26
27private:
28 Core::System& system;
29 std::shared_ptr<Core::Timing::EventType> gpu_interrupt_event;
30};
31
32} // namespace Core::Hardware
diff --git a/src/core/hid/emulated_controller.cpp b/src/core/hid/emulated_controller.cpp
index 01c43be93..025f1c78e 100644
--- a/src/core/hid/emulated_controller.cpp
+++ b/src/core/hid/emulated_controller.cpp
@@ -93,7 +93,7 @@ void EmulatedController::ReloadFromSettings() {
93 .body = GetNpadColor(player.body_color_left), 93 .body = GetNpadColor(player.body_color_left),
94 .button = GetNpadColor(player.button_color_left), 94 .button = GetNpadColor(player.button_color_left),
95 }; 95 };
96 controller.colors_state.left = { 96 controller.colors_state.right = {
97 .body = GetNpadColor(player.body_color_right), 97 .body = GetNpadColor(player.body_color_right),
98 .button = GetNpadColor(player.button_color_right), 98 .button = GetNpadColor(player.button_color_right),
99 }; 99 };
@@ -131,13 +131,16 @@ void EmulatedController::LoadDevices() {
131 battery_params[RightIndex].Set("battery", true); 131 battery_params[RightIndex].Set("battery", true);
132 132
133 camera_params = Common::ParamPackage{"engine:camera,camera:1"}; 133 camera_params = Common::ParamPackage{"engine:camera,camera:1"};
134 nfc_params = Common::ParamPackage{"engine:virtual_amiibo,nfc:1"};
134 135
135 output_params[LeftIndex] = left_joycon; 136 output_params[LeftIndex] = left_joycon;
136 output_params[RightIndex] = right_joycon; 137 output_params[RightIndex] = right_joycon;
137 output_params[2] = camera_params; 138 output_params[2] = camera_params;
139 output_params[3] = nfc_params;
138 output_params[LeftIndex].Set("output", true); 140 output_params[LeftIndex].Set("output", true);
139 output_params[RightIndex].Set("output", true); 141 output_params[RightIndex].Set("output", true);
140 output_params[2].Set("output", true); 142 output_params[2].Set("output", true);
143 output_params[3].Set("output", true);
141 144
142 LoadTASParams(); 145 LoadTASParams();
143 146
@@ -155,6 +158,7 @@ void EmulatedController::LoadDevices() {
155 std::transform(battery_params.begin(), battery_params.end(), battery_devices.begin(), 158 std::transform(battery_params.begin(), battery_params.end(), battery_devices.begin(),
156 Common::Input::CreateDevice<Common::Input::InputDevice>); 159 Common::Input::CreateDevice<Common::Input::InputDevice>);
157 camera_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(camera_params); 160 camera_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(camera_params);
161 nfc_devices = Common::Input::CreateDevice<Common::Input::InputDevice>(nfc_params);
158 std::transform(output_params.begin(), output_params.end(), output_devices.begin(), 162 std::transform(output_params.begin(), output_params.end(), output_devices.begin(),
159 Common::Input::CreateDevice<Common::Input::OutputDevice>); 163 Common::Input::CreateDevice<Common::Input::OutputDevice>);
160 164
@@ -284,6 +288,16 @@ void EmulatedController::ReloadInput() {
284 camera_devices->ForceUpdate(); 288 camera_devices->ForceUpdate();
285 } 289 }
286 290
291 if (nfc_devices) {
292 if (npad_id_type == NpadIdType::Handheld || npad_id_type == NpadIdType::Player1) {
293 nfc_devices->SetCallback({
294 .on_change =
295 [this](const Common::Input::CallbackStatus& callback) { SetNfc(callback); },
296 });
297 nfc_devices->ForceUpdate();
298 }
299 }
300
287 // Use a common UUID for TAS 301 // Use a common UUID for TAS
288 static constexpr Common::UUID TAS_UUID = Common::UUID{ 302 static constexpr Common::UUID TAS_UUID = Common::UUID{
289 {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xA5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}; 303 {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xA5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}};
@@ -339,6 +353,8 @@ void EmulatedController::UnloadInput() {
339 for (auto& stick : tas_stick_devices) { 353 for (auto& stick : tas_stick_devices) {
340 stick.reset(); 354 stick.reset();
341 } 355 }
356 camera_devices.reset();
357 nfc_devices.reset();
342} 358}
343 359
344void EmulatedController::EnableConfiguration() { 360void EmulatedController::EnableConfiguration() {
@@ -903,6 +919,25 @@ void EmulatedController::SetCamera(const Common::Input::CallbackStatus& callback
903 TriggerOnChange(ControllerTriggerType::IrSensor, true); 919 TriggerOnChange(ControllerTriggerType::IrSensor, true);
904} 920}
905 921
922void EmulatedController::SetNfc(const Common::Input::CallbackStatus& callback) {
923 std::unique_lock lock{mutex};
924 controller.nfc_values = TransformToNfc(callback);
925
926 if (is_configuring) {
927 lock.unlock();
928 TriggerOnChange(ControllerTriggerType::Nfc, false);
929 return;
930 }
931
932 controller.nfc_state = {
933 controller.nfc_values.state,
934 controller.nfc_values.data,
935 };
936
937 lock.unlock();
938 TriggerOnChange(ControllerTriggerType::Nfc, true);
939}
940
906bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue vibration) { 941bool EmulatedController::SetVibration(std::size_t device_index, VibrationValue vibration) {
907 if (device_index >= output_devices.size()) { 942 if (device_index >= output_devices.size()) {
908 return false; 943 return false;
@@ -980,7 +1015,13 @@ bool EmulatedController::TestVibration(std::size_t device_index) {
980bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) { 1015bool EmulatedController::SetPollingMode(Common::Input::PollingMode polling_mode) {
981 LOG_INFO(Service_HID, "Set polling mode {}", polling_mode); 1016 LOG_INFO(Service_HID, "Set polling mode {}", polling_mode);
982 auto& output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)]; 1017 auto& output_device = output_devices[static_cast<std::size_t>(DeviceIndex::Right)];
983 return output_device->SetPollingMode(polling_mode) == Common::Input::PollingError::None; 1018 auto& nfc_output_device = output_devices[3];
1019
1020 const auto virtual_nfc_result = nfc_output_device->SetPollingMode(polling_mode);
1021 const auto mapped_nfc_result = output_device->SetPollingMode(polling_mode);
1022
1023 return virtual_nfc_result == Common::Input::PollingError::None ||
1024 mapped_nfc_result == Common::Input::PollingError::None;
984} 1025}
985 1026
986bool EmulatedController::SetCameraFormat( 1027bool EmulatedController::SetCameraFormat(
@@ -1000,6 +1041,32 @@ bool EmulatedController::SetCameraFormat(
1000 camera_format)) == Common::Input::CameraError::None; 1041 camera_format)) == Common::Input::CameraError::None;
1001} 1042}
1002 1043
1044bool EmulatedController::HasNfc() const {
1045 const auto& nfc_output_device = output_devices[3];
1046
1047 switch (npad_type) {
1048 case NpadStyleIndex::JoyconRight:
1049 case NpadStyleIndex::JoyconDual:
1050 case NpadStyleIndex::ProController:
1051 break;
1052 default:
1053 return false;
1054 }
1055
1056 const bool has_virtual_nfc =
1057 npad_id_type == NpadIdType::Player1 || npad_id_type == NpadIdType::Handheld;
1058 const bool is_virtual_nfc_supported =
1059 nfc_output_device->SupportsNfc() != Common::Input::NfcState::NotSupported;
1060
1061 return is_connected && (has_virtual_nfc && is_virtual_nfc_supported);
1062}
1063
1064bool EmulatedController::WriteNfc(const std::vector<u8>& data) {
1065 auto& nfc_output_device = output_devices[3];
1066
1067 return nfc_output_device->WriteNfcData(data) == Common::Input::NfcState::Success;
1068}
1069
1003void EmulatedController::SetLedPattern() { 1070void EmulatedController::SetLedPattern() {
1004 for (auto& device : output_devices) { 1071 for (auto& device : output_devices) {
1005 if (!device) { 1072 if (!device) {
@@ -1363,6 +1430,11 @@ const CameraState& EmulatedController::GetCamera() const {
1363 return controller.camera_state; 1430 return controller.camera_state;
1364} 1431}
1365 1432
1433const NfcState& EmulatedController::GetNfc() const {
1434 std::scoped_lock lock{mutex};
1435 return controller.nfc_state;
1436}
1437
1366NpadColor EmulatedController::GetNpadColor(u32 color) { 1438NpadColor EmulatedController::GetNpadColor(u32 color) {
1367 return { 1439 return {
1368 .r = static_cast<u8>((color >> 16) & 0xFF), 1440 .r = static_cast<u8>((color >> 16) & 0xFF),
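
A hypothetical consumer of the new NFC surface, using only the accessors added in this file; TryWriteTag is an illustrative helper, not part of this change:

    bool TryWriteTag(Core::HID::EmulatedController& controller,
                     const std::vector<u8>& tag_data) {
        // HasNfc() checks the style (JoyconRight/JoyconDual/ProController),
        // the connection state, and virtual_amiibo support on output
        // device 3.
        if (!controller.HasNfc()) {
            return false;
        }
        // True only when the backend reports NfcState::Success.
        return controller.WriteNfc(tag_data);
    }
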
diff --git a/src/core/hid/emulated_controller.h b/src/core/hid/emulated_controller.h
index c3aa8f9d3..319226bf8 100644
--- a/src/core/hid/emulated_controller.h
+++ b/src/core/hid/emulated_controller.h
@@ -20,7 +20,7 @@
20 20
21namespace Core::HID { 21namespace Core::HID {
22const std::size_t max_emulated_controllers = 2; 22const std::size_t max_emulated_controllers = 2;
23const std::size_t output_devices = 3; 23const std::size_t output_devices_size = 4;
24struct ControllerMotionInfo { 24struct ControllerMotionInfo {
25 Common::Input::MotionStatus raw_status{}; 25 Common::Input::MotionStatus raw_status{};
26 MotionInput emulated{}; 26 MotionInput emulated{};
@@ -37,7 +37,8 @@ using TriggerDevices =
37using BatteryDevices = 37using BatteryDevices =
38 std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>; 38 std::array<std::unique_ptr<Common::Input::InputDevice>, max_emulated_controllers>;
39using CameraDevices = std::unique_ptr<Common::Input::InputDevice>; 39using CameraDevices = std::unique_ptr<Common::Input::InputDevice>;
40using OutputDevices = std::array<std::unique_ptr<Common::Input::OutputDevice>, output_devices>; 40using NfcDevices = std::unique_ptr<Common::Input::InputDevice>;
41using OutputDevices = std::array<std::unique_ptr<Common::Input::OutputDevice>, output_devices_size>;
41 42
42using ButtonParams = std::array<Common::ParamPackage, Settings::NativeButton::NumButtons>; 43using ButtonParams = std::array<Common::ParamPackage, Settings::NativeButton::NumButtons>;
43using StickParams = std::array<Common::ParamPackage, Settings::NativeAnalog::NumAnalogs>; 44using StickParams = std::array<Common::ParamPackage, Settings::NativeAnalog::NumAnalogs>;
@@ -45,7 +46,8 @@ using ControllerMotionParams = std::array<Common::ParamPackage, Settings::Native
45using TriggerParams = std::array<Common::ParamPackage, Settings::NativeTrigger::NumTriggers>; 46using TriggerParams = std::array<Common::ParamPackage, Settings::NativeTrigger::NumTriggers>;
46using BatteryParams = std::array<Common::ParamPackage, max_emulated_controllers>; 47using BatteryParams = std::array<Common::ParamPackage, max_emulated_controllers>;
47using CameraParams = Common::ParamPackage; 48using CameraParams = Common::ParamPackage;
48using OutputParams = std::array<Common::ParamPackage, output_devices>; 49using NfcParams = Common::ParamPackage;
50using OutputParams = std::array<Common::ParamPackage, output_devices_size>;
49 51
50using ButtonValues = std::array<Common::Input::ButtonStatus, Settings::NativeButton::NumButtons>; 52using ButtonValues = std::array<Common::Input::ButtonStatus, Settings::NativeButton::NumButtons>;
51using SticksValues = std::array<Common::Input::StickStatus, Settings::NativeAnalog::NumAnalogs>; 53using SticksValues = std::array<Common::Input::StickStatus, Settings::NativeAnalog::NumAnalogs>;
@@ -55,6 +57,7 @@ using ControllerMotionValues = std::array<ControllerMotionInfo, Settings::Native
55using ColorValues = std::array<Common::Input::BodyColorStatus, max_emulated_controllers>; 57using ColorValues = std::array<Common::Input::BodyColorStatus, max_emulated_controllers>;
56using BatteryValues = std::array<Common::Input::BatteryStatus, max_emulated_controllers>; 58using BatteryValues = std::array<Common::Input::BatteryStatus, max_emulated_controllers>;
57using CameraValues = Common::Input::CameraStatus; 59using CameraValues = Common::Input::CameraStatus;
60using NfcValues = Common::Input::NfcStatus;
58using VibrationValues = std::array<Common::Input::VibrationStatus, max_emulated_controllers>; 61using VibrationValues = std::array<Common::Input::VibrationStatus, max_emulated_controllers>;
59 62
60struct AnalogSticks { 63struct AnalogSticks {
@@ -80,6 +83,11 @@ struct CameraState {
80 std::size_t sample{}; 83 std::size_t sample{};
81}; 84};
82 85
86struct NfcState {
87 Common::Input::NfcState state{};
88 std::vector<u8> data{};
89};
90
83struct ControllerMotion { 91struct ControllerMotion {
84 Common::Vec3f accel{}; 92 Common::Vec3f accel{};
85 Common::Vec3f gyro{}; 93 Common::Vec3f gyro{};
@@ -107,6 +115,7 @@ struct ControllerStatus {
107 BatteryValues battery_values{}; 115 BatteryValues battery_values{};
108 VibrationValues vibration_values{}; 116 VibrationValues vibration_values{};
109 CameraValues camera_values{}; 117 CameraValues camera_values{};
118 NfcValues nfc_values{};
110 119
111 // Data for HID services 120 // Data for HID services
112 HomeButtonState home_button_state{}; 121 HomeButtonState home_button_state{};
@@ -119,6 +128,7 @@ struct ControllerStatus {
119 ControllerColors colors_state{}; 128 ControllerColors colors_state{};
120 BatteryLevelState battery_state{}; 129 BatteryLevelState battery_state{};
121 CameraState camera_state{}; 130 CameraState camera_state{};
131 NfcState nfc_state{};
122}; 132};
123 133
124enum class ControllerTriggerType { 134enum class ControllerTriggerType {
@@ -130,6 +140,7 @@ enum class ControllerTriggerType {
130 Battery, 140 Battery,
131 Vibration, 141 Vibration,
132 IrSensor, 142 IrSensor,
143 Nfc,
133 Connected, 144 Connected,
134 Disconnected, 145 Disconnected,
135 Type, 146 Type,
@@ -315,6 +326,9 @@ public:
315 /// Returns the latest camera status from the controller 326 /// Returns the latest camera status from the controller
316 const CameraState& GetCamera() const; 327 const CameraState& GetCamera() const;
317 328
329 /// Returns the latest ntag status from the controller
330 const NfcState& GetNfc() const;
331
318 /** 332 /**
319 * Sends a specific vibration to the output device 333 * Sends a specific vibration to the output device
320 * @return true if vibration had no errors 334 * @return true if vibration had no errors
@@ -341,6 +355,12 @@ public:
341 */ 355 */
342 bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format); 356 bool SetCameraFormat(Core::IrSensor::ImageTransferProcessorFormat camera_format);
343 357
358 /// Returns true if the device has nfc support
359 bool HasNfc() const;
360
361 /// Returns true if the nfc tag was written
362 bool WriteNfc(const std::vector<u8>& data);
363
344 /// Returns the led pattern corresponding to this emulated controller 364 /// Returns the led pattern corresponding to this emulated controller
345 LedPattern GetLedPattern() const; 365 LedPattern GetLedPattern() const;
346 366
@@ -425,6 +445,12 @@ private:
425 void SetCamera(const Common::Input::CallbackStatus& callback); 445 void SetCamera(const Common::Input::CallbackStatus& callback);
426 446
427 /** 447 /**
448 * Updates the nfc status of the controller
449 * @param callback A CallbackStatus containing the nfc status
450 */
451 void SetNfc(const Common::Input::CallbackStatus& callback);
452
453 /**
428 * Converts a color format from bgra to rgba 454 * Converts a color format from bgra to rgba
429 * @param color in bgra format 455 * @param color in bgra format
430 * @return NpadColor in rgba format 456 * @return NpadColor in rgba format
@@ -458,6 +484,7 @@ private:
458 TriggerParams trigger_params; 484 TriggerParams trigger_params;
459 BatteryParams battery_params; 485 BatteryParams battery_params;
460 CameraParams camera_params; 486 CameraParams camera_params;
487 NfcParams nfc_params;
461 OutputParams output_params; 488 OutputParams output_params;
462 489
463 ButtonDevices button_devices; 490 ButtonDevices button_devices;
@@ -466,6 +493,7 @@ private:
466 TriggerDevices trigger_devices; 493 TriggerDevices trigger_devices;
467 BatteryDevices battery_devices; 494 BatteryDevices battery_devices;
468 CameraDevices camera_devices; 495 CameraDevices camera_devices;
496 NfcDevices nfc_devices;
469 OutputDevices output_devices; 497 OutputDevices output_devices;
470 498
471 // TAS related variables 499 // TAS related variables
diff --git a/src/core/hid/input_converter.cpp b/src/core/hid/input_converter.cpp
index 52fb69e9c..fe9915abe 100644
--- a/src/core/hid/input_converter.cpp
+++ b/src/core/hid/input_converter.cpp
@@ -287,6 +287,20 @@ Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatu
287 return camera; 287 return camera;
288} 288}
289 289
290Common::Input::NfcStatus TransformToNfc(const Common::Input::CallbackStatus& callback) {
291 Common::Input::NfcStatus nfc{};
292 switch (callback.type) {
293 case Common::Input::InputType::Nfc:
294 return callback.nfc_status;
295 break;
296 default:
297 LOG_ERROR(Input, "Conversion from type {} to NFC not implemented", callback.type);
298 break;
299 }
300
301 return nfc;
302}
303
290void SanitizeAnalog(Common::Input::AnalogStatus& analog, bool clamp_value) { 304void SanitizeAnalog(Common::Input::AnalogStatus& analog, bool clamp_value) {
291 const auto& properties = analog.properties; 305 const auto& properties = analog.properties;
292 float& raw_value = analog.raw_value; 306 float& raw_value = analog.raw_value;
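
A minimal sketch of the converter contract added above: only a callback of InputType::Nfc carries a payload; any other type logs an error and yields a default-constructed NfcStatus.

    Common::Input::NfcStatus Example() {
        Common::Input::CallbackStatus callback{};
        callback.type = Common::Input::InputType::Nfc;
        // callback.nfc_status would normally be populated by the input
        // engine (virtual_amiibo) before the callback fires.
        return TransformToNfc(callback);
    }
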
diff --git a/src/core/hid/input_converter.h b/src/core/hid/input_converter.h
index 143c50cc0..b7eb6e660 100644
--- a/src/core/hid/input_converter.h
+++ b/src/core/hid/input_converter.h
@@ -85,6 +85,14 @@ Common::Input::AnalogStatus TransformToAnalog(const Common::Input::CallbackStatu
85Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatus& callback); 85Common::Input::CameraStatus TransformToCamera(const Common::Input::CallbackStatus& callback);
86 86
87/** 87/**
88 * Converts raw input data into a valid nfc status.
89 *
90 * @param callback Supported callbacks: Nfc.
91 * @return A valid NfcStatus object.
92 */
93Common::Input::NfcStatus TransformToNfc(const Common::Input::CallbackStatus& callback);
94
95/**
88 * Converts raw analog data into a valid analog value 96 * Converts raw analog data into a valid analog value
89 * @param analog An analog object containing raw data and properties 97 * @param analog An analog object containing raw data and properties
90 * @param clamp_value determines if the value needs to be clamped between -1.0f and 1.0f. 98 * @param clamp_value determines if the value needs to be clamped between -1.0f and 1.0f.
diff --git a/src/core/hle/kernel/k_worker_task_manager.cpp b/src/core/hle/kernel/k_worker_task_manager.cpp
index 221f341ee..04042bf8f 100644
--- a/src/core/hle/kernel/k_worker_task_manager.cpp
+++ b/src/core/hle/kernel/k_worker_task_manager.cpp
@@ -23,7 +23,7 @@ void KWorkerTask::DoWorkerTask() {
23 } 23 }
24} 24}
25 25
26KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "yuzu:KWorkerTaskManager") {} 26KWorkerTaskManager::KWorkerTaskManager() : m_waiting_thread(1, "KWorkerTaskManager") {}
27 27
28void KWorkerTaskManager::AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task) { 28void KWorkerTaskManager::AddTask(KernelCore& kernel, WorkerType type, KWorkerTask* task) {
29 ASSERT(type <= WorkerType::Count); 29 ASSERT(type <= WorkerType::Count);
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index ce7fa8275..9251f29ad 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -48,7 +48,7 @@ namespace Kernel {
48struct KernelCore::Impl { 48struct KernelCore::Impl {
49 explicit Impl(Core::System& system_, KernelCore& kernel_) 49 explicit Impl(Core::System& system_, KernelCore& kernel_)
50 : time_manager{system_}, 50 : time_manager{system_},
51 service_threads_manager{1, "yuzu:ServiceThreadsManager"}, system{system_} {} 51 service_threads_manager{1, "ServiceThreadsManager"}, system{system_} {}
52 52
53 void SetMulticore(bool is_multi) { 53 void SetMulticore(bool is_multi) {
54 is_multicore = is_multi; 54 is_multicore = is_multi;
diff --git a/src/core/hle/kernel/service_thread.cpp b/src/core/hle/kernel/service_thread.cpp
index 2e87b4ea4..d23d76706 100644
--- a/src/core/hle/kernel/service_thread.cpp
+++ b/src/core/hle/kernel/service_thread.cpp
@@ -36,7 +36,7 @@ ServiceThread::Impl::Impl(KernelCore& kernel, std::size_t num_threads, const std
36 : service_name{name} { 36 : service_name{name} {
37 for (std::size_t i = 0; i < num_threads; ++i) { 37 for (std::size_t i = 0; i < num_threads; ++i) {
38 threads.emplace_back([this, &kernel](std::stop_token stop_token) { 38 threads.emplace_back([this, &kernel](std::stop_token stop_token) {
39 Common::SetCurrentThreadName(std::string{"yuzu:HleService:" + service_name}.c_str()); 39 Common::SetCurrentThreadName(std::string{"HleService:" + service_name}.c_str());
40 40
41 // Wait for first request before trying to acquire a render context 41 // Wait for first request before trying to acquire a render context
42 { 42 {
diff --git a/src/core/hle/service/hid/controllers/npad.cpp b/src/core/hle/service/hid/controllers/npad.cpp
index cb29004e8..f8972ec7a 100644
--- a/src/core/hle/service/hid/controllers/npad.cpp
+++ b/src/core/hle/service/hid/controllers/npad.cpp
@@ -660,7 +660,6 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
660 ASSERT(false); 660 ASSERT(false);
661 break; 661 break;
662 case Core::HID::NpadStyleIndex::ProController: 662 case Core::HID::NpadStyleIndex::ProController:
663 case Core::HID::NpadStyleIndex::Pokeball:
664 set_motion_state(sixaxis_fullkey_state, motion_state[0]); 663 set_motion_state(sixaxis_fullkey_state, motion_state[0]);
665 break; 664 break;
666 case Core::HID::NpadStyleIndex::Handheld: 665 case Core::HID::NpadStyleIndex::Handheld:
@@ -676,6 +675,11 @@ void Controller_NPad::OnMotionUpdate(const Core::Timing::CoreTiming& core_timing
676 case Core::HID::NpadStyleIndex::JoyconRight: 675 case Core::HID::NpadStyleIndex::JoyconRight:
677 set_motion_state(sixaxis_right_lifo_state, motion_state[1]); 676 set_motion_state(sixaxis_right_lifo_state, motion_state[1]);
678 break; 677 break;
678 case Core::HID::NpadStyleIndex::Pokeball:
679 using namespace std::literals::chrono_literals;
680 set_motion_state(sixaxis_fullkey_state, motion_state[0]);
681 sixaxis_fullkey_state.delta_time = std::chrono::nanoseconds(15ms).count();
682 break;
679 default: 683 default:
680 break; 684 break;
681 } 685 }
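
The Pokeball path above pins the sixaxis delta_time to a fixed 15 ms sample interval; written out, the conversion it stores is:

    #include <chrono>
    using namespace std::literals::chrono_literals;
    // 15 ms expressed as a nanosecond count, as assigned to delta_time.
    static_assert(std::chrono::nanoseconds(15ms).count() == 15'000'000);
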
diff --git a/src/core/hle/service/hid/controllers/palma.cpp b/src/core/hle/service/hid/controllers/palma.cpp
new file mode 100644
index 000000000..575d4e626
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/palma.cpp
@@ -0,0 +1,229 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/core_timing.h"
5#include "core/hid/emulated_controller.h"
6#include "core/hid/hid_core.h"
7#include "core/hid/hid_types.h"
8#include "core/hle/kernel/k_event.h"
9#include "core/hle/kernel/k_readable_event.h"
10#include "core/hle/service/hid/controllers/palma.h"
11#include "core/hle/service/kernel_helpers.h"
12
13namespace Service::HID {
14
15Controller_Palma::Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
16 KernelHelpers::ServiceContext& service_context_)
17 : ControllerBase{hid_core_}, service_context{service_context_} {
18 controller = hid_core.GetEmulatedController(Core::HID::NpadIdType::Other);
19 operation_complete_event = service_context.CreateEvent("hid:PalmaOperationCompleteEvent");
20}
21
22Controller_Palma::~Controller_Palma() = default;
23
24void Controller_Palma::OnInit() {}
25
26void Controller_Palma::OnRelease() {}
27
28void Controller_Palma::OnUpdate(const Core::Timing::CoreTiming& core_timing) {
29 if (!IsControllerActivated()) {
30 return;
31 }
32}
33
34Result Controller_Palma::GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id,
35 PalmaConnectionHandle& handle) {
36 active_handle.npad_id = npad_id;
37 handle = active_handle;
38 return ResultSuccess;
39}
40
41Result Controller_Palma::InitializePalma(const PalmaConnectionHandle& handle) {
42 if (handle.npad_id != active_handle.npad_id) {
43 return InvalidPalmaHandle;
44 }
45 ActivateController();
46 return ResultSuccess;
47}
48
49Kernel::KReadableEvent& Controller_Palma::AcquirePalmaOperationCompleteEvent(
50 const PalmaConnectionHandle& handle) const {
51 if (handle.npad_id != active_handle.npad_id) {
52 LOG_ERROR(Service_HID, "Invalid npad id {}", handle.npad_id);
53 }
54 return operation_complete_event->GetReadableEvent();
55}
56
57Result Controller_Palma::GetPalmaOperationInfo(const PalmaConnectionHandle& handle,
58 PalmaOperationType& operation_type,
59 PalmaOperationData& data) const {
60 if (handle.npad_id != active_handle.npad_id) {
61 return InvalidPalmaHandle;
62 }
63 operation_type = operation.operation;
64 data = operation.data;
65 return ResultSuccess;
66}
67
68Result Controller_Palma::PlayPalmaActivity(const PalmaConnectionHandle& handle,
69 u64 palma_activity) {
70 if (handle.npad_id != active_handle.npad_id) {
71 return InvalidPalmaHandle;
72 }
73 operation.operation = PalmaOperationType::PlayActivity;
74 operation.result = PalmaResultSuccess;
75 operation.data = {};
76 operation_complete_event->GetWritableEvent().Signal();
77 return ResultSuccess;
78}
79
80Result Controller_Palma::SetPalmaFrModeType(const PalmaConnectionHandle& handle,
81 PalmaFrModeType fr_mode_) {
82 if (handle.npad_id != active_handle.npad_id) {
83 return InvalidPalmaHandle;
84 }
85 fr_mode = fr_mode_;
86 return ResultSuccess;
87}
88
89Result Controller_Palma::ReadPalmaStep(const PalmaConnectionHandle& handle) {
90 if (handle.npad_id != active_handle.npad_id) {
91 return InvalidPalmaHandle;
92 }
93 operation.operation = PalmaOperationType::ReadStep;
94 operation.result = PalmaResultSuccess;
95 operation.data = {};
96 operation_complete_event->GetWritableEvent().Signal();
97 return ResultSuccess;
98}
99
100Result Controller_Palma::EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled) {
101 if (handle.npad_id != active_handle.npad_id) {
102 return InvalidPalmaHandle;
103 }
104 return ResultSuccess;
105}
106
107Result Controller_Palma::ResetPalmaStep(const PalmaConnectionHandle& handle) {
108 if (handle.npad_id != active_handle.npad_id) {
109 return InvalidPalmaHandle;
110 }
111 return ResultSuccess;
112}
113
114void Controller_Palma::ReadPalmaApplicationSection() {}
115
116void Controller_Palma::WritePalmaApplicationSection() {}
117
118Result Controller_Palma::ReadPalmaUniqueCode(const PalmaConnectionHandle& handle) {
119 if (handle.npad_id != active_handle.npad_id) {
120 return InvalidPalmaHandle;
121 }
122 operation.operation = PalmaOperationType::ReadUniqueCode;
123 operation.result = PalmaResultSuccess;
124 operation.data = {};
125 operation_complete_event->GetWritableEvent().Signal();
126 return ResultSuccess;
127}
128
129Result Controller_Palma::SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle) {
130 if (handle.npad_id != active_handle.npad_id) {
131 return InvalidPalmaHandle;
132 }
133 operation.operation = PalmaOperationType::SetUniqueCodeInvalid;
134 operation.result = PalmaResultSuccess;
135 operation.data = {};
136 operation_complete_event->GetWritableEvent().Signal();
137 return ResultSuccess;
138}
139
140void Controller_Palma::WritePalmaActivityEntry() {}
141
142Result Controller_Palma::WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle,
143 u64 unknown) {
144 if (handle.npad_id != active_handle.npad_id) {
145 return InvalidPalmaHandle;
146 }
147 operation.operation = PalmaOperationType::WriteRgbLedPatternEntry;
148 operation.result = PalmaResultSuccess;
149 operation.data = {};
150 operation_complete_event->GetWritableEvent().Signal();
151 return ResultSuccess;
152}
153
154Result Controller_Palma::WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave,
155 u8* t_mem, u64 size) {
156 if (handle.npad_id != active_handle.npad_id) {
157 return InvalidPalmaHandle;
158 }
159 operation.operation = PalmaOperationType::WriteWaveEntry;
160 operation.result = PalmaResultSuccess;
161 operation.data = {};
162 operation_complete_event->GetWritableEvent().Signal();
163 return ResultSuccess;
164}
165
166Result Controller_Palma::SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
167 s32 database_id_version_) {
168 if (handle.npad_id != active_handle.npad_id) {
169 return InvalidPalmaHandle;
170 }
171 database_id_version = database_id_version_;
172 operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion;
173 operation.result = PalmaResultSuccess;
174 operation.data[0] = {};
175 operation_complete_event->GetWritableEvent().Signal();
176 return ResultSuccess;
177}
178
179Result Controller_Palma::GetPalmaDataBaseIdentificationVersion(
180 const PalmaConnectionHandle& handle) {
181 if (handle.npad_id != active_handle.npad_id) {
182 return InvalidPalmaHandle;
183 }
184 operation.operation = PalmaOperationType::ReadDataBaseIdentificationVersion;
185 operation.result = PalmaResultSuccess;
186 operation.data = {};
187 operation.data[0] = static_cast<u8>(database_id_version);
188 operation_complete_event->GetWritableEvent().Signal();
189 return ResultSuccess;
190}
191
192void Controller_Palma::SuspendPalmaFeature() {}
193
194Result Controller_Palma::GetPalmaOperationResult(const PalmaConnectionHandle& handle) const {
195 if (handle.npad_id != active_handle.npad_id) {
196 return InvalidPalmaHandle;
197 }
198 return operation.result;
199}
200void Controller_Palma::ReadPalmaPlayLog() {}
201
202void Controller_Palma::ResetPalmaPlayLog() {}
203
204void Controller_Palma::SetIsPalmaAllConnectable(bool is_all_connectable) {
205 // If true controllers are able to be paired
206 is_connectable = is_all_connectable;
207}
208
209void Controller_Palma::SetIsPalmaPairedConnectable() {}
210
211Result Controller_Palma::PairPalma(const PalmaConnectionHandle& handle) {
212 if (handle.npad_id != active_handle.npad_id) {
213 return InvalidPalmaHandle;
214 }
215 // TODO: Do something
216 return ResultSuccess;
217}
218
219void Controller_Palma::SetPalmaBoostMode(bool boost_mode) {}
220
221void Controller_Palma::CancelWritePalmaWaveEntry() {}
222
223void Controller_Palma::EnablePalmaBoostMode() {}
224
225void Controller_Palma::GetPalmaBluetoothAddress() {}
226
227void Controller_Palma::SetDisallowedPalmaConnection() {}
228
229} // namespace Service::HID
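
Every Palma operation above follows the same handle-gated shape; a hypothetical caller sketch (RunActivity is illustrative, not a function in this change):

    Result RunActivity(Controller_Palma& palma, u64 activity) {
        Controller_Palma::PalmaConnectionHandle handle{};
        if (const Result result = palma.GetPalmaConnectionHandle(
                Core::HID::NpadIdType::Other, handle);
            result.IsError()) {
            return result;
        }
        if (const Result result = palma.InitializePalma(handle);
            result.IsError()) {
            return result;
        }
        // On success this fills `operation` and signals
        // operation_complete_event for AcquirePalmaOperationCompleteEvent
        // waiters.
        return palma.PlayPalmaActivity(handle, activity);
    }
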
diff --git a/src/core/hle/service/hid/controllers/palma.h b/src/core/hle/service/hid/controllers/palma.h
new file mode 100644
index 000000000..1d7fc94e1
--- /dev/null
+++ b/src/core/hle/service/hid/controllers/palma.h
@@ -0,0 +1,163 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7#include "common/common_funcs.h"
8#include "common/common_types.h"
9#include "core/hle/service/hid/controllers/controller_base.h"
10#include "core/hle/service/hid/errors.h"
11
12namespace Kernel {
13class KEvent;
14class KReadableEvent;
15} // namespace Kernel
16
17namespace Service::KernelHelpers {
18class ServiceContext;
19}
20
21namespace Core::HID {
22class EmulatedController;
23} // namespace Core::HID
24
25namespace Service::HID {
26class Controller_Palma final : public ControllerBase {
27public:
28 using PalmaOperationData = std::array<u8, 0x140>;
29
30 // This is nn::hid::PalmaOperationType
31 enum class PalmaOperationType {
32 PlayActivity,
33 SetFrModeType,
34 ReadStep,
35 EnableStep,
36 ResetStep,
37 ReadApplicationSection,
38 WriteApplicationSection,
39 ReadUniqueCode,
40 SetUniqueCodeInvalid,
41 WriteActivityEntry,
42 WriteRgbLedPatternEntry,
43 WriteWaveEntry,
44 ReadDataBaseIdentificationVersion,
45 WriteDataBaseIdentificationVersion,
46 SuspendFeature,
47 ReadPlayLog,
48 ResetPlayLog,
49 };
50
51 // This is nn::hid::PalmaWaveSet
52 enum class PalmaWaveSet : u64 {
53 Small,
54 Medium,
55 Large,
56 };
57
58 // This is nn::hid::PalmaFrModeType
59 enum class PalmaFrModeType : u64 {
60 Off,
61 B01,
62 B02,
63 B03,
64 Downloaded,
65 };
66
67 // This is nn::hid::PalmaFeature
68 enum class PalmaFeature : u64 {
69 FrMode,
70 RumbleFeedback,
71 Step,
72 MuteSwitch,
73 };
74
75 // This is nn::hid::PalmaOperationInfo
76 struct PalmaOperationInfo {
77 PalmaOperationType operation{};
78 Result result{PalmaResultSuccess};
79 PalmaOperationData data{};
80 };
81 static_assert(sizeof(PalmaOperationInfo) == 0x148, "PalmaOperationInfo is an invalid size");
82
83 // This is nn::hid::PalmaActivityEntry
84 struct PalmaActivityEntry {
85 u32 rgb_led_pattern_index;
86 INSERT_PADDING_BYTES(2);
87 PalmaWaveSet wave_set;
88 u32 wave_index;
89 INSERT_PADDING_BYTES(12);
90 };
91 static_assert(sizeof(PalmaActivityEntry) == 0x20, "PalmaActivityEntry is an invalid size");
92
93 struct PalmaConnectionHandle {
94 Core::HID::NpadIdType npad_id;
95 INSERT_PADDING_BYTES(4); // Unknown
96 };
97 static_assert(sizeof(PalmaConnectionHandle) == 0x8,
98 "PalmaConnectionHandle has incorrect size.");
99
100 explicit Controller_Palma(Core::HID::HIDCore& hid_core_, u8* raw_shared_memory_,
101 KernelHelpers::ServiceContext& service_context_);
102 ~Controller_Palma() override;
103
104 // Called when the controller is initialized
105 void OnInit() override;
106
107 // When the controller is released
108 void OnRelease() override;
109
110 // When the controller is requesting an update for the shared memory
111 void OnUpdate(const Core::Timing::CoreTiming& core_timing) override;
112
113 Result GetPalmaConnectionHandle(Core::HID::NpadIdType npad_id, PalmaConnectionHandle& handle);
114 Result InitializePalma(const PalmaConnectionHandle& handle);
115 Kernel::KReadableEvent& AcquirePalmaOperationCompleteEvent(
116 const PalmaConnectionHandle& handle) const;
117 Result GetPalmaOperationInfo(const PalmaConnectionHandle& handle,
118 PalmaOperationType& operation_type,
119 PalmaOperationData& data) const;
120 Result PlayPalmaActivity(const PalmaConnectionHandle& handle, u64 palma_activity);
121 Result SetPalmaFrModeType(const PalmaConnectionHandle& handle, PalmaFrModeType fr_mode_);
122 Result ReadPalmaStep(const PalmaConnectionHandle& handle);
123 Result EnablePalmaStep(const PalmaConnectionHandle& handle, bool is_enabled);
124 Result ResetPalmaStep(const PalmaConnectionHandle& handle);
125 Result ReadPalmaUniqueCode(const PalmaConnectionHandle& handle);
126 Result SetPalmaUniqueCodeInvalid(const PalmaConnectionHandle& handle);
127 Result WritePalmaRgbLedPatternEntry(const PalmaConnectionHandle& handle, u64 unknown);
128 Result WritePalmaWaveEntry(const PalmaConnectionHandle& handle, PalmaWaveSet wave, u8* t_mem,
129 u64 size);
130 Result SetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle,
131 s32 database_id_version_);
132 Result GetPalmaDataBaseIdentificationVersion(const PalmaConnectionHandle& handle);
133 Result GetPalmaOperationResult(const PalmaConnectionHandle& handle) const;
134 void SetIsPalmaAllConnectable(bool is_all_connectable);
135 Result PairPalma(const PalmaConnectionHandle& handle);
136 void SetPalmaBoostMode(bool boost_mode);
137
138private:
139 void ReadPalmaApplicationSection();
140 void WritePalmaApplicationSection();
141 void WritePalmaActivityEntry();
142 void SuspendPalmaFeature();
143 void ReadPalmaPlayLog();
144 void ResetPalmaPlayLog();
145 void SetIsPalmaPairedConnectable();
146 void CancelWritePalmaWaveEntry();
147 void EnablePalmaBoostMode();
148 void GetPalmaBluetoothAddress();
149 void SetDisallowedPalmaConnection();
150
151 bool is_connectable{};
152 s32 database_id_version{};
153 PalmaOperationInfo operation{};
154 PalmaFrModeType fr_mode{};
155 PalmaConnectionHandle active_handle{};
156
157 Core::HID::EmulatedController* controller;
158
159 Kernel::KEvent* operation_complete_event;
160 KernelHelpers::ServiceContext& service_context;
161};
162
163} // namespace Service::HID
diff --git a/src/core/hle/service/hid/errors.h b/src/core/hle/service/hid/errors.h
index 4613a4e60..76208e9a4 100644
--- a/src/core/hle/service/hid/errors.h
+++ b/src/core/hle/service/hid/errors.h
@@ -7,6 +7,7 @@
7 7
8namespace Service::HID { 8namespace Service::HID {
9 9
10constexpr Result PalmaResultSuccess{ErrorModule::HID, 0};
10constexpr Result NpadInvalidHandle{ErrorModule::HID, 100}; 11constexpr Result NpadInvalidHandle{ErrorModule::HID, 100};
11constexpr Result NpadDeviceIndexOutOfRange{ErrorModule::HID, 107}; 12constexpr Result NpadDeviceIndexOutOfRange{ErrorModule::HID, 107};
12constexpr Result VibrationInvalidStyleIndex{ErrorModule::HID, 122}; 13constexpr Result VibrationInvalidStyleIndex{ErrorModule::HID, 122};
@@ -17,6 +18,7 @@ constexpr Result NpadIsDualJoycon{ErrorModule::HID, 601};
17constexpr Result NpadIsSameType{ErrorModule::HID, 602}; 18constexpr Result NpadIsSameType{ErrorModule::HID, 602};
18constexpr Result InvalidNpadId{ErrorModule::HID, 709}; 19constexpr Result InvalidNpadId{ErrorModule::HID, 709};
19constexpr Result NpadNotConnected{ErrorModule::HID, 710}; 20constexpr Result NpadNotConnected{ErrorModule::HID, 710};
21constexpr Result InvalidPalmaHandle{ErrorModule::HID, 3302};
20 22
21} // namespace Service::HID 23} // namespace Service::HID
22 24
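Both new constants follow the Result{module, description} convention used throughout the HID service, with description 0 standing in for success. A hedged sketch of how a handler might consume them; ValidateHandle and active_handle are illustrative names, not part of this change:

    // Illustrative only: reject a handle that does not match the active one.
    Result ValidateHandle(const PalmaConnectionHandle& handle,
                          const PalmaConnectionHandle& active_handle) {
        if (handle.npad_id != active_handle.npad_id) {
            return InvalidPalmaHandle; // {ErrorModule::HID, 3302}
        }
        return PalmaResultSuccess; // {ErrorModule::HID, 0}
    }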
diff --git a/src/core/hle/service/hid/hid.cpp b/src/core/hle/service/hid/hid.cpp
index 3d3457160..46bad7871 100644
--- a/src/core/hle/service/hid/hid.cpp
+++ b/src/core/hle/service/hid/hid.cpp
@@ -27,6 +27,7 @@
27#include "core/hle/service/hid/controllers/keyboard.h" 27#include "core/hle/service/hid/controllers/keyboard.h"
28#include "core/hle/service/hid/controllers/mouse.h" 28#include "core/hle/service/hid/controllers/mouse.h"
29#include "core/hle/service/hid/controllers/npad.h" 29#include "core/hle/service/hid/controllers/npad.h"
30#include "core/hle/service/hid/controllers/palma.h"
30#include "core/hle/service/hid/controllers/stubbed.h" 31#include "core/hle/service/hid/controllers/stubbed.h"
31#include "core/hle/service/hid/controllers/touchscreen.h" 32#include "core/hle/service/hid/controllers/touchscreen.h"
32#include "core/hle/service/hid/controllers/xpad.h" 33#include "core/hle/service/hid/controllers/xpad.h"
@@ -35,7 +36,8 @@ namespace Service::HID {
35 36
36// Updating period for each HID device. 37// Updating period for each HID device.
37// Period time is obtained by measuring the number of samples in a second on HW using a homebrew app 38// Period time is obtained by measuring the number of samples in a second on HW using a homebrew app
38constexpr auto pad_update_ns = std::chrono::nanoseconds{4 * 1000 * 1000}; // (4ms, 250Hz) 39// The correct pad_update_ns is 4ms; it is overclocked here to lower input lag
40constexpr auto pad_update_ns = std::chrono::nanoseconds{1 * 1000 * 1000}; // (1ms, 1000Hz)
39constexpr auto mouse_keyboard_update_ns = std::chrono::nanoseconds{8 * 1000 * 1000}; // (8ms, 125Hz) 41constexpr auto mouse_keyboard_update_ns = std::chrono::nanoseconds{8 * 1000 * 1000}; // (8ms, 125Hz)
40constexpr auto motion_update_ns = std::chrono::nanoseconds{5 * 1000 * 1000}; // (5ms, 200Hz) 42constexpr auto motion_update_ns = std::chrono::nanoseconds{5 * 1000 * 1000}; // (5ms, 200Hz)
41 43
@@ -60,6 +62,7 @@ IAppletResource::IAppletResource(Core::System& system_,
60 MakeControllerWithServiceContext<Controller_NPad>(HidController::NPad, shared_memory); 62 MakeControllerWithServiceContext<Controller_NPad>(HidController::NPad, shared_memory);
61 MakeController<Controller_Gesture>(HidController::Gesture, shared_memory); 63 MakeController<Controller_Gesture>(HidController::Gesture, shared_memory);
62 MakeController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor, shared_memory); 64 MakeController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor, shared_memory);
65 MakeControllerWithServiceContext<Controller_Palma>(HidController::Palma, shared_memory);
63 66
64 // Homebrew doesn't try to activate some controllers, so we activate them by default 67 // Homebrew doesn't try to activate some controllers, so we activate them by default
65 GetController<Controller_NPad>(HidController::NPad).ActivateController(); 68 GetController<Controller_NPad>(HidController::NPad).ActivateController();
@@ -310,36 +313,36 @@ Hid::Hid(Core::System& system_)
310 {406, nullptr, "GetNpadLeftRightInterfaceType"}, 313 {406, nullptr, "GetNpadLeftRightInterfaceType"},
311 {407, nullptr, "GetNpadOfHighestBatteryLevel"}, 314 {407, nullptr, "GetNpadOfHighestBatteryLevel"},
312 {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"}, 315 {408, nullptr, "GetNpadOfHighestBatteryLevelForJoyRight"},
313 {500, nullptr, "GetPalmaConnectionHandle"}, 316 {500, &Hid::GetPalmaConnectionHandle, "GetPalmaConnectionHandle"},
314 {501, nullptr, "InitializePalma"}, 317 {501, &Hid::InitializePalma, "InitializePalma"},
315 {502, nullptr, "AcquirePalmaOperationCompleteEvent"}, 318 {502, &Hid::AcquirePalmaOperationCompleteEvent, "AcquirePalmaOperationCompleteEvent"},
316 {503, nullptr, "GetPalmaOperationInfo"}, 319 {503, &Hid::GetPalmaOperationInfo, "GetPalmaOperationInfo"},
317 {504, nullptr, "PlayPalmaActivity"}, 320 {504, &Hid::PlayPalmaActivity, "PlayPalmaActivity"},
318 {505, nullptr, "SetPalmaFrModeType"}, 321 {505, &Hid::SetPalmaFrModeType, "SetPalmaFrModeType"},
319 {506, nullptr, "ReadPalmaStep"}, 322 {506, &Hid::ReadPalmaStep, "ReadPalmaStep"},
320 {507, nullptr, "EnablePalmaStep"}, 323 {507, &Hid::EnablePalmaStep, "EnablePalmaStep"},
321 {508, nullptr, "ResetPalmaStep"}, 324 {508, &Hid::ResetPalmaStep, "ResetPalmaStep"},
322 {509, nullptr, "ReadPalmaApplicationSection"}, 325 {509, &Hid::ReadPalmaApplicationSection, "ReadPalmaApplicationSection"},
323 {510, nullptr, "WritePalmaApplicationSection"}, 326 {510, &Hid::WritePalmaApplicationSection, "WritePalmaApplicationSection"},
324 {511, nullptr, "ReadPalmaUniqueCode"}, 327 {511, &Hid::ReadPalmaUniqueCode, "ReadPalmaUniqueCode"},
325 {512, nullptr, "SetPalmaUniqueCodeInvalid"}, 328 {512, &Hid::SetPalmaUniqueCodeInvalid, "SetPalmaUniqueCodeInvalid"},
326 {513, nullptr, "WritePalmaActivityEntry"}, 329 {513, &Hid::WritePalmaActivityEntry, "WritePalmaActivityEntry"},
327 {514, nullptr, "WritePalmaRgbLedPatternEntry"}, 330 {514, &Hid::WritePalmaRgbLedPatternEntry, "WritePalmaRgbLedPatternEntry"},
328 {515, nullptr, "WritePalmaWaveEntry"}, 331 {515, &Hid::WritePalmaWaveEntry, "WritePalmaWaveEntry"},
329 {516, nullptr, "SetPalmaDataBaseIdentificationVersion"}, 332 {516, &Hid::SetPalmaDataBaseIdentificationVersion, "SetPalmaDataBaseIdentificationVersion"},
330 {517, nullptr, "GetPalmaDataBaseIdentificationVersion"}, 333 {517, &Hid::GetPalmaDataBaseIdentificationVersion, "GetPalmaDataBaseIdentificationVersion"},
331 {518, nullptr, "SuspendPalmaFeature"}, 334 {518, &Hid::SuspendPalmaFeature, "SuspendPalmaFeature"},
332 {519, nullptr, "GetPalmaOperationResult"}, 335 {519, &Hid::GetPalmaOperationResult, "GetPalmaOperationResult"},
333 {520, nullptr, "ReadPalmaPlayLog"}, 336 {520, &Hid::ReadPalmaPlayLog, "ReadPalmaPlayLog"},
334 {521, nullptr, "ResetPalmaPlayLog"}, 337 {521, &Hid::ResetPalmaPlayLog, "ResetPalmaPlayLog"},
335 {522, &Hid::SetIsPalmaAllConnectable, "SetIsPalmaAllConnectable"}, 338 {522, &Hid::SetIsPalmaAllConnectable, "SetIsPalmaAllConnectable"},
336 {523, nullptr, "SetIsPalmaPairedConnectable"}, 339 {523, &Hid::SetIsPalmaPairedConnectable, "SetIsPalmaPairedConnectable"},
337 {524, nullptr, "PairPalma"}, 340 {524, &Hid::PairPalma, "PairPalma"},
338 {525, &Hid::SetPalmaBoostMode, "SetPalmaBoostMode"}, 341 {525, &Hid::SetPalmaBoostMode, "SetPalmaBoostMode"},
339 {526, nullptr, "CancelWritePalmaWaveEntry"}, 342 {526, &Hid::CancelWritePalmaWaveEntry, "CancelWritePalmaWaveEntry"},
340 {527, nullptr, "EnablePalmaBoostMode"}, 343 {527, &Hid::EnablePalmaBoostMode, "EnablePalmaBoostMode"},
341 {528, nullptr, "GetPalmaBluetoothAddress"}, 344 {528, &Hid::GetPalmaBluetoothAddress, "GetPalmaBluetoothAddress"},
342 {529, nullptr, "SetDisallowedPalmaConnection"}, 345 {529, &Hid::SetDisallowedPalmaConnection, "SetDisallowedPalmaConnection"},
343 {1000, &Hid::SetNpadCommunicationMode, "SetNpadCommunicationMode"}, 346 {1000, &Hid::SetNpadCommunicationMode, "SetNpadCommunicationMode"},
344 {1001, &Hid::GetNpadCommunicationMode, "GetNpadCommunicationMode"}, 347 {1001, &Hid::GetNpadCommunicationMode, "GetNpadCommunicationMode"},
345 {1002, &Hid::SetTouchScreenConfiguration, "SetTouchScreenConfiguration"}, 348 {1002, &Hid::SetTouchScreenConfiguration, "SetTouchScreenConfiguration"},
@@ -1878,14 +1881,361 @@ void Hid::IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx) {
1878 rb.Push(false); 1881 rb.Push(false);
1879} 1882}
1880 1883
1884void Hid::GetPalmaConnectionHandle(Kernel::HLERequestContext& ctx) {
1885 IPC::RequestParser rp{ctx};
1886 struct Parameters {
1887 Core::HID::NpadIdType npad_id;
1888 INSERT_PADDING_WORDS_NOINIT(1);
1889 u64 applet_resource_user_id;
1890 };
1891 static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
1892
1893 const auto parameters{rp.PopRaw<Parameters>()};
1894
1895 LOG_WARNING(Service_HID, "(STUBBED) called, npad_id={}, applet_resource_user_id={}",
1896 parameters.npad_id, parameters.applet_resource_user_id);
1897
1898 Controller_Palma::PalmaConnectionHandle handle;
1899 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1900 const auto result = controller.GetPalmaConnectionHandle(parameters.npad_id, handle);
1901
1902 IPC::ResponseBuilder rb{ctx, 4};
1903 rb.Push(result);
1904 rb.PushRaw(handle);
1905}
1906
1907void Hid::InitializePalma(Kernel::HLERequestContext& ctx) {
1908 IPC::RequestParser rp{ctx};
1909 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1910
1911 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
1912
1913 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1914 const auto result = controller.InitializePalma(connection_handle);
1915
1916 IPC::ResponseBuilder rb{ctx, 2};
1917 rb.Push(result);
1918}
1919
1920void Hid::AcquirePalmaOperationCompleteEvent(Kernel::HLERequestContext& ctx) {
1921 IPC::RequestParser rp{ctx};
1922 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1923
1924 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
1925
1926 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1927
1928 IPC::ResponseBuilder rb{ctx, 2, 1};
1929 rb.Push(ResultSuccess);
1930 rb.PushCopyObjects(controller.AcquirePalmaOperationCompleteEvent(connection_handle));
1931}
1932
1933void Hid::GetPalmaOperationInfo(Kernel::HLERequestContext& ctx) {
1934 IPC::RequestParser rp{ctx};
1935 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1936
1937 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
1938
1939 Controller_Palma::PalmaOperationType operation_type;
1940 Controller_Palma::PalmaOperationData data;
1941 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1942 const auto result = controller.GetPalmaOperationInfo(connection_handle, operation_type, data);
1943
1944 if (result.IsError()) {
1945 IPC::ResponseBuilder rb{ctx, 2};
1946 rb.Push(result);
1947 return;
1948 }
1949 ctx.WriteBuffer(data);
1950 IPC::ResponseBuilder rb{ctx, 4};
1951 rb.Push(result);
1952 rb.Push(static_cast<u64>(operation_type));
1953}
1954
1955void Hid::PlayPalmaActivity(Kernel::HLERequestContext& ctx) {
1956 IPC::RequestParser rp{ctx};
1957 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1958 const auto palma_activity{rp.Pop<u64>()};
1959
1960 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, palma_activity={}",
1961 connection_handle.npad_id, palma_activity);
1962
1963 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1964 const auto result = controller.PlayPalmaActivity(connection_handle, palma_activity);
1965
1966 IPC::ResponseBuilder rb{ctx, 2};
1967 rb.Push(result);
1968}
1969
1970void Hid::SetPalmaFrModeType(Kernel::HLERequestContext& ctx) {
1971 IPC::RequestParser rp{ctx};
1972 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1973 const auto fr_mode{rp.PopEnum<Controller_Palma::PalmaFrModeType>()};
1974
1975 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, fr_mode={}",
1976 connection_handle.npad_id, fr_mode);
1977
1978 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1979 const auto result = controller.SetPalmaFrModeType(connection_handle, fr_mode);
1980
1981 IPC::ResponseBuilder rb{ctx, 2};
1982 rb.Push(result);
1983}
1984
1985void Hid::ReadPalmaStep(Kernel::HLERequestContext& ctx) {
1986 IPC::RequestParser rp{ctx};
1987 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
1988
1989 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
1990
1991 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
1992 const auto result = controller.ReadPalmaStep(connection_handle);
1993
1994 IPC::ResponseBuilder rb{ctx, 2};
1995 rb.Push(result);
1996}
1997
1998void Hid::EnablePalmaStep(Kernel::HLERequestContext& ctx) {
1999 IPC::RequestParser rp{ctx};
2000 struct Parameters {
2001 bool is_enabled;
2002 INSERT_PADDING_WORDS_NOINIT(1);
2003 Controller_Palma::PalmaConnectionHandle connection_handle;
2004 };
2005 static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
2006
2007 const auto parameters{rp.PopRaw<Parameters>()};
2008
2009 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, is_enabled={}",
2010 parameters.connection_handle.npad_id, parameters.is_enabled);
2011
2012 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
2013 const auto result =
2014 controller.EnablePalmaStep(parameters.connection_handle, parameters.is_enabled);
2015
2016 IPC::ResponseBuilder rb{ctx, 2};
2017 rb.Push(result);
2018}
2019
2020void Hid::ResetPalmaStep(Kernel::HLERequestContext& ctx) {
2021 IPC::RequestParser rp{ctx};
2022 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2023
2024 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2025
2026 auto& controller = GetAppletResource()->GetController<Controller_Palma>(HidController::Palma);
2027 const auto result = controller.ResetPalmaStep(connection_handle);
2028
2029 IPC::ResponseBuilder rb{ctx, 2};
2030 rb.Push(result);
2031}
2032
2033void Hid::ReadPalmaApplicationSection(Kernel::HLERequestContext& ctx) {
2034 LOG_WARNING(Service_HID, "(STUBBED) called");
2035
2036 IPC::ResponseBuilder rb{ctx, 2};
2037 rb.Push(ResultSuccess);
2038}
2039
2040void Hid::WritePalmaApplicationSection(Kernel::HLERequestContext& ctx) {
2041 LOG_WARNING(Service_HID, "(STUBBED) called");
2042
2043 IPC::ResponseBuilder rb{ctx, 2};
2044 rb.Push(ResultSuccess);
2045}
2046
2047void Hid::ReadPalmaUniqueCode(Kernel::HLERequestContext& ctx) {
2048 IPC::RequestParser rp{ctx};
2049 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2050
2051 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2052
2053 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2054 .ReadPalmaUniqueCode(connection_handle);
2055
2056 IPC::ResponseBuilder rb{ctx, 2};
2057 rb.Push(ResultSuccess);
2058}
2059
2060void Hid::SetPalmaUniqueCodeInvalid(Kernel::HLERequestContext& ctx) {
2061 IPC::RequestParser rp{ctx};
2062 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2063
2064 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2065
2066 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2067 .SetPalmaUniqueCodeInvalid(connection_handle);
2068
2069 IPC::ResponseBuilder rb{ctx, 2};
2070 rb.Push(ResultSuccess);
2071}
2072
2073void Hid::WritePalmaActivityEntry(Kernel::HLERequestContext& ctx) {
2074 LOG_WARNING(Service_HID, "(STUBBED) called");
2075
2076 IPC::ResponseBuilder rb{ctx, 2};
2077 rb.Push(ResultSuccess);
2078}
2079
2080void Hid::WritePalmaRgbLedPatternEntry(Kernel::HLERequestContext& ctx) {
2081 IPC::RequestParser rp{ctx};
2082 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2083 const auto unknown{rp.Pop<u64>()};
2084
2085 const auto buffer = ctx.ReadBuffer();
2086
2087 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, unknown={}",
2088 connection_handle.npad_id, unknown);
2089
2090 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2091 .WritePalmaRgbLedPatternEntry(connection_handle, unknown);
2092
2093 IPC::ResponseBuilder rb{ctx, 2};
2094 rb.Push(ResultSuccess);
2095}
2096
2097void Hid::WritePalmaWaveEntry(Kernel::HLERequestContext& ctx) {
2098 IPC::RequestParser rp{ctx};
2099 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2100 const auto wave_set{rp.PopEnum<Controller_Palma::PalmaWaveSet>()};
2101 const auto unknown{rp.Pop<u64>()};
2102 const auto t_mem_size{rp.Pop<u64>()};
2103 const auto t_mem_handle{ctx.GetCopyHandle(0)};
2104 const auto size{rp.Pop<u64>()};
2105
2106 ASSERT_MSG(t_mem_size == 0x3000, "t_mem_size is not 0x3000 bytes");
2107
2108 auto t_mem =
2109 system.CurrentProcess()->GetHandleTable().GetObject<Kernel::KTransferMemory>(t_mem_handle);
2110
2111 if (t_mem.IsNull()) {
2112 LOG_ERROR(Service_HID, "t_mem is a nullptr for handle=0x{:08X}", t_mem_handle);
2113 IPC::ResponseBuilder rb{ctx, 2};
2114 rb.Push(ResultUnknown);
2115 return;
2116 }
2117
2118 ASSERT_MSG(t_mem->GetSize() == 0x3000, "t_mem has incorrect size");
2119
2120 LOG_WARNING(Service_HID,
2121 "(STUBBED) called, connection_handle={}, wave_set={}, unkown={}, "
2122 "t_mem_handle=0x{:08X}, t_mem_size={}, size={}",
2123 connection_handle.npad_id, wave_set, unknown, t_mem_handle, t_mem_size, size);
2124
2125 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2126 .WritePalmaWaveEntry(connection_handle, wave_set,
2127 system.Memory().GetPointer(t_mem->GetSourceAddress()), t_mem_size);
2128
2129 IPC::ResponseBuilder rb{ctx, 2};
2130 rb.Push(ResultSuccess);
2131}
2132
2133void Hid::SetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx) {
2134 IPC::RequestParser rp{ctx};
2135 struct Parameters {
2136 s32 database_id_version;
2137 INSERT_PADDING_WORDS_NOINIT(1);
2138 Controller_Palma::PalmaConnectionHandle connection_handle;
2139 };
2140 static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
2141
2142 const auto parameters{rp.PopRaw<Parameters>()};
2143
2144 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}, database_id_version={}",
2145 parameters.connection_handle.npad_id, parameters.database_id_version);
2146
2147 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2148 .SetPalmaDataBaseIdentificationVersion(parameters.connection_handle,
2149 parameters.database_id_version);
2150
2151 IPC::ResponseBuilder rb{ctx, 2};
2152 rb.Push(ResultSuccess);
2153}
2154
2155void Hid::GetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx) {
2156 IPC::RequestParser rp{ctx};
2157 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2158
2159 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2160
2161 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2162 .GetPalmaDataBaseIdentificationVersion(connection_handle);
2163
2164 IPC::ResponseBuilder rb{ctx, 2};
2165 rb.Push(ResultSuccess);
2166}
2167
2168void Hid::SuspendPalmaFeature(Kernel::HLERequestContext& ctx) {
2169 LOG_WARNING(Service_HID, "(STUBBED) called");
2170
2171 IPC::ResponseBuilder rb{ctx, 2};
2172 rb.Push(ResultSuccess);
2173}
2174
2175void Hid::GetPalmaOperationResult(Kernel::HLERequestContext& ctx) {
2176 IPC::RequestParser rp{ctx};
2177 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2178
2179 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2180
2181 const auto result = applet_resource->GetController<Controller_Palma>(HidController::Palma)
2182 .GetPalmaOperationResult(connection_handle);
2183
2184 IPC::ResponseBuilder rb{ctx, 2};
2185 rb.Push(result);
2186}
2187
2188void Hid::ReadPalmaPlayLog(Kernel::HLERequestContext& ctx) {
2189 LOG_WARNING(Service_HID, "(STUBBED) called");
2190
2191 IPC::ResponseBuilder rb{ctx, 2};
2192 rb.Push(ResultSuccess);
2193}
2194
2195void Hid::ResetPalmaPlayLog(Kernel::HLERequestContext& ctx) {
2196 LOG_WARNING(Service_HID, "(STUBBED) called");
2197
2198 IPC::ResponseBuilder rb{ctx, 2};
2199 rb.Push(ResultSuccess);
2200}
2201
1881void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) { 2202void Hid::SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx) {
1882 IPC::RequestParser rp{ctx}; 2203 IPC::RequestParser rp{ctx};
1883 const auto applet_resource_user_id{rp.Pop<u64>()}; 2204 struct Parameters {
1884 const auto is_palma_all_connectable{rp.Pop<bool>()}; 2205 bool is_palma_all_connectable;
2206 INSERT_PADDING_BYTES_NOINIT(7);
2207 u64 applet_resource_user_id;
2208 };
2209 static_assert(sizeof(Parameters) == 0x10, "Parameters has incorrect size.");
2210
2211 const auto parameters{rp.PopRaw<Parameters>()};
1885 2212
1886 LOG_WARNING(Service_HID, 2213 LOG_WARNING(Service_HID,
1887 "(STUBBED) called, applet_resource_user_id={}, is_palma_all_connectable={}", 2214 "(STUBBED) called, is_palma_all_connectable={},applet_resource_user_id={}",
1888 applet_resource_user_id, is_palma_all_connectable); 2215 parameters.is_palma_all_connectable, parameters.applet_resource_user_id);
2216
2217 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2218 .SetIsPalmaAllConnectable(parameters.is_palma_all_connectable);
2219
2220 IPC::ResponseBuilder rb{ctx, 2};
2221 rb.Push(ResultSuccess);
2222}
2223
2224void Hid::SetIsPalmaPairedConnectable(Kernel::HLERequestContext& ctx) {
2225 LOG_WARNING(Service_HID, "(STUBBED) called");
2226
2227 IPC::ResponseBuilder rb{ctx, 2};
2228 rb.Push(ResultSuccess);
2229}
2230
2231void Hid::PairPalma(Kernel::HLERequestContext& ctx) {
2232 IPC::RequestParser rp{ctx};
2233 const auto connection_handle{rp.PopRaw<Controller_Palma::PalmaConnectionHandle>()};
2234
2235 LOG_WARNING(Service_HID, "(STUBBED) called, connection_handle={}", connection_handle.npad_id);
2236
2237 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2238 .PairPalma(connection_handle);
1889 2239
1890 IPC::ResponseBuilder rb{ctx, 2}; 2240 IPC::ResponseBuilder rb{ctx, 2};
1891 rb.Push(ResultSuccess); 2241 rb.Push(ResultSuccess);
@@ -1897,6 +2247,37 @@ void Hid::SetPalmaBoostMode(Kernel::HLERequestContext& ctx) {
1897 2247
1898 LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode); 2248 LOG_WARNING(Service_HID, "(STUBBED) called, palma_boost_mode={}", palma_boost_mode);
1899 2249
2250 applet_resource->GetController<Controller_Palma>(HidController::Palma)
2251 .SetPalmaBoostMode(palma_boost_mode);
2252
2253 IPC::ResponseBuilder rb{ctx, 2};
2254 rb.Push(ResultSuccess);
2255}
2256
2257void Hid::CancelWritePalmaWaveEntry(Kernel::HLERequestContext& ctx) {
2258 LOG_WARNING(Service_HID, "(STUBBED) called");
2259
2260 IPC::ResponseBuilder rb{ctx, 2};
2261 rb.Push(ResultSuccess);
2262}
2263
2264void Hid::EnablePalmaBoostMode(Kernel::HLERequestContext& ctx) {
2265 LOG_WARNING(Service_HID, "(STUBBED) called");
2266
2267 IPC::ResponseBuilder rb{ctx, 2};
2268 rb.Push(ResultSuccess);
2269}
2270
2271void Hid::GetPalmaBluetoothAddress(Kernel::HLERequestContext& ctx) {
2272 LOG_WARNING(Service_HID, "(STUBBED) called");
2273
2274 IPC::ResponseBuilder rb{ctx, 2};
2275 rb.Push(ResultSuccess);
2276}
2277
2278void Hid::SetDisallowedPalmaConnection(Kernel::HLERequestContext& ctx) {
2279 LOG_WARNING(Service_HID, "(STUBBED) called");
2280
1900 IPC::ResponseBuilder rb{ctx, 2}; 2281 IPC::ResponseBuilder rb{ctx, 2};
1901 rb.Push(ResultSuccess); 2282 rb.Push(ResultSuccess);
1902} 2283}
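The new handlers all share one shape: pop a raw Parameters struct, forward to Controller_Palma, answer through a ResponseBuilder. The padding word inside each 0x10-byte Parameters struct is there so the trailing u64 stays naturally aligned; a standalone layout sketch, using plain types instead of the codebase's padding macros:

    #include <cstddef>
    #include <cstdint>

    struct ParametersSketch {
        std::uint32_t npad_id;                 // offset 0x0
        std::uint32_t pad;                     // offset 0x4, INSERT_PADDING_WORDS_NOINIT(1)
        std::uint64_t applet_resource_user_id; // offset 0x8, 8-byte aligned
    };
    static_assert(sizeof(ParametersSketch) == 0x10, "Parameters has incorrect size.");
    static_assert(offsetof(ParametersSketch, applet_resource_user_id) == 0x8,
                  "the u64 must stay 8-byte aligned");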
diff --git a/src/core/hle/service/hid/hid.h b/src/core/hle/service/hid/hid.h
index ac4333022..340d26fdc 100644
--- a/src/core/hle/service/hid/hid.h
+++ b/src/core/hle/service/hid/hid.h
@@ -33,6 +33,7 @@ enum class HidController : std::size_t {
33 NPad, 33 NPad,
34 Gesture, 34 Gesture,
35 ConsoleSixAxisSensor, 35 ConsoleSixAxisSensor,
36 Palma,
36 37
37 MaxControllers, 38 MaxControllers,
38}; 39};
@@ -166,8 +167,36 @@ private:
166 void FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx); 167 void FinalizeSevenSixAxisSensor(Kernel::HLERequestContext& ctx);
167 void ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx); 168 void ResetSevenSixAxisSensorTimestamp(Kernel::HLERequestContext& ctx);
168 void IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx); 169 void IsUsbFullKeyControllerEnabled(Kernel::HLERequestContext& ctx);
170 void GetPalmaConnectionHandle(Kernel::HLERequestContext& ctx);
171 void InitializePalma(Kernel::HLERequestContext& ctx);
172 void AcquirePalmaOperationCompleteEvent(Kernel::HLERequestContext& ctx);
173 void GetPalmaOperationInfo(Kernel::HLERequestContext& ctx);
174 void PlayPalmaActivity(Kernel::HLERequestContext& ctx);
175 void SetPalmaFrModeType(Kernel::HLERequestContext& ctx);
176 void ReadPalmaStep(Kernel::HLERequestContext& ctx);
177 void EnablePalmaStep(Kernel::HLERequestContext& ctx);
178 void ResetPalmaStep(Kernel::HLERequestContext& ctx);
179 void ReadPalmaApplicationSection(Kernel::HLERequestContext& ctx);
180 void WritePalmaApplicationSection(Kernel::HLERequestContext& ctx);
181 void ReadPalmaUniqueCode(Kernel::HLERequestContext& ctx);
182 void SetPalmaUniqueCodeInvalid(Kernel::HLERequestContext& ctx);
183 void WritePalmaActivityEntry(Kernel::HLERequestContext& ctx);
184 void WritePalmaRgbLedPatternEntry(Kernel::HLERequestContext& ctx);
185 void WritePalmaWaveEntry(Kernel::HLERequestContext& ctx);
186 void SetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx);
187 void GetPalmaDataBaseIdentificationVersion(Kernel::HLERequestContext& ctx);
188 void SuspendPalmaFeature(Kernel::HLERequestContext& ctx);
189 void GetPalmaOperationResult(Kernel::HLERequestContext& ctx);
190 void ReadPalmaPlayLog(Kernel::HLERequestContext& ctx);
191 void ResetPalmaPlayLog(Kernel::HLERequestContext& ctx);
169 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx); 192 void SetIsPalmaAllConnectable(Kernel::HLERequestContext& ctx);
193 void SetIsPalmaPairedConnectable(Kernel::HLERequestContext& ctx);
194 void PairPalma(Kernel::HLERequestContext& ctx);
170 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx); 195 void SetPalmaBoostMode(Kernel::HLERequestContext& ctx);
196 void CancelWritePalmaWaveEntry(Kernel::HLERequestContext& ctx);
197 void EnablePalmaBoostMode(Kernel::HLERequestContext& ctx);
198 void GetPalmaBluetoothAddress(Kernel::HLERequestContext& ctx);
199 void SetDisallowedPalmaConnection(Kernel::HLERequestContext& ctx);
171 void SetNpadCommunicationMode(Kernel::HLERequestContext& ctx); 200 void SetNpadCommunicationMode(Kernel::HLERequestContext& ctx);
172 void GetNpadCommunicationMode(Kernel::HLERequestContext& ctx); 201 void GetNpadCommunicationMode(Kernel::HLERequestContext& ctx);
173 void SetTouchScreenConfiguration(Kernel::HLERequestContext& ctx); 202 void SetTouchScreenConfiguration(Kernel::HLERequestContext& ctx);
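Appending Palma immediately before MaxControllers keeps every existing HidController value stable, which matters because the enum doubles as the index into the applet resource's controller array. A hedged sketch of that lookup; the controllers member name is an assumption:

    // Illustrative only: an HidController value selects a controller slot.
    template <typename T>
    T& GetController(HidController controller) {
        return static_cast<T&>(*controllers[static_cast<std::size_t>(controller)]);
    }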
diff --git a/src/core/hle/service/hid/irs.cpp b/src/core/hle/service/hid/irs.cpp
index c4b44cbf9..6a3453457 100644
--- a/src/core/hle/service/hid/irs.cpp
+++ b/src/core/hle/service/hid/irs.cpp
@@ -542,7 +542,8 @@ Result IRS::IsIrCameraHandleValid(const Core::IrSensor::IrCameraHandle& camera_h
542 542
543Core::IrSensor::DeviceFormat& IRS::GetIrCameraSharedMemoryDeviceEntry( 543Core::IrSensor::DeviceFormat& IRS::GetIrCameraSharedMemoryDeviceEntry(
544 const Core::IrSensor::IrCameraHandle& camera_handle) { 544 const Core::IrSensor::IrCameraHandle& camera_handle) {
545 ASSERT_MSG(sizeof(StatusManager::device) > camera_handle.npad_id, "invalid npad_id"); 545 const auto npad_id_max_index = static_cast<u8>(sizeof(StatusManager::device));
546 ASSERT_MSG(camera_handle.npad_id < npad_id_max_index, "invalid npad_id");
546 return shared_memory->device[camera_handle.npad_id]; 547 return shared_memory->device[camera_handle.npad_id];
547} 548}
548 549
diff --git a/src/core/hle/service/ldn/lan_discovery.cpp b/src/core/hle/service/ldn/lan_discovery.cpp
new file mode 100644
index 000000000..8f3c04550
--- /dev/null
+++ b/src/core/hle/service/ldn/lan_discovery.cpp
@@ -0,0 +1,633 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "core/hle/service/ldn/lan_discovery.h"
5#include "core/internal_network/network.h"
6#include "core/internal_network/network_interface.h"
7
8namespace Service::LDN {
9
10LanStation::LanStation(s8 node_id_, LANDiscovery* discovery_)
11 : node_info(nullptr), status(NodeStatus::Disconnected), node_id(node_id_),
12 discovery(discovery_) {}
13
14LanStation::~LanStation() = default;
15
16NodeStatus LanStation::GetStatus() const {
17 return status;
18}
19
20void LanStation::OnClose() {
21 LOG_INFO(Service_LDN, "OnClose {}", node_id);
22 Reset();
23 discovery->UpdateNodes();
24}
25
26void LanStation::Reset() {
27 status = NodeStatus::Disconnected;
28}
29
30void LanStation::OverrideInfo() {
31 bool connected = GetStatus() == NodeStatus::Connected;
32 node_info->node_id = node_id;
33 node_info->is_connected = connected ? 1 : 0;
34}
35
36LANDiscovery::LANDiscovery(Network::RoomNetwork& room_network_)
37 : stations({{{1, this}, {2, this}, {3, this}, {4, this}, {5, this}, {6, this}, {7, this}}}),
38 room_network{room_network_} {}
39
40LANDiscovery::~LANDiscovery() {
41 if (inited) {
42 Result rc = Finalize();
43 LOG_INFO(Service_LDN, "Finalize: {}", rc.raw);
44 }
45}
46
47void LANDiscovery::InitNetworkInfo() {
48 network_info.common.bssid = GetFakeMac();
49 network_info.common.channel = WifiChannel::Wifi24_6;
50 network_info.common.link_level = LinkLevel::Good;
51 network_info.common.network_type = PackedNetworkType::Ldn;
52 network_info.common.ssid = fake_ssid;
53
54 auto& nodes = network_info.ldn.nodes;
55 for (std::size_t i = 0; i < NodeCountMax; i++) {
56 nodes[i].node_id = static_cast<s8>(i);
57 nodes[i].is_connected = 0;
58 }
59}
60
61void LANDiscovery::InitNodeStateChange() {
62 for (auto& node_update : node_changes) {
63 node_update.state_change = NodeStateChange::None;
64 }
65 for (auto& node_state : node_last_states) {
66 node_state = 0;
67 }
68}
69
70State LANDiscovery::GetState() const {
71 return state;
72}
73
74void LANDiscovery::SetState(State new_state) {
75 state = new_state;
76}
77
78Result LANDiscovery::GetNetworkInfo(NetworkInfo& out_network) const {
79 if (state == State::AccessPointCreated || state == State::StationConnected) {
80 std::memcpy(&out_network, &network_info, sizeof(network_info));
81 return ResultSuccess;
82 }
83
84 return ResultBadState;
85}
86
87Result LANDiscovery::GetNetworkInfo(NetworkInfo& out_network,
88 std::vector<NodeLatestUpdate>& out_updates,
89 std::size_t buffer_count) {
90 if (buffer_count > NodeCountMax) {
91 return ResultInvalidBufferCount;
92 }
93
94 if (state == State::AccessPointCreated || state == State::StationConnected) {
95 std::memcpy(&out_network, &network_info, sizeof(network_info));
96 for (std::size_t i = 0; i < buffer_count; i++) {
97 out_updates[i].state_change = node_changes[i].state_change;
98 node_changes[i].state_change = NodeStateChange::None;
99 }
100 return ResultSuccess;
101 }
102
103 return ResultBadState;
104}
105
106DisconnectReason LANDiscovery::GetDisconnectReason() const {
107 return disconnect_reason;
108}
109
110Result LANDiscovery::Scan(std::vector<NetworkInfo>& networks, u16& count,
111 const ScanFilter& filter) {
112 if (!IsFlagSet(filter.flag, ScanFilterFlag::NetworkType) ||
113 filter.network_type <= NetworkType::All) {
114 if (!IsFlagSet(filter.flag, ScanFilterFlag::Ssid) && filter.ssid.length >= SsidLengthMax) {
115 return ResultBadInput;
116 }
117 }
118
119 {
120 std::scoped_lock lock{packet_mutex};
121 scan_results.clear();
122
123 SendBroadcast(Network::LDNPacketType::Scan);
124 }
125
126 LOG_INFO(Service_LDN, "Waiting for scan replies");
127 std::this_thread::sleep_for(std::chrono::seconds(1));
128
129 std::scoped_lock lock{packet_mutex};
130 for (const auto& [key, info] : scan_results) {
131 if (count >= networks.size()) {
132 break;
133 }
134
135 if (IsFlagSet(filter.flag, ScanFilterFlag::LocalCommunicationId)) {
136 if (filter.network_id.intent_id.local_communication_id !=
137 info.network_id.intent_id.local_communication_id) {
138 continue;
139 }
140 }
141 if (IsFlagSet(filter.flag, ScanFilterFlag::SessionId)) {
142 if (filter.network_id.session_id != info.network_id.session_id) {
143 continue;
144 }
145 }
146 if (IsFlagSet(filter.flag, ScanFilterFlag::NetworkType)) {
147 if (filter.network_type != static_cast<NetworkType>(info.common.network_type)) {
148 continue;
149 }
150 }
151 if (IsFlagSet(filter.flag, ScanFilterFlag::Ssid)) {
152 if (filter.ssid != info.common.ssid) {
153 continue;
154 }
155 }
156 if (IsFlagSet(filter.flag, ScanFilterFlag::SceneId)) {
157 if (filter.network_id.intent_id.scene_id != info.network_id.intent_id.scene_id) {
158 continue;
159 }
160 }
161
162 networks[count++] = info;
163 }
164
165 return ResultSuccess;
166}
167
168Result LANDiscovery::SetAdvertiseData(std::span<const u8> data) {
169 std::scoped_lock lock{packet_mutex};
170 const std::size_t size = data.size();
171 if (size > AdvertiseDataSizeMax) {
172 return ResultAdvertiseDataTooLarge;
173 }
174
175 std::memcpy(network_info.ldn.advertise_data.data(), data.data(), size);
176 network_info.ldn.advertise_data_size = static_cast<u16>(size);
177
178 UpdateNodes();
179
180 return ResultSuccess;
181}
182
183Result LANDiscovery::OpenAccessPoint() {
184 std::scoped_lock lock{packet_mutex};
185 disconnect_reason = DisconnectReason::None;
186 if (state == State::None) {
187 return ResultBadState;
188 }
189
190 ResetStations();
191 SetState(State::AccessPointOpened);
192
193 return ResultSuccess;
194}
195
196Result LANDiscovery::CloseAccessPoint() {
197 std::scoped_lock lock{packet_mutex};
198 if (state == State::None) {
199 return ResultBadState;
200 }
201
202 if (state == State::AccessPointCreated) {
203 DestroyNetwork();
204 }
205
206 ResetStations();
207 SetState(State::Initialized);
208
209 return ResultSuccess;
210}
211
212Result LANDiscovery::OpenStation() {
213 std::scoped_lock lock{packet_mutex};
214 disconnect_reason = DisconnectReason::None;
215 if (state == State::None) {
216 return ResultBadState;
217 }
218
219 ResetStations();
220 SetState(State::StationOpened);
221
222 return ResultSuccess;
223}
224
225Result LANDiscovery::CloseStation() {
226 std::scoped_lock lock{packet_mutex};
227 if (state == State::None) {
228 return ResultBadState;
229 }
230
231 if (state == State::StationConnected) {
232 Disconnect();
233 }
234
235 ResetStations();
236 SetState(State::Initialized);
237
238 return ResultSuccess;
239}
240
241Result LANDiscovery::CreateNetwork(const SecurityConfig& security_config,
242 const UserConfig& user_config,
243 const NetworkConfig& network_config) {
244 std::scoped_lock lock{packet_mutex};
245
246 if (state != State::AccessPointOpened) {
247 return ResultBadState;
248 }
249
250 InitNetworkInfo();
251 network_info.ldn.node_count_max = network_config.node_count_max;
252 network_info.ldn.security_mode = security_config.security_mode;
253
254 if (network_config.channel == WifiChannel::Default) {
255 network_info.common.channel = WifiChannel::Wifi24_6;
256 } else {
257 network_info.common.channel = network_config.channel;
258 }
259
260 std::independent_bits_engine<std::mt19937, 64, u64> bits_engine;
261 network_info.network_id.session_id.high = bits_engine();
262 network_info.network_id.session_id.low = bits_engine();
263 network_info.network_id.intent_id = network_config.intent_id;
264
265 NodeInfo& node0 = network_info.ldn.nodes[0];
266 const Result rc2 = GetNodeInfo(node0, user_config, network_config.local_communication_version);
267 if (rc2.IsError()) {
268 return ResultAccessPointConnectionFailed;
269 }
270
271 SetState(State::AccessPointCreated);
272
273 InitNodeStateChange();
274 node0.is_connected = 1;
275 UpdateNodes();
276
277 return rc2;
278}
279
280Result LANDiscovery::DestroyNetwork() {
281 for (auto local_ip : connected_clients) {
282 SendPacket(Network::LDNPacketType::DestroyNetwork, local_ip);
283 }
284
285 ResetStations();
286
287 SetState(State::AccessPointOpened);
288 lan_event();
289
290 return ResultSuccess;
291}
292
293Result LANDiscovery::Connect(const NetworkInfo& network_info_, const UserConfig& user_config,
294 u16 local_communication_version) {
295 std::scoped_lock lock{packet_mutex};
296 if (network_info_.ldn.node_count == 0) {
297 return ResultInvalidNodeCount;
298 }
299
300 Result rc = GetNodeInfo(node_info, user_config, local_communication_version);
301 if (rc.IsError()) {
302 return ResultConnectionFailed;
303 }
304
305 Ipv4Address node_host = network_info_.ldn.nodes[0].ipv4_address;
306 std::reverse(std::begin(node_host), std::end(node_host)); // htonl
307 host_ip = node_host;
308 SendPacket(Network::LDNPacketType::Connect, node_info, *host_ip);
309
310 InitNodeStateChange();
311
312 std::this_thread::sleep_for(std::chrono::seconds(1));
313
314 return ResultSuccess;
315}
316
317Result LANDiscovery::Disconnect() {
318 if (host_ip) {
319 SendPacket(Network::LDNPacketType::Disconnect, node_info, *host_ip);
320 }
321
322 SetState(State::StationOpened);
323 lan_event();
324
325 return ResultSuccess;
326}
327
328Result LANDiscovery::Initialize(LanEventFunc lan_event_, bool listening) {
329 std::scoped_lock lock{packet_mutex};
330 if (inited) {
331 return ResultSuccess;
332 }
333
334 for (auto& station : stations) {
335 station.discovery = this;
336 station.node_info = &network_info.ldn.nodes[station.node_id];
337 station.Reset();
338 }
339
340 connected_clients.clear();
341 lan_event = lan_event_;
342
343 SetState(State::Initialized);
344
345 inited = true;
346 return ResultSuccess;
347}
348
349Result LANDiscovery::Finalize() {
350 std::scoped_lock lock{packet_mutex};
351 Result rc = ResultSuccess;
352
353 if (inited) {
354 if (state == State::AccessPointCreated) {
355 DestroyNetwork();
356 }
357 if (state == State::StationConnected) {
358 Disconnect();
359 }
360
361 ResetStations();
362 inited = false;
363 }
364
365 SetState(State::None);
366
367 return rc;
368}
369
370void LANDiscovery::ResetStations() {
371 for (auto& station : stations) {
372 station.Reset();
373 }
374 connected_clients.clear();
375}
376
377void LANDiscovery::UpdateNodes() {
378 u8 count = 0;
379 for (auto& station : stations) {
380 bool connected = station.GetStatus() == NodeStatus::Connected;
381 if (connected) {
382 count++;
383 }
384 station.OverrideInfo();
385 }
386 network_info.ldn.node_count = count + 1;
387
388 for (auto local_ip : connected_clients) {
389 SendPacket(Network::LDNPacketType::SyncNetwork, network_info, local_ip);
390 }
391
392 OnNetworkInfoChanged();
393}
394
395void LANDiscovery::OnSyncNetwork(const NetworkInfo& info) {
396 network_info = info;
397 if (state == State::StationOpened) {
398 SetState(State::StationConnected);
399 }
400 OnNetworkInfoChanged();
401}
402
403void LANDiscovery::OnDisconnectFromHost() {
404 LOG_INFO(Service_LDN, "OnDisconnectFromHost state: {}", static_cast<int>(state));
405 host_ip = std::nullopt;
406 if (state == State::StationConnected) {
407 SetState(State::StationOpened);
408 lan_event();
409 }
410}
411
412void LANDiscovery::OnNetworkInfoChanged() {
413 if (IsNodeStateChanged()) {
414 lan_event();
415 }
416 return;
417}
418
419Network::IPv4Address LANDiscovery::GetLocalIp() const {
420 Network::IPv4Address local_ip{0xFF, 0xFF, 0xFF, 0xFF};
421 if (auto room_member = room_network.GetRoomMember().lock()) {
422 if (room_member->IsConnected()) {
423 local_ip = room_member->GetFakeIpAddress();
424 }
425 }
426 return local_ip;
427}
428
429template <typename Data>
430void LANDiscovery::SendPacket(Network::LDNPacketType type, const Data& data,
431 Ipv4Address remote_ip) {
432 Network::LDNPacket packet;
433 packet.type = type;
434
435 packet.broadcast = false;
436 packet.local_ip = GetLocalIp();
437 packet.remote_ip = remote_ip;
438
439 packet.data.resize(sizeof(data));
440 std::memcpy(packet.data.data(), &data, sizeof(data));
441 SendPacket(packet);
442}
443
444void LANDiscovery::SendPacket(Network::LDNPacketType type, Ipv4Address remote_ip) {
445 Network::LDNPacket packet;
446 packet.type = type;
447
448 packet.broadcast = false;
449 packet.local_ip = GetLocalIp();
450 packet.remote_ip = remote_ip;
451
452 SendPacket(packet);
453}
454
455template <typename Data>
456void LANDiscovery::SendBroadcast(Network::LDNPacketType type, const Data& data) {
457 Network::LDNPacket packet;
458 packet.type = type;
459
460 packet.broadcast = true;
461 packet.local_ip = GetLocalIp();
462
463 packet.data.resize(sizeof(data));
464 std::memcpy(packet.data.data(), &data, sizeof(data));
465 SendPacket(packet);
466}
467
468void LANDiscovery::SendBroadcast(Network::LDNPacketType type) {
469 Network::LDNPacket packet;
470 packet.type = type;
471
472 packet.broadcast = true;
473 packet.local_ip = GetLocalIp();
474
475 SendPacket(packet);
476}
477
478void LANDiscovery::SendPacket(const Network::LDNPacket& packet) {
479 if (auto room_member = room_network.GetRoomMember().lock()) {
480 if (room_member->IsConnected()) {
481 room_member->SendLdnPacket(packet);
482 }
483 }
484}
485
486void LANDiscovery::ReceivePacket(const Network::LDNPacket& packet) {
487 std::scoped_lock lock{packet_mutex};
488 switch (packet.type) {
489 case Network::LDNPacketType::Scan: {
490 LOG_INFO(Frontend, "Scan packet received!");
491 if (state == State::AccessPointCreated) {
492 // Reply to the sender
493 SendPacket(Network::LDNPacketType::ScanResp, network_info, packet.local_ip);
494 }
495 break;
496 }
497 case Network::LDNPacketType::ScanResp: {
498 LOG_INFO(Frontend, "ScanResp packet received!");
499
500 NetworkInfo info{};
501 std::memcpy(&info, packet.data.data(), sizeof(NetworkInfo));
502 scan_results.insert({info.common.bssid, info});
503
504 break;
505 }
506 case Network::LDNPacketType::Connect: {
507 LOG_INFO(Frontend, "Connect packet received!");
508
509 NodeInfo info{};
510 std::memcpy(&info, packet.data.data(), sizeof(NodeInfo));
511
512 connected_clients.push_back(packet.local_ip);
513
514 for (LanStation& station : stations) {
515 if (station.status != NodeStatus::Connected) {
516 *station.node_info = info;
517 station.status = NodeStatus::Connected;
518 break;
519 }
520 }
521
522 UpdateNodes();
523
524 break;
525 }
526 case Network::LDNPacketType::Disconnect: {
527 LOG_INFO(Frontend, "Disconnect packet received!");
528
529 connected_clients.erase(
530 std::remove(connected_clients.begin(), connected_clients.end(), packet.local_ip),
531 connected_clients.end());
532
533 NodeInfo info{};
534 std::memcpy(&info, packet.data.data(), sizeof(NodeInfo));
535
536 for (LanStation& station : stations) {
537 if (station.status == NodeStatus::Connected &&
538 station.node_info->mac_address == info.mac_address) {
539 station.OnClose();
540 break;
541 }
542 }
543
544 break;
545 }
546 case Network::LDNPacketType::DestroyNetwork: {
547 ResetStations();
548 OnDisconnectFromHost();
549 break;
550 }
551 case Network::LDNPacketType::SyncNetwork: {
552 if (state == State::StationOpened || state == State::StationConnected) {
553 LOG_INFO(Frontend, "SyncNetwork packet received!");
554 NetworkInfo info{};
555 std::memcpy(&info, packet.data.data(), sizeof(NetworkInfo));
556
557 OnSyncNetwork(info);
558 } else {
559 LOG_INFO(Frontend, "SyncNetwork packet received but in wrong state!");
560 }
561
562 break;
563 }
564 default: {
565 LOG_INFO(Frontend, "ReceivePacket unhandled type {}", static_cast<int>(packet.type));
566 break;
567 }
568 }
569}
570
571bool LANDiscovery::IsNodeStateChanged() {
572 bool changed = false;
573 const auto& nodes = network_info.ldn.nodes;
574 for (int i = 0; i < NodeCountMax; i++) {
575 if (nodes[i].is_connected != node_last_states[i]) {
576 if (nodes[i].is_connected) {
577 node_changes[i].state_change |= NodeStateChange::Connect;
578 } else {
579 node_changes[i].state_change |= NodeStateChange::Disconnect;
580 }
581 node_last_states[i] = nodes[i].is_connected;
582 changed = true;
583 }
584 }
585 return changed;
586}
587
588bool LANDiscovery::IsFlagSet(ScanFilterFlag flag, ScanFilterFlag search_flag) const {
589 const auto flag_value = static_cast<u32>(flag);
590 const auto search_flag_value = static_cast<u32>(search_flag);
591 return (flag_value & search_flag_value) == search_flag_value;
592}
593
594int LANDiscovery::GetStationCount() const {
595 return static_cast<int>(
596 std::count_if(stations.begin(), stations.end(), [](const auto& station) {
597 return station.GetStatus() != NodeStatus::Disconnected;
598 }));
599}
600
601MacAddress LANDiscovery::GetFakeMac() const {
602 MacAddress mac{};
603 mac.raw[0] = 0x02;
604 mac.raw[1] = 0x00;
605
606 const auto ip = GetLocalIp();
607 std::memcpy(mac.raw.data() + 2, &ip, sizeof(ip));
608
609 return mac;
610}
611
612Result LANDiscovery::GetNodeInfo(NodeInfo& node, const UserConfig& user_config,
613 u16 local_communication_version) {
614 const auto network_interface = Network::GetSelectedNetworkInterface();
615
616 if (!network_interface) {
617 LOG_ERROR(Service_LDN, "No network interface available");
618 return ResultNoIpAddress;
619 }
620
621 node.mac_address = GetFakeMac();
622 node.is_connected = 1;
623 std::memcpy(node.user_name.data(), user_config.user_name.data(), UserNameBytesMax + 1);
624 node.local_communication_version = local_communication_version;
625
626 Ipv4Address current_address = GetLocalIp();
627 std::reverse(std::begin(current_address), std::end(current_address)); // ntohl
628 node.ipv4_address = current_address;
629
630 return ResultSuccess;
631}
632
633} // namespace Service::LDN
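A hedged usage sketch of the scan flow above, given a LANDiscovery instance named lan_discovery and the surrounding includes: broadcast a Scan packet, let the one-second reply window elapse inside Scan(), then read back the filtered results. The buffer capacity of 16 and the filter contents are illustrative choices, and NetworkType::Ldn is assumed to be an enumerator of NetworkType:

    u16 count = 0;
    std::vector<NetworkInfo> networks(16); // illustrative capacity
    ScanFilter filter{};
    filter.flag = ScanFilterFlag::NetworkType; // only match on network type
    filter.network_type = NetworkType::Ldn;
    if (lan_discovery.Scan(networks, count, filter).IsSuccess()) {
        for (u16 i = 0; i < count; ++i) {
            LOG_INFO(Service_LDN, "Found '{}'", networks[i].common.ssid.GetStringValue());
        }
    }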
diff --git a/src/core/hle/service/ldn/lan_discovery.h b/src/core/hle/service/ldn/lan_discovery.h
new file mode 100644
index 000000000..3833cd764
--- /dev/null
+++ b/src/core/hle/service/ldn/lan_discovery.h
@@ -0,0 +1,134 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7#include <cstring>
8#include <functional>
9#include <memory>
10#include <mutex>
11#include <optional>
12#include <random>
13#include <span>
14#include <thread>
15#include <unordered_map>
16
17#include "common/logging/log.h"
18#include "common/socket_types.h"
19#include "core/hle/result.h"
20#include "core/hle/service/ldn/ldn_results.h"
21#include "core/hle/service/ldn/ldn_types.h"
22#include "network/network.h"
23
24namespace Service::LDN {
25
26class LANDiscovery;
27
28class LanStation {
29public:
30 LanStation(s8 node_id_, LANDiscovery* discovery_);
31 ~LanStation();
32
33 void OnClose();
34 NodeStatus GetStatus() const;
35 void Reset();
36 void OverrideInfo();
37
38protected:
39 friend class LANDiscovery;
40 NodeInfo* node_info;
41 NodeStatus status;
42 s8 node_id;
43 LANDiscovery* discovery;
44};
45
46class LANDiscovery {
47public:
48 using LanEventFunc = std::function<void()>;
49
50 LANDiscovery(Network::RoomNetwork& room_network_);
51 ~LANDiscovery();
52
53 State GetState() const;
54 void SetState(State new_state);
55
56 Result GetNetworkInfo(NetworkInfo& out_network) const;
57 Result GetNetworkInfo(NetworkInfo& out_network, std::vector<NodeLatestUpdate>& out_updates,
58 std::size_t buffer_count);
59
60 DisconnectReason GetDisconnectReason() const;
61 Result Scan(std::vector<NetworkInfo>& networks, u16& count, const ScanFilter& filter);
62 Result SetAdvertiseData(std::span<const u8> data);
63
64 Result OpenAccessPoint();
65 Result CloseAccessPoint();
66
67 Result OpenStation();
68 Result CloseStation();
69
70 Result CreateNetwork(const SecurityConfig& security_config, const UserConfig& user_config,
71 const NetworkConfig& network_config);
72 Result DestroyNetwork();
73
74 Result Connect(const NetworkInfo& network_info_, const UserConfig& user_config,
75 u16 local_communication_version);
76 Result Disconnect();
77
78 Result Initialize(LanEventFunc lan_event_ = empty_func, bool listening = true);
79 Result Finalize();
80
81 void ReceivePacket(const Network::LDNPacket& packet);
82
83protected:
84 friend class LanStation;
85
86 void InitNetworkInfo();
87 void InitNodeStateChange();
88
89 void ResetStations();
90 void UpdateNodes();
91
92 void OnSyncNetwork(const NetworkInfo& info);
93 void OnDisconnectFromHost();
94 void OnNetworkInfoChanged();
95
96 bool IsNodeStateChanged();
97 bool IsFlagSet(ScanFilterFlag flag, ScanFilterFlag search_flag) const;
98 int GetStationCount() const;
99 MacAddress GetFakeMac() const;
100 Result GetNodeInfo(NodeInfo& node, const UserConfig& user_config,
101 u16 local_communication_version);
102
103 Network::IPv4Address GetLocalIp() const;
104 template <typename Data>
105 void SendPacket(Network::LDNPacketType type, const Data& data, Ipv4Address remote_ip);
106 void SendPacket(Network::LDNPacketType type, Ipv4Address remote_ip);
107 template <typename Data>
108 void SendBroadcast(Network::LDNPacketType type, const Data& data);
109 void SendBroadcast(Network::LDNPacketType type);
110 void SendPacket(const Network::LDNPacket& packet);
111
112 static const LanEventFunc empty_func;
113 static constexpr Ssid fake_ssid{"YuzuFakeSsidForLdn"};
114
115 bool inited{};
116 std::mutex packet_mutex;
117 std::array<LanStation, StationCountMax> stations;
118 std::array<NodeLatestUpdate, NodeCountMax> node_changes{};
119 std::array<u8, NodeCountMax> node_last_states{};
120 std::unordered_map<MacAddress, NetworkInfo, MACAddressHash> scan_results{};
121 NodeInfo node_info{};
122 NetworkInfo network_info{};
123 State state{State::None};
124 DisconnectReason disconnect_reason{DisconnectReason::None};
125
126 // TODO (flTobi): Should this be an std::set?
127 std::vector<Ipv4Address> connected_clients;
128 std::optional<Ipv4Address> host_ip;
129
130 LanEventFunc lan_event;
131
132 Network::RoomNetwork& room_network;
133};
134} // namespace Service::LDN
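scan_results keys an unordered_map on MacAddress, which is why the MACAddressHash functor from ldn_types.h is named as the hasher. Its assumed shape, for illustration only (the real definition may combine the octets differently):

    // Illustrative only: fold the six octets of a MAC address into a size_t.
    struct MACAddressHashSketch {
        std::size_t operator()(const MacAddress& address) const {
            std::size_t hash = 0;
            for (const u8 octet : address.raw) {
                hash = (hash << 8) | octet; // pack each octet into the low byte
            }
            return hash;
        }
    };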
diff --git a/src/core/hle/service/ldn/ldn.cpp b/src/core/hle/service/ldn/ldn.cpp
index c11daff54..ea3e7e55a 100644
--- a/src/core/hle/service/ldn/ldn.cpp
+++ b/src/core/hle/service/ldn/ldn.cpp
@@ -4,11 +4,13 @@
4#include <memory> 4#include <memory>
5 5
6#include "core/core.h" 6#include "core/core.h"
7#include "core/hle/service/ldn/lan_discovery.h"
7#include "core/hle/service/ldn/ldn.h" 8#include "core/hle/service/ldn/ldn.h"
8#include "core/hle/service/ldn/ldn_results.h" 9#include "core/hle/service/ldn/ldn_results.h"
9#include "core/hle/service/ldn/ldn_types.h" 10#include "core/hle/service/ldn/ldn_types.h"
10#include "core/internal_network/network.h" 11#include "core/internal_network/network.h"
11#include "core/internal_network/network_interface.h" 12#include "core/internal_network/network_interface.h"
13#include "network/network.h"
12 14
13// This is defined by synchapi.h and conflicts with ServiceContext::CreateEvent 15// This is defined by synchapi.h and conflicts with ServiceContext::CreateEvent
14#undef CreateEvent 16#undef CreateEvent
@@ -105,13 +107,13 @@ class IUserLocalCommunicationService final
105public: 107public:
106 explicit IUserLocalCommunicationService(Core::System& system_) 108 explicit IUserLocalCommunicationService(Core::System& system_)
107 : ServiceFramework{system_, "IUserLocalCommunicationService", ServiceThreadType::CreateNew}, 109 : ServiceFramework{system_, "IUserLocalCommunicationService", ServiceThreadType::CreateNew},
108 service_context{system, "IUserLocalCommunicationService"}, room_network{ 110 service_context{system, "IUserLocalCommunicationService"},
109 system_.GetRoomNetwork()} { 111 room_network{system_.GetRoomNetwork()}, lan_discovery{room_network} {
110 // clang-format off 112 // clang-format off
111 static const FunctionInfo functions[] = { 113 static const FunctionInfo functions[] = {
112 {0, &IUserLocalCommunicationService::GetState, "GetState"}, 114 {0, &IUserLocalCommunicationService::GetState, "GetState"},
113 {1, &IUserLocalCommunicationService::GetNetworkInfo, "GetNetworkInfo"}, 115 {1, &IUserLocalCommunicationService::GetNetworkInfo, "GetNetworkInfo"},
114 {2, nullptr, "GetIpv4Address"}, 116 {2, &IUserLocalCommunicationService::GetIpv4Address, "GetIpv4Address"},
115 {3, &IUserLocalCommunicationService::GetDisconnectReason, "GetDisconnectReason"}, 117 {3, &IUserLocalCommunicationService::GetDisconnectReason, "GetDisconnectReason"},
116 {4, &IUserLocalCommunicationService::GetSecurityParameter, "GetSecurityParameter"}, 118 {4, &IUserLocalCommunicationService::GetSecurityParameter, "GetSecurityParameter"},
117 {5, &IUserLocalCommunicationService::GetNetworkConfig, "GetNetworkConfig"}, 119 {5, &IUserLocalCommunicationService::GetNetworkConfig, "GetNetworkConfig"},
@@ -119,7 +121,7 @@ public:
119 {101, &IUserLocalCommunicationService::GetNetworkInfoLatestUpdate, "GetNetworkInfoLatestUpdate"}, 121 {101, &IUserLocalCommunicationService::GetNetworkInfoLatestUpdate, "GetNetworkInfoLatestUpdate"},
120 {102, &IUserLocalCommunicationService::Scan, "Scan"}, 122 {102, &IUserLocalCommunicationService::Scan, "Scan"},
121 {103, &IUserLocalCommunicationService::ScanPrivate, "ScanPrivate"}, 123 {103, &IUserLocalCommunicationService::ScanPrivate, "ScanPrivate"},
122 {104, nullptr, "SetWirelessControllerRestriction"}, 124 {104, &IUserLocalCommunicationService::SetWirelessControllerRestriction, "SetWirelessControllerRestriction"},
123 {200, &IUserLocalCommunicationService::OpenAccessPoint, "OpenAccessPoint"}, 125 {200, &IUserLocalCommunicationService::OpenAccessPoint, "OpenAccessPoint"},
124 {201, &IUserLocalCommunicationService::CloseAccessPoint, "CloseAccessPoint"}, 126 {201, &IUserLocalCommunicationService::CloseAccessPoint, "CloseAccessPoint"},
125 {202, &IUserLocalCommunicationService::CreateNetwork, "CreateNetwork"}, 127 {202, &IUserLocalCommunicationService::CreateNetwork, "CreateNetwork"},
@@ -148,16 +150,30 @@ public:
148 } 150 }
149 151
150 ~IUserLocalCommunicationService() { 152 ~IUserLocalCommunicationService() {
153 if (is_initialized) {
154 if (auto room_member = room_network.GetRoomMember().lock()) {
155 room_member->Unbind(ldn_packet_received);
156 }
157 }
158
151 service_context.CloseEvent(state_change_event); 159 service_context.CloseEvent(state_change_event);
152 } 160 }
153 161
162 /// Callback to parse and handle a received LDN packet.
163 void OnLDNPacketReceived(const Network::LDNPacket& packet) {
164 lan_discovery.ReceivePacket(packet);
165 }
166
154 void OnEventFired() { 167 void OnEventFired() {
155 state_change_event->GetWritableEvent().Signal(); 168 state_change_event->GetWritableEvent().Signal();
156 } 169 }
157 170
158 void GetState(Kernel::HLERequestContext& ctx) { 171 void GetState(Kernel::HLERequestContext& ctx) {
159 State state = State::Error; 172 State state = State::Error;
160 LOG_WARNING(Service_LDN, "(STUBBED) called, state = {}", state); 173
174 if (is_initialized) {
175 state = lan_discovery.GetState();
176 }
161 177
162 IPC::ResponseBuilder rb{ctx, 3}; 178 IPC::ResponseBuilder rb{ctx, 3};
163 rb.Push(ResultSuccess); 179 rb.Push(ResultSuccess);
@@ -175,7 +191,7 @@ public:
175 } 191 }
176 192
177 NetworkInfo network_info{}; 193 NetworkInfo network_info{};
178 const auto rc = ResultSuccess; 194 const auto rc = lan_discovery.GetNetworkInfo(network_info);
179 if (rc.IsError()) { 195 if (rc.IsError()) {
180 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); 196 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw);
181 IPC::ResponseBuilder rb{ctx, 2}; 197 IPC::ResponseBuilder rb{ctx, 2};
@@ -183,28 +199,50 @@ public:
183 return; 199 return;
184 } 200 }
185 201
186 LOG_WARNING(Service_LDN, "(STUBBED) called, ssid='{}', nodes={}",
187 network_info.common.ssid.GetStringValue(), network_info.ldn.node_count);
188
189 ctx.WriteBuffer<NetworkInfo>(network_info); 202 ctx.WriteBuffer<NetworkInfo>(network_info);
190 IPC::ResponseBuilder rb{ctx, 2}; 203 IPC::ResponseBuilder rb{ctx, 2};
191 rb.Push(rc); 204 rb.Push(ResultSuccess);
192 } 205 }
193 206
194 void GetDisconnectReason(Kernel::HLERequestContext& ctx) { 207 void GetIpv4Address(Kernel::HLERequestContext& ctx) {
195 const auto disconnect_reason = DisconnectReason::None; 208 const auto network_interface = Network::GetSelectedNetworkInterface();
209
210 if (!network_interface) {
211 LOG_ERROR(Service_LDN, "No network interface available");
212 IPC::ResponseBuilder rb{ctx, 2};
213 rb.Push(ResultNoIpAddress);
214 return;
215 }
196 216
197 LOG_WARNING(Service_LDN, "(STUBBED) called, disconnect_reason={}", disconnect_reason); 217 Ipv4Address current_address{Network::TranslateIPv4(network_interface->ip_address)};
218 Ipv4Address subnet_mask{Network::TranslateIPv4(network_interface->subnet_mask)};
219
220 // When we're connected to a room, spoof the host's IP address
221 if (auto room_member = room_network.GetRoomMember().lock()) {
222 if (room_member->IsConnected()) {
223 current_address = room_member->GetFakeIpAddress();
224 }
225 }
226
227 std::reverse(std::begin(current_address), std::end(current_address)); // ntohl
228 std::reverse(std::begin(subnet_mask), std::end(subnet_mask)); // ntohl
229
230 IPC::ResponseBuilder rb{ctx, 4};
231 rb.Push(ResultSuccess);
232 rb.PushRaw(current_address);
233 rb.PushRaw(subnet_mask);
234 }
198 235
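The rewritten GetIpv4Address keeps addresses as raw 4-byte arrays and flips them in place instead of calling ntohl on a packed integer. A minimal sketch of that conversion, assuming only the std::array alias that ldn_types.h introduces further down (the helper name is illustrative):

#include <algorithm>
#include <array>
#include <cstdint>

using Ipv4AddressBytes = std::array<std::uint8_t, 4>;

// Equivalent of the two std::reverse calls above: reversing the four bytes in
// place swaps between host and network order without an integer round-trip.
inline Ipv4AddressBytes ToNetworkOrder(Ipv4AddressBytes address) {
    std::reverse(address.begin(), address.end());
    return address;
}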
236 void GetDisconnectReason(Kernel::HLERequestContext& ctx) {
199 IPC::ResponseBuilder rb{ctx, 3}; 237 IPC::ResponseBuilder rb{ctx, 3};
200 rb.Push(ResultSuccess); 238 rb.Push(ResultSuccess);
201 rb.PushEnum(disconnect_reason); 239 rb.PushEnum(lan_discovery.GetDisconnectReason());
202 } 240 }
203 241
204 void GetSecurityParameter(Kernel::HLERequestContext& ctx) { 242 void GetSecurityParameter(Kernel::HLERequestContext& ctx) {
205 SecurityParameter security_parameter{}; 243 SecurityParameter security_parameter{};
206 NetworkInfo info{}; 244 NetworkInfo info{};
207 const Result rc = ResultSuccess; 245 const Result rc = lan_discovery.GetNetworkInfo(info);
208 246
209 if (rc.IsError()) { 247 if (rc.IsError()) {
210 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); 248 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw);
@@ -217,8 +255,6 @@ public:
217 std::memcpy(security_parameter.data.data(), info.ldn.security_parameter.data(), 255 std::memcpy(security_parameter.data.data(), info.ldn.security_parameter.data(),
218 sizeof(SecurityParameter::data)); 256 sizeof(SecurityParameter::data));
219 257
220 LOG_WARNING(Service_LDN, "(STUBBED) called");
221
222 IPC::ResponseBuilder rb{ctx, 10}; 258 IPC::ResponseBuilder rb{ctx, 10};
223 rb.Push(rc); 259 rb.Push(rc);
224 rb.PushRaw<SecurityParameter>(security_parameter); 260 rb.PushRaw<SecurityParameter>(security_parameter);
@@ -227,7 +263,7 @@ public:
227 void GetNetworkConfig(Kernel::HLERequestContext& ctx) { 263 void GetNetworkConfig(Kernel::HLERequestContext& ctx) {
228 NetworkConfig config{}; 264 NetworkConfig config{};
229 NetworkInfo info{}; 265 NetworkInfo info{};
230 const Result rc = ResultSuccess; 266 const Result rc = lan_discovery.GetNetworkInfo(info);
231 267
232 if (rc.IsError()) { 268 if (rc.IsError()) {
233 LOG_ERROR(Service_LDN, "NetworkConfig is not valid {}", rc.raw); 269 LOG_ERROR(Service_LDN, "NetworkConfig is not valid {}", rc.raw);
@@ -241,12 +277,6 @@ public:
241 config.node_count_max = info.ldn.node_count_max; 277 config.node_count_max = info.ldn.node_count_max;
242 config.local_communication_version = info.ldn.nodes[0].local_communication_version; 278 config.local_communication_version = info.ldn.nodes[0].local_communication_version;
243 279
244 LOG_WARNING(Service_LDN,
245 "(STUBBED) called, intent_id={}/{}, channel={}, node_count_max={}, "
246 "local_communication_version={}",
247 config.intent_id.local_communication_id, config.intent_id.scene_id,
248 config.channel, config.node_count_max, config.local_communication_version);
249
250 IPC::ResponseBuilder rb{ctx, 10}; 280 IPC::ResponseBuilder rb{ctx, 10};
251 rb.Push(rc); 281 rb.Push(rc);
252 rb.PushRaw<NetworkConfig>(config); 282 rb.PushRaw<NetworkConfig>(config);
@@ -265,17 +295,17 @@ public:
265 const std::size_t node_buffer_count = ctx.GetWriteBufferSize(1) / sizeof(NodeLatestUpdate); 295 const std::size_t node_buffer_count = ctx.GetWriteBufferSize(1) / sizeof(NodeLatestUpdate);
266 296
267 if (node_buffer_count == 0 || network_buffer_size != sizeof(NetworkInfo)) { 297 if (node_buffer_count == 0 || network_buffer_size != sizeof(NetworkInfo)) {
268 LOG_ERROR(Service_LDN, "Invalid buffer size {}, {}", network_buffer_size, 298 LOG_ERROR(Service_LDN, "Invalid buffer, size = {}, count = {}", network_buffer_size,
269 node_buffer_count); 299 node_buffer_count);
270 IPC::ResponseBuilder rb{ctx, 2}; 300 IPC::ResponseBuilder rb{ctx, 2};
271 rb.Push(ResultBadInput); 301 rb.Push(ResultBadInput);
272 return; 302 return;
273 } 303 }
274 304
275 NetworkInfo info; 305 NetworkInfo info{};
276 std::vector<NodeLatestUpdate> latest_update(node_buffer_count); 306 std::vector<NodeLatestUpdate> latest_update(node_buffer_count);
277 307
278 const auto rc = ResultSuccess; 308 const auto rc = lan_discovery.GetNetworkInfo(info, latest_update, latest_update.size());
279 if (rc.IsError()) { 309 if (rc.IsError()) {
280 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw); 310 LOG_ERROR(Service_LDN, "NetworkInfo is not valid {}", rc.raw);
281 IPC::ResponseBuilder rb{ctx, 2}; 311 IPC::ResponseBuilder rb{ctx, 2};
@@ -283,9 +313,6 @@ public:
283 return; 313 return;
284 } 314 }
285 315
286 LOG_WARNING(Service_LDN, "(STUBBED) called, ssid='{}', nodes={}",
287 info.common.ssid.GetStringValue(), info.ldn.node_count);
288
289 ctx.WriteBuffer(info, 0); 316 ctx.WriteBuffer(info, 0);
290 ctx.WriteBuffer(latest_update, 1); 317 ctx.WriteBuffer(latest_update, 1);
291 318
@@ -317,92 +344,78 @@ public:
317 344
318 u16 count = 0; 345 u16 count = 0;
319 std::vector<NetworkInfo> network_infos(network_info_size); 346 std::vector<NetworkInfo> network_infos(network_info_size);
347 Result rc = lan_discovery.Scan(network_infos, count, scan_filter);
320 348
321 LOG_WARNING(Service_LDN, 349 LOG_INFO(Service_LDN,
322 "(STUBBED) called, channel={}, filter_scan_flag={}, filter_network_type={}", 350 "called, channel={}, filter_scan_flag={}, filter_network_type={}, is_private={}",
323 channel, scan_filter.flag, scan_filter.network_type); 351 channel, scan_filter.flag, scan_filter.network_type, is_private);
324 352
325 ctx.WriteBuffer(network_infos); 353 ctx.WriteBuffer(network_infos);
326 354
327 IPC::ResponseBuilder rb{ctx, 3}; 355 IPC::ResponseBuilder rb{ctx, 3};
328 rb.Push(ResultSuccess); 356 rb.Push(rc);
329 rb.Push<u32>(count); 357 rb.Push<u32>(count);
330 } 358 }
331 359
332 void OpenAccessPoint(Kernel::HLERequestContext& ctx) { 360 void SetWirelessControllerRestriction(Kernel::HLERequestContext& ctx) {
333 LOG_WARNING(Service_LDN, "(STUBBED) called"); 361 LOG_WARNING(Service_LDN, "(STUBBED) called");
334 362
335 IPC::ResponseBuilder rb{ctx, 2}; 363 IPC::ResponseBuilder rb{ctx, 2};
336 rb.Push(ResultSuccess); 364 rb.Push(ResultSuccess);
337 } 365 }
338 366
367 void OpenAccessPoint(Kernel::HLERequestContext& ctx) {
368 LOG_INFO(Service_LDN, "called");
369
370 IPC::ResponseBuilder rb{ctx, 2};
371 rb.Push(lan_discovery.OpenAccessPoint());
372 }
373
339 void CloseAccessPoint(Kernel::HLERequestContext& ctx) { 374 void CloseAccessPoint(Kernel::HLERequestContext& ctx) {
340 LOG_WARNING(Service_LDN, "(STUBBED) called"); 375 LOG_INFO(Service_LDN, "called");
341 376
342 IPC::ResponseBuilder rb{ctx, 2}; 377 IPC::ResponseBuilder rb{ctx, 2};
343 rb.Push(ResultSuccess); 378 rb.Push(lan_discovery.CloseAccessPoint());
344 } 379 }
345 380
346 void CreateNetwork(Kernel::HLERequestContext& ctx) { 381 void CreateNetwork(Kernel::HLERequestContext& ctx) {
347 IPC::RequestParser rp{ctx}; 382 LOG_INFO(Service_LDN, "called");
348 struct Parameters {
349 SecurityConfig security_config;
350 UserConfig user_config;
351 INSERT_PADDING_WORDS_NOINIT(1);
352 NetworkConfig network_config;
353 };
354 static_assert(sizeof(Parameters) == 0x98, "Parameters has incorrect size.");
355 383
356 const auto parameters{rp.PopRaw<Parameters>()}; 384 CreateNetworkImpl(ctx);
385 }
357 386
358 LOG_WARNING(Service_LDN, 387 void CreateNetworkPrivate(Kernel::HLERequestContext& ctx) {
359 "(STUBBED) called, passphrase_size={}, security_mode={}, " 388 LOG_INFO(Service_LDN, "called");
360 "local_communication_version={}",
361 parameters.security_config.passphrase_size,
362 parameters.security_config.security_mode,
363 parameters.network_config.local_communication_version);
364 389
365 IPC::ResponseBuilder rb{ctx, 2}; 390 CreateNetworkImpl(ctx, true);
366 rb.Push(ResultSuccess);
367 } 391 }
368 392
369 void CreateNetworkPrivate(Kernel::HLERequestContext& ctx) { 393 void CreateNetworkImpl(Kernel::HLERequestContext& ctx, bool is_private = false) {
370 IPC::RequestParser rp{ctx}; 394 IPC::RequestParser rp{ctx};
371 struct Parameters {
372 SecurityConfig security_config;
373 SecurityParameter security_parameter;
374 UserConfig user_config;
375 NetworkConfig network_config;
376 };
377 static_assert(sizeof(Parameters) == 0xB8, "Parameters has incorrect size.");
378
379 const auto parameters{rp.PopRaw<Parameters>()};
380 395
381 LOG_WARNING(Service_LDN, 396 const auto security_config{rp.PopRaw<SecurityConfig>()};
382 "(STUBBED) called, passphrase_size={}, security_mode={}, " 397 [[maybe_unused]] const auto security_parameter{is_private ? rp.PopRaw<SecurityParameter>()
383 "local_communication_version={}", 398 : SecurityParameter{}};
384 parameters.security_config.passphrase_size, 399 const auto user_config{rp.PopRaw<UserConfig>()};
385 parameters.security_config.security_mode, 400 rp.Pop<u32>(); // Padding
386 parameters.network_config.local_communication_version); 401 const auto network_config{rp.PopRaw<NetworkConfig>()};
387 402
388 IPC::ResponseBuilder rb{ctx, 2}; 403 IPC::ResponseBuilder rb{ctx, 2};
389 rb.Push(ResultSuccess); 404 rb.Push(lan_discovery.CreateNetwork(security_config, user_config, network_config));
390 } 405 }
391 406
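CreateNetworkImpl folds the two removed request layouts into one parser; their total sizes (0x98 plain, 0xB8 private) come from the deleted static_asserts. A stand-alone sketch of the pop order, with POD stand-ins whose sizes are back-solved from those asserts and are therefore assumptions:

#include <cstdint>
#include <cstring>

struct SecurityConfigLike { std::uint8_t bytes[0x44]; };    // assumed 0x44
struct SecurityParameterLike { std::uint8_t bytes[0x20]; }; // assumed 0x20
struct UserConfigLike { std::uint8_t bytes[0x30]; };        // assumed 0x30
struct NetworkConfigLike { std::uint8_t bytes[0x20]; };     // assumed 0x20

// Generic stand-in for IPC::RequestParser::PopRaw: copy a POD and advance.
template <typename T>
T PopRaw(const std::uint8_t*& cursor) {
    T value{};
    std::memcpy(&value, cursor, sizeof(T));
    cursor += sizeof(T);
    return value;
}

// Mirrors CreateNetworkImpl: the private variant carries an extra
// SecurityParameter; both layouts keep a padding word before NetworkConfig
// (0x44 + [0x20] + 0x30 + 4 + 0x20 = 0x98 / 0xB8).
void ParseCreateNetwork(const std::uint8_t* cursor, bool is_private) {
    [[maybe_unused]] const auto security_config = PopRaw<SecurityConfigLike>(cursor);
    if (is_private) {
        PopRaw<SecurityParameterLike>(cursor); // unused by the service for now
    }
    [[maybe_unused]] const auto user_config = PopRaw<UserConfigLike>(cursor);
    PopRaw<std::uint32_t>(cursor); // padding
    [[maybe_unused]] const auto network_config = PopRaw<NetworkConfigLike>(cursor);
}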
392 void DestroyNetwork(Kernel::HLERequestContext& ctx) { 407 void DestroyNetwork(Kernel::HLERequestContext& ctx) {
393 LOG_WARNING(Service_LDN, "(STUBBED) called"); 408 LOG_INFO(Service_LDN, "called");
394 409
395 IPC::ResponseBuilder rb{ctx, 2}; 410 IPC::ResponseBuilder rb{ctx, 2};
396 rb.Push(ResultSuccess); 411 rb.Push(lan_discovery.DestroyNetwork());
397 } 412 }
398 413
399 void SetAdvertiseData(Kernel::HLERequestContext& ctx) { 414 void SetAdvertiseData(Kernel::HLERequestContext& ctx) {
400 std::vector<u8> read_buffer = ctx.ReadBuffer(); 415 std::vector<u8> read_buffer = ctx.ReadBuffer();
401 416
402 LOG_WARNING(Service_LDN, "(STUBBED) called, size {}", read_buffer.size());
403
404 IPC::ResponseBuilder rb{ctx, 2}; 417 IPC::ResponseBuilder rb{ctx, 2};
405 rb.Push(ResultSuccess); 418 rb.Push(lan_discovery.SetAdvertiseData(read_buffer));
406 } 419 }
407 420
408 void SetStationAcceptPolicy(Kernel::HLERequestContext& ctx) { 421 void SetStationAcceptPolicy(Kernel::HLERequestContext& ctx) {
@@ -420,17 +433,17 @@ public:
420 } 433 }
421 434
422 void OpenStation(Kernel::HLERequestContext& ctx) { 435 void OpenStation(Kernel::HLERequestContext& ctx) {
423 LOG_WARNING(Service_LDN, "(STUBBED) called"); 436 LOG_INFO(Service_LDN, "called");
424 437
425 IPC::ResponseBuilder rb{ctx, 2}; 438 IPC::ResponseBuilder rb{ctx, 2};
426 rb.Push(ResultSuccess); 439 rb.Push(lan_discovery.OpenStation());
427 } 440 }
428 441
429 void CloseStation(Kernel::HLERequestContext& ctx) { 442 void CloseStation(Kernel::HLERequestContext& ctx) {
430 LOG_WARNING(Service_LDN, "(STUBBED) called"); 443 LOG_INFO(Service_LDN, "called");
431 444
432 IPC::ResponseBuilder rb{ctx, 2}; 445 IPC::ResponseBuilder rb{ctx, 2};
433 rb.Push(ResultSuccess); 446 rb.Push(lan_discovery.CloseStation());
434 } 447 }
435 448
436 void Connect(Kernel::HLERequestContext& ctx) { 449 void Connect(Kernel::HLERequestContext& ctx) {
@@ -445,16 +458,13 @@ public:
445 458
446 const auto parameters{rp.PopRaw<Parameters>()}; 459 const auto parameters{rp.PopRaw<Parameters>()};
447 460
448 LOG_WARNING(Service_LDN, 461 LOG_INFO(Service_LDN,
449 "(STUBBED) called, passphrase_size={}, security_mode={}, " 462 "called, passphrase_size={}, security_mode={}, "
450 "local_communication_version={}", 463 "local_communication_version={}",
451 parameters.security_config.passphrase_size, 464 parameters.security_config.passphrase_size,
452 parameters.security_config.security_mode, 465 parameters.security_config.security_mode, parameters.local_communication_version);
453 parameters.local_communication_version);
454 466
455 const std::vector<u8> read_buffer = ctx.ReadBuffer(); 467 const std::vector<u8> read_buffer = ctx.ReadBuffer();
456 NetworkInfo network_info{};
457
458 if (read_buffer.size() != sizeof(NetworkInfo)) { 468 if (read_buffer.size() != sizeof(NetworkInfo)) {
459 LOG_ERROR(Frontend, "NetworkInfo doesn't match read_buffer size!"); 469 LOG_ERROR(Frontend, "NetworkInfo doesn't match read_buffer size!");
460 IPC::ResponseBuilder rb{ctx, 2}; 470 IPC::ResponseBuilder rb{ctx, 2};
@@ -462,40 +472,47 @@ public:
462 return; 472 return;
463 } 473 }
464 474
475 NetworkInfo network_info{};
465 std::memcpy(&network_info, read_buffer.data(), read_buffer.size()); 476 std::memcpy(&network_info, read_buffer.data(), read_buffer.size());
466 477
467 IPC::ResponseBuilder rb{ctx, 2}; 478 IPC::ResponseBuilder rb{ctx, 2};
468 rb.Push(ResultSuccess); 479 rb.Push(lan_discovery.Connect(network_info, parameters.user_config,
480 static_cast<u16>(parameters.local_communication_version)));
469 } 481 }
470 482
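Connect validates the buffer size before the memcpy into NetworkInfo, so a short or oversized buffer can never smear past the struct. The same shape as a generic helper (names hypothetical):

#include <cstring>
#include <type_traits>
#include <vector>

// Reject anything that is not exactly sizeof(T), then copy into a
// zero-initialized trivially copyable value; a false return corresponds to the
// ResultBadInput path above.
template <typename T>
bool ReadExact(const std::vector<unsigned char>& buffer, T& out) {
    static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
    if (buffer.size() != sizeof(T)) {
        return false;
    }
    out = T{};
    std::memcpy(&out, buffer.data(), sizeof(T));
    return true;
}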
471 void Disconnect(Kernel::HLERequestContext& ctx) { 483 void Disconnect(Kernel::HLERequestContext& ctx) {
472 LOG_WARNING(Service_LDN, "(STUBBED) called"); 484 LOG_INFO(Service_LDN, "called");
473 485
474 IPC::ResponseBuilder rb{ctx, 2}; 486 IPC::ResponseBuilder rb{ctx, 2};
475 rb.Push(ResultSuccess); 487 rb.Push(lan_discovery.Disconnect());
476 } 488 }
477 void Initialize(Kernel::HLERequestContext& ctx) {
478 LOG_WARNING(Service_LDN, "(STUBBED) called");
479 489
490 void Initialize(Kernel::HLERequestContext& ctx) {
480 const auto rc = InitializeImpl(ctx); 491 const auto rc = InitializeImpl(ctx);
492 if (rc.IsError()) {
493 LOG_ERROR(Service_LDN, "Network isn't initialized, rc={}", rc.raw);
494 }
481 495
482 IPC::ResponseBuilder rb{ctx, 2}; 496 IPC::ResponseBuilder rb{ctx, 2};
483 rb.Push(rc); 497 rb.Push(rc);
484 } 498 }
485 499
486 void Finalize(Kernel::HLERequestContext& ctx) { 500 void Finalize(Kernel::HLERequestContext& ctx) {
487 LOG_WARNING(Service_LDN, "(STUBBED) called"); 501 if (auto room_member = room_network.GetRoomMember().lock()) {
502 room_member->Unbind(ldn_packet_received);
503 }
488 504
489 is_initialized = false; 505 is_initialized = false;
490 506
491 IPC::ResponseBuilder rb{ctx, 2}; 507 IPC::ResponseBuilder rb{ctx, 2};
492 rb.Push(ResultSuccess); 508 rb.Push(lan_discovery.Finalize());
493 } 509 }
494 510
495 void Initialize2(Kernel::HLERequestContext& ctx) { 511 void Initialize2(Kernel::HLERequestContext& ctx) {
496 LOG_WARNING(Service_LDN, "(STUBBED) called");
497
498 const auto rc = InitializeImpl(ctx); 512 const auto rc = InitializeImpl(ctx);
513 if (rc.IsError()) {
514 LOG_ERROR(Service_LDN, "Network isn't initialized, rc={}", rc.raw);
515 }
499 516
500 IPC::ResponseBuilder rb{ctx, 2}; 517 IPC::ResponseBuilder rb{ctx, 2};
501 rb.Push(rc); 518 rb.Push(rc);
@@ -508,14 +525,26 @@ public:
508 return ResultAirplaneModeEnabled; 525 return ResultAirplaneModeEnabled;
509 } 526 }
510 527
528 if (auto room_member = room_network.GetRoomMember().lock()) {
529 ldn_packet_received = room_member->BindOnLdnPacketReceived(
530 [this](const Network::LDNPacket& packet) { OnLDNPacketReceived(packet); });
531 } else {
532 LOG_ERROR(Service_LDN, "Couldn't bind callback!");
533 return ResultAirplaneModeEnabled;
534 }
535
536 lan_discovery.Initialize([&]() { OnEventFired(); });
511 is_initialized = true; 537 is_initialized = true;
512 // TODO (flTobi): Change this to ResultSuccess when LDN is fully implemented 538 return ResultSuccess;
513 return ResultAirplaneModeEnabled;
514 } 539 }
515 540
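InitializeImpl now binds OnLDNPacketReceived against the room member and stores the returned handle, and Finalize plus the destructor unbind it again so the callback can never outlive the service. A generic sketch of that handle-based bind/unbind surface, standing in for Network::RoomMember (all names here are illustrative):

#include <cstddef>
#include <functional>
#include <unordered_map>
#include <utility>

template <typename Packet>
class PacketPublisher {
public:
    using Handle = std::size_t;

    // Register a callback and hand back the handle the caller must keep.
    Handle Bind(std::function<void(const Packet&)> callback) {
        callbacks.emplace(next_handle, std::move(callback));
        return next_handle++;
    }

    // Drop the callback; owners call this before they are destroyed.
    void Unbind(Handle handle) {
        callbacks.erase(handle);
    }

    void Publish(const Packet& packet) const {
        for (const auto& [handle, callback] : callbacks) {
            callback(packet);
        }
    }

private:
    Handle next_handle{};
    std::unordered_map<Handle, std::function<void(const Packet&)>> callbacks;
};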
516 KernelHelpers::ServiceContext service_context; 541 KernelHelpers::ServiceContext service_context;
517 Kernel::KEvent* state_change_event; 542 Kernel::KEvent* state_change_event;
518 Network::RoomNetwork& room_network; 543 Network::RoomNetwork& room_network;
544 LANDiscovery lan_discovery;
545
546 // Callback identifier for the OnLDNPacketReceived event.
547 Network::RoomMember::CallbackHandle<Network::LDNPacket> ldn_packet_received;
519 548
520 bool is_initialized{}; 549 bool is_initialized{};
521}; 550};
diff --git a/src/core/hle/service/ldn/ldn_types.h b/src/core/hle/service/ldn/ldn_types.h
index 6231e936d..44c2c773b 100644
--- a/src/core/hle/service/ldn/ldn_types.h
+++ b/src/core/hle/service/ldn/ldn_types.h
@@ -31,6 +31,8 @@ enum class NodeStateChange : u8 {
31 DisconnectAndConnect, 31 DisconnectAndConnect,
32}; 32};
33 33
34DECLARE_ENUM_FLAG_OPERATORS(NodeStateChange)
35
34enum class ScanFilterFlag : u32 { 36enum class ScanFilterFlag : u32 {
35 None = 0, 37 None = 0,
36 LocalCommunicationId = 1 << 0, 38 LocalCommunicationId = 1 << 0,
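DECLARE_ENUM_FLAG_OPERATORS(NodeStateChange) lets the discovery code combine and mask node state changes directly on the enum class. A hand-rolled equivalent of the operator| the macro generates (it also covers &, ^, ~ and the compound forms; enumerator values below are illustrative):

#include <type_traits>

enum class ChangeFlags : unsigned char {
    None = 0,
    Connect = 1 << 0,
    Disconnect = 1 << 1,
};

constexpr ChangeFlags operator|(ChangeFlags lhs, ChangeFlags rhs) {
    using U = std::underlying_type_t<ChangeFlags>;
    return static_cast<ChangeFlags>(static_cast<U>(lhs) | static_cast<U>(rhs));
}

static_assert((ChangeFlags::Connect | ChangeFlags::Disconnect) ==
              static_cast<ChangeFlags>(3));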
@@ -100,13 +102,13 @@ enum class AcceptPolicy : u8 {
100 102
101enum class WifiChannel : s16 { 103enum class WifiChannel : s16 {
102 Default = 0, 104 Default = 0,
103 wifi24_1 = 1, 105 Wifi24_1 = 1,
104 wifi24_6 = 6, 106 Wifi24_6 = 6,
105 wifi24_11 = 11, 107 Wifi24_11 = 11,
106 wifi50_36 = 36, 108 Wifi50_36 = 36,
107 wifi50_40 = 40, 109 Wifi50_40 = 40,
108 wifi50_44 = 44, 110 Wifi50_44 = 44,
109 wifi50_48 = 48, 111 Wifi50_48 = 48,
110}; 112};
111 113
112enum class LinkLevel : s8 { 114enum class LinkLevel : s8 {
@@ -116,6 +118,11 @@ enum class LinkLevel : s8 {
116 Excellent, 118 Excellent,
117}; 119};
118 120
121enum class NodeStatus : u8 {
122 Disconnected,
123 Connected,
124};
125
119struct NodeLatestUpdate { 126struct NodeLatestUpdate {
120 NodeStateChange state_change; 127 NodeStateChange state_change;
121 INSERT_PADDING_BYTES(0x7); // Unknown 128 INSERT_PADDING_BYTES(0x7); // Unknown
@@ -150,7 +157,7 @@ struct Ssid {
150 157
151 Ssid() = default; 158 Ssid() = default;
152 159
153 explicit Ssid(std::string_view data) { 160 constexpr explicit Ssid(std::string_view data) {
154 length = static_cast<u8>(std::min(data.size(), SsidLengthMax)); 161 length = static_cast<u8>(std::min(data.size(), SsidLengthMax));
155 data.copy(raw.data(), length); 162 data.copy(raw.data(), length);
156 raw[length] = 0; 163 raw[length] = 0;
@@ -159,19 +166,18 @@ struct Ssid {
159 std::string GetStringValue() const { 166 std::string GetStringValue() const {
160 return std::string(raw.data()); 167 return std::string(raw.data());
161 } 168 }
162};
163static_assert(sizeof(Ssid) == 0x22, "Ssid is an invalid size");
164 169
165struct Ipv4Address { 170 bool operator==(const Ssid& b) const {
166 union { 171 return (length == b.length) && (std::memcmp(raw.data(), b.raw.data(), length) == 0);
167 u32 raw{}; 172 }
168 std::array<u8, 4> bytes;
169 };
170 173
171 std::string GetStringValue() const { 174 bool operator!=(const Ssid& b) const {
172 return fmt::format("{}.{}.{}.{}", bytes[3], bytes[2], bytes[1], bytes[0]); 175 return !operator==(b);
173 } 176 }
174}; 177};
178static_assert(sizeof(Ssid) == 0x22, "Ssid is an invalid size");
179
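The new Ssid::operator== compares the stored length first and then memcmp's only 'length' payload bytes, so stale bytes past the terminator cannot break equality. A reduced, self-contained model of that rule:

#include <array>
#include <cstddef>
#include <cstring>

struct MiniSsid {
    unsigned char length{};
    std::array<char, 0x21> raw{};

    explicit MiniSsid(const char* text) {
        const std::size_t n = std::strlen(text);
        length = static_cast<unsigned char>(n < 0x20 ? n : 0x20);
        std::memcpy(raw.data(), text, length);
        raw[length] = 0;
    }

    // Same comparison rule as the patched Ssid.
    bool operator==(const MiniSsid& other) const {
        return length == other.length &&
               std::memcmp(raw.data(), other.raw.data(), length) == 0;
    }
};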
180using Ipv4Address = std::array<u8, 4>;
175static_assert(sizeof(Ipv4Address) == 0x4, "Ipv4Address is an invalid size"); 181static_assert(sizeof(Ipv4Address) == 0x4, "Ipv4Address is an invalid size");
176 182
177struct MacAddress { 183struct MacAddress {
@@ -181,6 +187,14 @@ struct MacAddress {
181}; 187};
182static_assert(sizeof(MacAddress) == 0x6, "MacAddress is an invalid size"); 188static_assert(sizeof(MacAddress) == 0x6, "MacAddress is an invalid size");
183 189
190struct MACAddressHash {
191 size_t operator()(const MacAddress& address) const {
192 u64 value{};
193 std::memcpy(&value, address.raw.data(), sizeof(address.raw));
194 return value;
195 }
196};
197
184struct ScanFilter { 198struct ScanFilter {
185 NetworkId network_id; 199 NetworkId network_id;
186 NetworkType network_type; 200 NetworkType network_type;
diff --git a/src/core/hle/service/mii/mii_manager.cpp b/src/core/hle/service/mii/mii_manager.cpp
index c484a9c8d..3a2fe938f 100644
--- a/src/core/hle/service/mii/mii_manager.cpp
+++ b/src/core/hle/service/mii/mii_manager.cpp
@@ -427,12 +427,11 @@ CharInfo MiiManager::BuildDefault(std::size_t index) {
427 return ConvertStoreDataToInfo(BuildDefaultStoreData(RawData::DefaultMii.at(index), user_id)); 427 return ConvertStoreDataToInfo(BuildDefaultStoreData(RawData::DefaultMii.at(index), user_id));
428} 428}
429 429
430CharInfo MiiManager::ConvertV3ToCharInfo(Ver3StoreData mii_v3) const { 430CharInfo MiiManager::ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const {
431 Service::Mii::MiiManager manager; 431 Service::Mii::MiiManager manager;
432 auto mii = manager.BuildDefault(0); 432 auto mii = manager.BuildDefault(0);
433 433
434 // Check if mii data exist 434 if (!ValidateV3Info(mii_v3)) {
435 if (mii_v3.mii_name[0] == 0) {
436 return mii; 435 return mii;
437 } 436 }
438 437
@@ -443,8 +442,15 @@ CharInfo MiiManager::ConvertV3ToCharInfo(Ver3StoreData mii_v3) const {
443 mii.height = mii_v3.height; 442 mii.height = mii_v3.height;
444 mii.build = mii_v3.build; 443 mii.build = mii_v3.build;
445 444
446 memset(mii.name.data(), 0, sizeof(mii.name)); 445 // Copy name until string terminator
447 memcpy(mii.name.data(), mii_v3.mii_name.data(), sizeof(mii_v3.mii_name)); 446 mii.name = {};
447 for (std::size_t index = 0; index < mii.name.size() - 1; index++) {
448 mii.name[index] = mii_v3.mii_name[index];
449 if (mii.name[index] == 0) {
450 break;
451 }
452 }
453
448 mii.font_region = mii_v3.region_information.character_set; 454 mii.font_region = mii_v3.region_information.character_set;
449 455
450 mii.faceline_type = mii_v3.appearance_bits1.face_shape; 456 mii.faceline_type = mii_v3.appearance_bits1.face_shape;
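The old code memcpy'd the entire v3 name buffer into the CharInfo name; the replacement loop clears the destination and copies until the terminator, so the result is always null-terminated. The same loop as a generic helper:

#include <array>
#include <cstddef>

template <typename Char, std::size_t N, std::size_t M>
void CopyTerminated(std::array<Char, N>& dst, const std::array<Char, M>& src) {
    dst = {}; // keep the final slot (and any tail) zeroed
    for (std::size_t index = 0; index < dst.size() - 1 && index < src.size(); ++index) {
        dst[index] = src[index];
        if (dst[index] == 0) {
            break;
        }
    }
}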
@@ -504,6 +510,151 @@ CharInfo MiiManager::ConvertV3ToCharInfo(Ver3StoreData mii_v3) const {
504 return mii; 510 return mii;
505} 511}
506 512
513Ver3StoreData MiiManager::ConvertCharInfoToV3(const CharInfo& mii) const {
514 Service::Mii::MiiManager manager;
515 Ver3StoreData mii_v3{};
516
517 // TODO: We are ignoring a bunch of data from the mii_v3
518
519 mii_v3.version = 1;
520 mii_v3.mii_information.gender.Assign(mii.gender);
521 mii_v3.mii_information.favorite_color.Assign(mii.favorite_color);
522 mii_v3.height = mii.height;
523 mii_v3.build = mii.build;
524
525 // Copy name until string terminator
526 mii_v3.mii_name = {};
527 for (std::size_t index = 0; index < mii.name.size() - 1; index++) {
528 mii_v3.mii_name[index] = mii.name[index];
529 if (mii_v3.mii_name[index] == 0) {
530 break;
531 }
532 }
533
534 mii_v3.region_information.character_set.Assign(mii.font_region);
535
536 mii_v3.appearance_bits1.face_shape.Assign(mii.faceline_type);
537 mii_v3.appearance_bits1.skin_color.Assign(mii.faceline_color);
538 mii_v3.appearance_bits2.wrinkles.Assign(mii.faceline_wrinkle);
539 mii_v3.appearance_bits2.makeup.Assign(mii.faceline_make);
540
541 mii_v3.hair_style = mii.hair_type;
542 mii_v3.appearance_bits3.hair_color.Assign(mii.hair_color);
543 mii_v3.appearance_bits3.flip_hair.Assign(mii.hair_flip);
544
545 mii_v3.appearance_bits4.eye_type.Assign(mii.eye_type);
546 mii_v3.appearance_bits4.eye_color.Assign(mii.eye_color);
547 mii_v3.appearance_bits4.eye_scale.Assign(mii.eye_scale);
548 mii_v3.appearance_bits4.eye_vertical_stretch.Assign(mii.eye_aspect);
549 mii_v3.appearance_bits4.eye_rotation.Assign(mii.eye_rotate);
550 mii_v3.appearance_bits4.eye_spacing.Assign(mii.eye_x);
551 mii_v3.appearance_bits4.eye_y_position.Assign(mii.eye_y);
552
553 mii_v3.appearance_bits5.eyebrow_style.Assign(mii.eyebrow_type);
554 mii_v3.appearance_bits5.eyebrow_color.Assign(mii.eyebrow_color);
555 mii_v3.appearance_bits5.eyebrow_scale.Assign(mii.eyebrow_scale);
556 mii_v3.appearance_bits5.eyebrow_yscale.Assign(mii.eyebrow_aspect);
557 mii_v3.appearance_bits5.eyebrow_rotation.Assign(mii.eyebrow_rotate);
558 mii_v3.appearance_bits5.eyebrow_spacing.Assign(mii.eyebrow_x);
559 mii_v3.appearance_bits5.eyebrow_y_position.Assign(mii.eyebrow_y);
560
561 mii_v3.appearance_bits6.nose_type.Assign(mii.nose_type);
562 mii_v3.appearance_bits6.nose_scale.Assign(mii.nose_scale);
563 mii_v3.appearance_bits6.nose_y_position.Assign(mii.nose_y);
564
565 mii_v3.appearance_bits7.mouth_type.Assign(mii.mouth_type);
566 mii_v3.appearance_bits7.mouth_color.Assign(mii.mouth_color);
567 mii_v3.appearance_bits7.mouth_scale.Assign(mii.mouth_scale);
568 mii_v3.appearance_bits7.mouth_horizontal_stretch.Assign(mii.mouth_aspect);
569 mii_v3.appearance_bits8.mouth_y_position.Assign(mii.mouth_y);
570
571 mii_v3.appearance_bits8.mustache_type.Assign(mii.mustache_type);
572 mii_v3.appearance_bits9.mustache_scale.Assign(mii.mustache_scale);
573 mii_v3.appearance_bits9.mustache_y_position.Assign(mii.mustache_y);
574
575 mii_v3.appearance_bits9.bear_type.Assign(mii.beard_type);
576 mii_v3.appearance_bits9.facial_hair_color.Assign(mii.beard_color);
577
578 mii_v3.appearance_bits10.glasses_type.Assign(mii.glasses_type);
579 mii_v3.appearance_bits10.glasses_color.Assign(mii.glasses_color);
580 mii_v3.appearance_bits10.glasses_scale.Assign(mii.glasses_scale);
581 mii_v3.appearance_bits10.glasses_y_position.Assign(mii.glasses_y);
582
583 mii_v3.appearance_bits11.mole_enabled.Assign(mii.mole_type);
584 mii_v3.appearance_bits11.mole_scale.Assign(mii.mole_scale);
585 mii_v3.appearance_bits11.mole_x_position.Assign(mii.mole_x);
586 mii_v3.appearance_bits11.mole_y_position.Assign(mii.mole_y);
587
588 // TODO: Validate mii_v3 data
589
590 return mii_v3;
591}
592
593bool MiiManager::ValidateV3Info(const Ver3StoreData& mii_v3) const {
594 bool is_valid = mii_v3.version == 0 || mii_v3.version == 3;
595
596 is_valid = is_valid && (mii_v3.mii_name[0] != 0);
597
598 is_valid = is_valid && (mii_v3.mii_information.birth_month < 13);
599 is_valid = is_valid && (mii_v3.mii_information.birth_day < 32);
600 is_valid = is_valid && (mii_v3.mii_information.favorite_color < 12);
601 is_valid = is_valid && (mii_v3.height < 128);
602 is_valid = is_valid && (mii_v3.build < 128);
603
604 is_valid = is_valid && (mii_v3.appearance_bits1.face_shape < 12);
605 is_valid = is_valid && (mii_v3.appearance_bits1.skin_color < 7);
606 is_valid = is_valid && (mii_v3.appearance_bits2.wrinkles < 12);
607 is_valid = is_valid && (mii_v3.appearance_bits2.makeup < 12);
608
609 is_valid = is_valid && (mii_v3.hair_style < 132);
610 is_valid = is_valid && (mii_v3.appearance_bits3.hair_color < 8);
611
612 is_valid = is_valid && (mii_v3.appearance_bits4.eye_type < 60);
613 is_valid = is_valid && (mii_v3.appearance_bits4.eye_color < 6);
614 is_valid = is_valid && (mii_v3.appearance_bits4.eye_scale < 8);
615 is_valid = is_valid && (mii_v3.appearance_bits4.eye_vertical_stretch < 7);
616 is_valid = is_valid && (mii_v3.appearance_bits4.eye_rotation < 8);
617 is_valid = is_valid && (mii_v3.appearance_bits4.eye_spacing < 13);
618 is_valid = is_valid && (mii_v3.appearance_bits4.eye_y_position < 19);
619
620 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_style < 25);
621 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_color < 8);
622 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_scale < 9);
623 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_yscale < 7);
624 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_rotation < 12);
625 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_spacing < 12);
626 is_valid = is_valid && (mii_v3.appearance_bits5.eyebrow_y_position < 19);
627
628 is_valid = is_valid && (mii_v3.appearance_bits6.nose_type < 18);
629 is_valid = is_valid && (mii_v3.appearance_bits6.nose_scale < 9);
630 is_valid = is_valid && (mii_v3.appearance_bits6.nose_y_position < 19);
631
632 is_valid = is_valid && (mii_v3.appearance_bits7.mouth_type < 36);
633 is_valid = is_valid && (mii_v3.appearance_bits7.mouth_color < 5);
634 is_valid = is_valid && (mii_v3.appearance_bits7.mouth_scale < 9);
635 is_valid = is_valid && (mii_v3.appearance_bits7.mouth_horizontal_stretch < 7);
636 is_valid = is_valid && (mii_v3.appearance_bits8.mouth_y_position < 19);
637
638 is_valid = is_valid && (mii_v3.appearance_bits8.mustache_type < 6);
639 is_valid = is_valid && (mii_v3.appearance_bits9.mustache_scale < 7);
640 is_valid = is_valid && (mii_v3.appearance_bits9.mustache_y_position < 17);
641
642 is_valid = is_valid && (mii_v3.appearance_bits9.bear_type < 6);
643 is_valid = is_valid && (mii_v3.appearance_bits9.facial_hair_color < 8);
644
645 is_valid = is_valid && (mii_v3.appearance_bits10.glasses_type < 9);
646 is_valid = is_valid && (mii_v3.appearance_bits10.glasses_color < 6);
647 is_valid = is_valid && (mii_v3.appearance_bits10.glasses_scale < 8);
648 is_valid = is_valid && (mii_v3.appearance_bits10.glasses_y_position < 21);
649
650 is_valid = is_valid && (mii_v3.appearance_bits11.mole_enabled < 2);
651 is_valid = is_valid && (mii_v3.appearance_bits11.mole_scale < 9);
652 is_valid = is_valid && (mii_v3.appearance_bits11.mole_x_position < 17);
653 is_valid = is_valid && (mii_v3.appearance_bits11.mole_y_position < 31);
654
655 return is_valid;
656}
657
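ValidateV3Info is a long &&-chain of range checks; one out-of-range field makes the whole result false. The same shape, compacted into a helper (illustrative only; the patch deliberately keeps the checks written out per field):

#include <initializer_list>
#include <utility>

bool AllInRange(std::initializer_list<std::pair<unsigned, unsigned>> checks) {
    bool is_valid = true;
    for (const auto& [value, limit] : checks) {
        is_valid = is_valid && value < limit;
    }
    return is_valid;
}

// e.g. AllInRange({{birth_month, 13}, {birth_day, 32}, {favorite_color, 12}})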
507ResultVal<std::vector<MiiInfoElement>> MiiManager::GetDefault(SourceFlag source_flag) { 658ResultVal<std::vector<MiiInfoElement>> MiiManager::GetDefault(SourceFlag source_flag) {
508 std::vector<MiiInfoElement> result; 659 std::vector<MiiInfoElement> result;
509 660
diff --git a/src/core/hle/service/mii/mii_manager.h b/src/core/hle/service/mii/mii_manager.h
index d847de0bd..83ad3d343 100644
--- a/src/core/hle/service/mii/mii_manager.h
+++ b/src/core/hle/service/mii/mii_manager.h
@@ -22,7 +22,9 @@ public:
22 ResultVal<CharInfo> UpdateLatest(const CharInfo& info, SourceFlag source_flag); 22 ResultVal<CharInfo> UpdateLatest(const CharInfo& info, SourceFlag source_flag);
23 CharInfo BuildRandom(Age age, Gender gender, Race race); 23 CharInfo BuildRandom(Age age, Gender gender, Race race);
24 CharInfo BuildDefault(std::size_t index); 24 CharInfo BuildDefault(std::size_t index);
25 CharInfo ConvertV3ToCharInfo(Ver3StoreData mii_v3) const; 25 CharInfo ConvertV3ToCharInfo(const Ver3StoreData& mii_v3) const;
26 Ver3StoreData ConvertCharInfoToV3(const CharInfo& mii) const;
27 bool ValidateV3Info(const Ver3StoreData& mii_v3) const;
26 ResultVal<std::vector<MiiInfoElement>> GetDefault(SourceFlag source_flag); 28 ResultVal<std::vector<MiiInfoElement>> GetDefault(SourceFlag source_flag);
27 Result GetIndex(const CharInfo& info, u32& index); 29 Result GetIndex(const CharInfo& info, u32& index);
28 30
diff --git a/src/core/hle/service/nfc/nfc.cpp b/src/core/hle/service/nfc/nfc.cpp
index 13a843a28..046c5f18f 100644
--- a/src/core/hle/service/nfc/nfc.cpp
+++ b/src/core/hle/service/nfc/nfc.cpp
@@ -106,10 +106,10 @@ public:
106 {1, &IUser::FinalizeOld, "FinalizeOld"}, 106 {1, &IUser::FinalizeOld, "FinalizeOld"},
107 {2, &IUser::GetStateOld, "GetStateOld"}, 107 {2, &IUser::GetStateOld, "GetStateOld"},
108 {3, &IUser::IsNfcEnabledOld, "IsNfcEnabledOld"}, 108 {3, &IUser::IsNfcEnabledOld, "IsNfcEnabledOld"},
109 {400, nullptr, "Initialize"}, 109 {400, &IUser::InitializeOld, "Initialize"},
110 {401, nullptr, "Finalize"}, 110 {401, &IUser::FinalizeOld, "Finalize"},
111 {402, nullptr, "GetState"}, 111 {402, &IUser::GetStateOld, "GetState"},
112 {403, nullptr, "IsNfcEnabled"}, 112 {403, &IUser::IsNfcEnabledOld, "IsNfcEnabled"},
113 {404, nullptr, "ListDevices"}, 113 {404, nullptr, "ListDevices"},
114 {405, nullptr, "GetDeviceState"}, 114 {405, nullptr, "GetDeviceState"},
115 {406, nullptr, "GetNpadId"}, 115 {406, nullptr, "GetNpadId"},
diff --git a/src/core/hle/service/nfp/amiibo_crypto.cpp b/src/core/hle/service/nfp/amiibo_crypto.cpp
index 31dd3a307..c32a6816b 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.cpp
+++ b/src/core/hle/service/nfp/amiibo_crypto.cpp
@@ -20,14 +20,15 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
20 const auto& amiibo_data = ntag_file.user_memory; 20 const auto& amiibo_data = ntag_file.user_memory;
21 LOG_DEBUG(Service_NFP, "uuid_lock=0x{0:x}", ntag_file.static_lock); 21 LOG_DEBUG(Service_NFP, "uuid_lock=0x{0:x}", ntag_file.static_lock);
22 LOG_DEBUG(Service_NFP, "compability_container=0x{0:x}", ntag_file.compability_container); 22 LOG_DEBUG(Service_NFP, "compability_container=0x{0:x}", ntag_file.compability_container);
23 LOG_INFO(Service_NFP, "write_count={}", amiibo_data.write_counter); 23 LOG_DEBUG(Service_NFP, "write_count={}", static_cast<u16>(amiibo_data.write_counter));
24 24
25 LOG_INFO(Service_NFP, "character_id=0x{0:x}", amiibo_data.model_info.character_id); 25 LOG_DEBUG(Service_NFP, "character_id=0x{0:x}", amiibo_data.model_info.character_id);
26 LOG_INFO(Service_NFP, "character_variant={}", amiibo_data.model_info.character_variant); 26 LOG_DEBUG(Service_NFP, "character_variant={}", amiibo_data.model_info.character_variant);
27 LOG_INFO(Service_NFP, "amiibo_type={}", amiibo_data.model_info.amiibo_type); 27 LOG_DEBUG(Service_NFP, "amiibo_type={}", amiibo_data.model_info.amiibo_type);
28 LOG_INFO(Service_NFP, "model_number=0x{0:x}", amiibo_data.model_info.model_number); 28 LOG_DEBUG(Service_NFP, "model_number=0x{0:x}",
29 LOG_INFO(Service_NFP, "series={}", amiibo_data.model_info.series); 29 static_cast<u16>(amiibo_data.model_info.model_number));
30 LOG_DEBUG(Service_NFP, "fixed_value=0x{0:x}", amiibo_data.model_info.constant_value); 30 LOG_DEBUG(Service_NFP, "series={}", amiibo_data.model_info.series);
31 LOG_DEBUG(Service_NFP, "tag_type=0x{0:x}", amiibo_data.model_info.tag_type);
31 32
32 LOG_DEBUG(Service_NFP, "tag_dynamic_lock=0x{0:x}", ntag_file.dynamic_lock); 33 LOG_DEBUG(Service_NFP, "tag_dynamic_lock=0x{0:x}", ntag_file.dynamic_lock);
33 LOG_DEBUG(Service_NFP, "tag_CFG0=0x{0:x}", ntag_file.CFG0); 34 LOG_DEBUG(Service_NFP, "tag_CFG0=0x{0:x}", ntag_file.CFG0);
@@ -35,11 +36,12 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
35 36
36 // Validate UUID 37 // Validate UUID
37 constexpr u8 CT = 0x88; // As defined in `ISO / IEC 14443 - 3` 38 constexpr u8 CT = 0x88; // As defined in `ISO / IEC 14443 - 3`
38 if ((CT ^ ntag_file.uuid[0] ^ ntag_file.uuid[1] ^ ntag_file.uuid[2]) != ntag_file.uuid[3]) { 39 if ((CT ^ ntag_file.uuid.uid[0] ^ ntag_file.uuid.uid[1] ^ ntag_file.uuid.uid[2]) !=
40 ntag_file.uuid.uid[3]) {
39 return false; 41 return false;
40 } 42 }
41 if ((ntag_file.uuid[4] ^ ntag_file.uuid[5] ^ ntag_file.uuid[6] ^ ntag_file.uuid[7]) != 43 if ((ntag_file.uuid.uid[4] ^ ntag_file.uuid.uid[5] ^ ntag_file.uuid.uid[6] ^
42 ntag_file.uuid[8]) { 44 ntag_file.uuid.nintendo_id) != ntag_file.uuid.lock_bytes[0]) {
43 return false; 45 return false;
44 } 46 }
45 47
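The reworked UUID validation follows the ISO/IEC 14443-3 check bytes: each half of the 7-byte UID is covered by an XOR check byte (BCC0 at uid[3] and BCC1 in lock_bytes[0], per the TagUuid layout in this patch). Stand-alone form of the two tests:

#include <array>
#include <cstdint>

bool CheckUuid(const std::array<std::uint8_t, 7>& uid, std::uint8_t nintendo_id,
               std::uint8_t bcc1) {
    constexpr std::uint8_t CT = 0x88; // cascade tag, ISO/IEC 14443-3
    const bool bcc0_ok = (CT ^ uid[0] ^ uid[1] ^ uid[2]) == uid[3];
    const bool bcc1_ok = (uid[4] ^ uid[5] ^ uid[6] ^ nintendo_id) == bcc1;
    return bcc0_ok && bcc1_ok;
}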
@@ -53,11 +55,12 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
53 if (amiibo_data.constant_value != 0xA5) { 55 if (amiibo_data.constant_value != 0xA5) {
54 return false; 56 return false;
55 } 57 }
56 if (amiibo_data.model_info.constant_value != 0x02) { 58 if (amiibo_data.model_info.tag_type != PackedTagType::Type2) {
59 return false;
60 }
61 if ((ntag_file.dynamic_lock & 0xFFFFFF) != 0x0F0001U) {
57 return false; 62 return false;
58 } 63 }
59 // dynamic_lock value apparently is not constant
60 // ntag_file.dynamic_lock == 0x0F0001
61 if (ntag_file.CFG0 != 0x04000000U) { 64 if (ntag_file.CFG0 != 0x04000000U) {
62 return false; 65 return false;
63 } 66 }
@@ -70,7 +73,8 @@ bool IsAmiiboValid(const EncryptedNTAG215File& ntag_file) {
70NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) { 73NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
71 NTAG215File encoded_data{}; 74 NTAG215File encoded_data{};
72 75
73 memcpy(encoded_data.uuid2.data(), nfc_data.uuid.data() + 0x8, sizeof(encoded_data.uuid2)); 76 encoded_data.uid = nfc_data.uuid.uid;
77 encoded_data.nintendo_id = nfc_data.uuid.nintendo_id;
74 encoded_data.static_lock = nfc_data.static_lock; 78 encoded_data.static_lock = nfc_data.static_lock;
75 encoded_data.compability_container = nfc_data.compability_container; 79 encoded_data.compability_container = nfc_data.compability_container;
76 encoded_data.hmac_data = nfc_data.user_memory.hmac_data; 80 encoded_data.hmac_data = nfc_data.user_memory.hmac_data;
@@ -82,10 +86,10 @@ NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
82 encoded_data.applicaton_write_counter = nfc_data.user_memory.applicaton_write_counter; 86 encoded_data.applicaton_write_counter = nfc_data.user_memory.applicaton_write_counter;
83 encoded_data.application_area_id = nfc_data.user_memory.application_area_id; 87 encoded_data.application_area_id = nfc_data.user_memory.application_area_id;
84 encoded_data.unknown = nfc_data.user_memory.unknown; 88 encoded_data.unknown = nfc_data.user_memory.unknown;
85 encoded_data.hash = nfc_data.user_memory.hash; 89 encoded_data.unknown2 = nfc_data.user_memory.unknown2;
86 encoded_data.application_area = nfc_data.user_memory.application_area; 90 encoded_data.application_area = nfc_data.user_memory.application_area;
87 encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag; 91 encoded_data.hmac_tag = nfc_data.user_memory.hmac_tag;
88 memcpy(encoded_data.uuid.data(), nfc_data.uuid.data(), sizeof(encoded_data.uuid)); 92 encoded_data.lock_bytes = nfc_data.uuid.lock_bytes;
89 encoded_data.model_info = nfc_data.user_memory.model_info; 93 encoded_data.model_info = nfc_data.user_memory.model_info;
90 encoded_data.keygen_salt = nfc_data.user_memory.keygen_salt; 94 encoded_data.keygen_salt = nfc_data.user_memory.keygen_salt;
91 encoded_data.dynamic_lock = nfc_data.dynamic_lock; 95 encoded_data.dynamic_lock = nfc_data.dynamic_lock;
@@ -99,8 +103,9 @@ NTAG215File NfcDataToEncodedData(const EncryptedNTAG215File& nfc_data) {
99EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) { 103EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
100 EncryptedNTAG215File nfc_data{}; 104 EncryptedNTAG215File nfc_data{};
101 105
102 memcpy(nfc_data.uuid.data() + 0x8, encoded_data.uuid2.data(), sizeof(encoded_data.uuid2)); 106 nfc_data.uuid.uid = encoded_data.uid;
103 memcpy(nfc_data.uuid.data(), encoded_data.uuid.data(), sizeof(encoded_data.uuid)); 107 nfc_data.uuid.nintendo_id = encoded_data.nintendo_id;
108 nfc_data.uuid.lock_bytes = encoded_data.lock_bytes;
104 nfc_data.static_lock = encoded_data.static_lock; 109 nfc_data.static_lock = encoded_data.static_lock;
105 nfc_data.compability_container = encoded_data.compability_container; 110 nfc_data.compability_container = encoded_data.compability_container;
106 nfc_data.user_memory.hmac_data = encoded_data.hmac_data; 111 nfc_data.user_memory.hmac_data = encoded_data.hmac_data;
@@ -112,7 +117,7 @@ EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
112 nfc_data.user_memory.applicaton_write_counter = encoded_data.applicaton_write_counter; 117 nfc_data.user_memory.applicaton_write_counter = encoded_data.applicaton_write_counter;
113 nfc_data.user_memory.application_area_id = encoded_data.application_area_id; 118 nfc_data.user_memory.application_area_id = encoded_data.application_area_id;
114 nfc_data.user_memory.unknown = encoded_data.unknown; 119 nfc_data.user_memory.unknown = encoded_data.unknown;
115 nfc_data.user_memory.hash = encoded_data.hash; 120 nfc_data.user_memory.unknown2 = encoded_data.unknown2;
116 nfc_data.user_memory.application_area = encoded_data.application_area; 121 nfc_data.user_memory.application_area = encoded_data.application_area;
117 nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag; 122 nfc_data.user_memory.hmac_tag = encoded_data.hmac_tag;
118 nfc_data.user_memory.model_info = encoded_data.model_info; 123 nfc_data.user_memory.model_info = encoded_data.model_info;
@@ -127,10 +132,10 @@ EncryptedNTAG215File EncodedDataToNfcData(const NTAG215File& encoded_data) {
127 132
128u32 GetTagPassword(const TagUuid& uuid) { 133u32 GetTagPassword(const TagUuid& uuid) {
129 // Verify that the generated password is correct 134 // Verify that the generated password is correct
130 u32 password = 0xAA ^ (uuid[1] ^ uuid[3]); 135 u32 password = 0xAA ^ (uuid.uid[1] ^ uuid.uid[3]);
131 password &= (0x55 ^ (uuid[2] ^ uuid[4])) << 8; 136 password &= (0x55 ^ (uuid.uid[2] ^ uuid.uid[4])) << 8;
132 password &= (0xAA ^ (uuid[3] ^ uuid[5])) << 16; 137 password &= (0xAA ^ (uuid.uid[3] ^ uuid.uid[5])) << 16;
133 password &= (0x55 ^ (uuid[4] ^ uuid[6])) << 24; 138 password &= (0x55 ^ (uuid.uid[4] ^ uuid.uid[6])) << 24;
134 return password; 139 return password;
135} 140}
136 141
@@ -138,15 +143,13 @@ HashSeed GetSeed(const NTAG215File& data) {
138 HashSeed seed{ 143 HashSeed seed{
139 .magic = data.write_counter, 144 .magic = data.write_counter,
140 .padding = {}, 145 .padding = {},
141 .uuid1 = {}, 146 .uid_1 = data.uid,
142 .uuid2 = {}, 147 .nintendo_id_1 = data.nintendo_id,
148 .uid_2 = data.uid,
149 .nintendo_id_2 = data.nintendo_id,
143 .keygen_salt = data.keygen_salt, 150 .keygen_salt = data.keygen_salt,
144 }; 151 };
145 152
146 // Copy the first 8 bytes of uuid
147 memcpy(seed.uuid1.data(), data.uuid.data(), sizeof(seed.uuid1));
148 memcpy(seed.uuid2.data(), data.uuid.data(), sizeof(seed.uuid2));
149
150 return seed; 153 return seed;
151} 154}
152 155
@@ -165,8 +168,10 @@ std::vector<u8> GenerateInternalKey(const InternalKey& key, const HashSeed& seed
165 output.insert(output.end(), key.magic_bytes.begin(), 168 output.insert(output.end(), key.magic_bytes.begin(),
166 key.magic_bytes.begin() + key.magic_length); 169 key.magic_bytes.begin() + key.magic_length);
167 170
168 output.insert(output.end(), seed.uuid1.begin(), seed.uuid1.end()); 171 output.insert(output.end(), seed.uid_1.begin(), seed.uid_1.end());
169 output.insert(output.end(), seed.uuid2.begin(), seed.uuid2.end()); 172 output.emplace_back(seed.nintendo_id_1);
173 output.insert(output.end(), seed.uid_2.begin(), seed.uid_2.end());
174 output.emplace_back(seed.nintendo_id_2);
170 175
171 for (std::size_t i = 0; i < sizeof(seed.keygen_salt); i++) { 176 for (std::size_t i = 0; i < sizeof(seed.keygen_salt); i++) {
172 output.emplace_back(static_cast<u8>(seed.keygen_salt[i] ^ key.xor_pad[i])); 177 output.emplace_back(static_cast<u8>(seed.keygen_salt[i] ^ key.xor_pad[i]));
@@ -177,7 +182,6 @@ std::vector<u8> GenerateInternalKey(const InternalKey& key, const HashSeed& seed
177 182
178void CryptoInit(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, const HmacKey& hmac_key, 183void CryptoInit(CryptoCtx& ctx, mbedtls_md_context_t& hmac_ctx, const HmacKey& hmac_key,
179 const std::vector<u8>& seed) { 184 const std::vector<u8>& seed) {
180
181 // Initialize context 185 // Initialize context
182 ctx.used = false; 186 ctx.used = false;
183 ctx.counter = 0; 187 ctx.counter = 0;
@@ -250,14 +254,15 @@ void Cipher(const DerivedKeys& keys, const NTAG215File& in_data, NTAG215File& ou
250 reinterpret_cast<unsigned char*>(&out_data.settings)); 254 reinterpret_cast<unsigned char*>(&out_data.settings));
251 255
252 // Copy the rest of the data directly 256 // Copy the rest of the data directly
253 out_data.uuid2 = in_data.uuid2; 257 out_data.uid = in_data.uid;
258 out_data.nintendo_id = in_data.nintendo_id;
259 out_data.lock_bytes = in_data.lock_bytes;
254 out_data.static_lock = in_data.static_lock; 260 out_data.static_lock = in_data.static_lock;
255 out_data.compability_container = in_data.compability_container; 261 out_data.compability_container = in_data.compability_container;
256 262
257 out_data.constant_value = in_data.constant_value; 263 out_data.constant_value = in_data.constant_value;
258 out_data.write_counter = in_data.write_counter; 264 out_data.write_counter = in_data.write_counter;
259 265
260 out_data.uuid = in_data.uuid;
261 out_data.model_info = in_data.model_info; 266 out_data.model_info = in_data.model_info;
262 out_data.keygen_salt = in_data.keygen_salt; 267 out_data.keygen_salt = in_data.keygen_salt;
263 out_data.dynamic_lock = in_data.dynamic_lock; 268 out_data.dynamic_lock = in_data.dynamic_lock;
@@ -309,7 +314,7 @@ bool DecodeAmiibo(const EncryptedNTAG215File& encrypted_tag_data, NTAG215File& t
309 // Regenerate tag HMAC. Note: order matters, data HMAC depends on tag HMAC! 314 // Regenerate tag HMAC. Note: order matters, data HMAC depends on tag HMAC!
310 constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START; 315 constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START;
311 mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(), 316 mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(),
312 sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uuid), 317 sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uid),
313 input_length, reinterpret_cast<unsigned char*>(&tag_data.hmac_tag)); 318 input_length, reinterpret_cast<unsigned char*>(&tag_data.hmac_tag));
314 319
315 // Regenerate data HMAC 320 // Regenerate data HMAC
@@ -350,7 +355,7 @@ bool EncodeAmiibo(const NTAG215File& tag_data, EncryptedNTAG215File& encrypted_t
350 constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START; 355 constexpr std::size_t input_length = DYNAMIC_LOCK_START - UUID_START;
351 constexpr std::size_t input_length2 = HMAC_TAG_START - WRITE_COUNTER_START; 356 constexpr std::size_t input_length2 = HMAC_TAG_START - WRITE_COUNTER_START;
352 mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(), 357 mbedtls_md_hmac(mbedtls_md_info_from_type(MBEDTLS_MD_SHA256), tag_keys.hmac_key.data(),
353 sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uuid), 358 sizeof(HmacKey), reinterpret_cast<const unsigned char*>(&tag_data.uid),
354 input_length, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag)); 359 input_length, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag));
355 360
356 // Init mbedtls HMAC context 361 // Init mbedtls HMAC context
@@ -364,7 +369,7 @@ bool EncodeAmiibo(const NTAG215File& tag_data, EncryptedNTAG215File& encrypted_t
364 input_length2); // Data 369 input_length2); // Data
365 mbedtls_md_hmac_update(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag), 370 mbedtls_md_hmac_update(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_tag),
366 sizeof(HashData)); // Tag HMAC 371 sizeof(HashData)); // Tag HMAC
367 mbedtls_md_hmac_update(&ctx, reinterpret_cast<const unsigned char*>(&tag_data.uuid), 372 mbedtls_md_hmac_update(&ctx, reinterpret_cast<const unsigned char*>(&tag_data.uid),
368 input_length); 373 input_length);
369 mbedtls_md_hmac_finish(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_data)); 374 mbedtls_md_hmac_finish(&ctx, reinterpret_cast<unsigned char*>(&encoded_tag_data.hmac_data));
370 375
diff --git a/src/core/hle/service/nfp/amiibo_crypto.h b/src/core/hle/service/nfp/amiibo_crypto.h
index af7335912..0175ced91 100644
--- a/src/core/hle/service/nfp/amiibo_crypto.h
+++ b/src/core/hle/service/nfp/amiibo_crypto.h
@@ -5,7 +5,7 @@
5 5
6#include <array> 6#include <array>
7 7
8#include "core/hle/service/nfp/amiibo_types.h" 8#include "core/hle/service/nfp/nfp_types.h"
9 9
10struct mbedtls_md_context_t; 10struct mbedtls_md_context_t;
11 11
@@ -22,10 +22,12 @@ using HmacKey = std::array<u8, 0x10>;
22using DrgbOutput = std::array<u8, 0x20>; 22using DrgbOutput = std::array<u8, 0x20>;
23 23
24struct HashSeed { 24struct HashSeed {
25 u16 magic; 25 u16_be magic;
26 std::array<u8, 0xE> padding; 26 std::array<u8, 0xE> padding;
27 std::array<u8, 0x8> uuid1; 27 UniqueSerialNumber uid_1;
28 std::array<u8, 0x8> uuid2; 28 u8 nintendo_id_1;
29 UniqueSerialNumber uid_2;
30 u8 nintendo_id_2;
29 std::array<u8, 0x20> keygen_salt; 31 std::array<u8, 0x20> keygen_salt;
30}; 32};
31static_assert(sizeof(HashSeed) == 0x40, "HashSeed is an invalid size"); 33static_assert(sizeof(HashSeed) == 0x40, "HashSeed is an invalid size");
diff --git a/src/core/hle/service/nfp/nfp.cpp b/src/core/hle/service/nfp/nfp.cpp
index e0ed3f771..0cb55ca49 100644
--- a/src/core/hle/service/nfp/nfp.cpp
+++ b/src/core/hle/service/nfp/nfp.cpp
@@ -1,1098 +1,43 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <array>
5#include <atomic>
6
7#include "common/fs/file.h"
8#include "common/fs/path_util.h"
9#include "common/logging/log.h" 4#include "common/logging/log.h"
10#include "common/string_util.h"
11#include "core/core.h"
12#include "core/hid/emulated_controller.h"
13#include "core/hid/hid_core.h"
14#include "core/hid/hid_types.h"
15#include "core/hle/ipc_helpers.h" 5#include "core/hle/ipc_helpers.h"
16#include "core/hle/kernel/k_event.h"
17#include "core/hle/service/mii/mii_manager.h"
18#include "core/hle/service/nfp/amiibo_crypto.h"
19#include "core/hle/service/nfp/nfp.h" 6#include "core/hle/service/nfp/nfp.h"
20#include "core/hle/service/nfp/nfp_user.h" 7#include "core/hle/service/nfp/nfp_user.h"
21 8
22namespace Service::NFP { 9namespace Service::NFP {
23namespace ErrCodes {
24constexpr Result DeviceNotFound(ErrorModule::NFP, 64);
25constexpr Result WrongDeviceState(ErrorModule::NFP, 73);
26constexpr Result NfcDisabled(ErrorModule::NFP, 80);
27constexpr Result WriteAmiiboFailed(ErrorModule::NFP, 88);
28constexpr Result TagRemoved(ErrorModule::NFP, 97);
29constexpr Result ApplicationAreaIsNotInitialized(ErrorModule::NFP, 128);
30constexpr Result WrongApplicationAreaId(ErrorModule::NFP, 152);
31constexpr Result ApplicationAreaExist(ErrorModule::NFP, 168);
32} // namespace ErrCodes
33
34IUser::IUser(Module::Interface& nfp_interface_, Core::System& system_)
35 : ServiceFramework{system_, "NFP::IUser"}, service_context{system_, service_name},
36 nfp_interface{nfp_interface_} {
37 static const FunctionInfo functions[] = {
38 {0, &IUser::Initialize, "Initialize"},
39 {1, &IUser::Finalize, "Finalize"},
40 {2, &IUser::ListDevices, "ListDevices"},
41 {3, &IUser::StartDetection, "StartDetection"},
42 {4, &IUser::StopDetection, "StopDetection"},
43 {5, &IUser::Mount, "Mount"},
44 {6, &IUser::Unmount, "Unmount"},
45 {7, &IUser::OpenApplicationArea, "OpenApplicationArea"},
46 {8, &IUser::GetApplicationArea, "GetApplicationArea"},
47 {9, &IUser::SetApplicationArea, "SetApplicationArea"},
48 {10, &IUser::Flush, "Flush"},
49 {11, nullptr, "Restore"},
50 {12, &IUser::CreateApplicationArea, "CreateApplicationArea"},
51 {13, &IUser::GetTagInfo, "GetTagInfo"},
52 {14, &IUser::GetRegisterInfo, "GetRegisterInfo"},
53 {15, &IUser::GetCommonInfo, "GetCommonInfo"},
54 {16, &IUser::GetModelInfo, "GetModelInfo"},
55 {17, &IUser::AttachActivateEvent, "AttachActivateEvent"},
56 {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"},
57 {19, &IUser::GetState, "GetState"},
58 {20, &IUser::GetDeviceState, "GetDeviceState"},
59 {21, &IUser::GetNpadId, "GetNpadId"},
60 {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"},
61 {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"},
62 {24, &IUser::RecreateApplicationArea, "RecreateApplicationArea"},
63 };
64 RegisterHandlers(functions);
65
66 availability_change_event = service_context.CreateEvent("IUser:AvailabilityChangeEvent");
67}
68
69void IUser::Initialize(Kernel::HLERequestContext& ctx) {
70 LOG_INFO(Service_NFC, "called");
71
72 state = State::Initialized;
73
74 // TODO(german77): Loop through all interfaces
75 nfp_interface.Initialize();
76
77 IPC::ResponseBuilder rb{ctx, 2, 0};
78 rb.Push(ResultSuccess);
79}
80
81void IUser::Finalize(Kernel::HLERequestContext& ctx) {
82 LOG_INFO(Service_NFP, "called");
83
84 state = State::NonInitialized;
85
86 // TODO(german77): Loop through all interfaces
87 nfp_interface.Finalize();
88
89 IPC::ResponseBuilder rb{ctx, 2};
90 rb.Push(ResultSuccess);
91}
92
93void IUser::ListDevices(Kernel::HLERequestContext& ctx) {
94 LOG_INFO(Service_NFP, "called");
95
96 if (state == State::NonInitialized) {
97 IPC::ResponseBuilder rb{ctx, 2};
98 rb.Push(ErrCodes::NfcDisabled);
99 return;
100 }
101
102 std::vector<u64> devices;
103
104 // TODO(german77): Loop through all interfaces
105 devices.push_back(nfp_interface.GetHandle());
106
107 if (devices.size() == 0) {
108 IPC::ResponseBuilder rb{ctx, 2};
109 rb.Push(ErrCodes::DeviceNotFound);
110 return;
111 }
112
113 ctx.WriteBuffer(devices);
114
115 IPC::ResponseBuilder rb{ctx, 3};
116 rb.Push(ResultSuccess);
117 rb.Push(static_cast<s32>(devices.size()));
118}
119
120void IUser::StartDetection(Kernel::HLERequestContext& ctx) {
121 IPC::RequestParser rp{ctx};
122 const auto device_handle{rp.Pop<u64>()};
123 const auto nfp_protocol{rp.Pop<s32>()};
124 LOG_INFO(Service_NFP, "called, device_handle={}, nfp_protocol={}", device_handle, nfp_protocol);
125
126 if (state == State::NonInitialized) {
127 IPC::ResponseBuilder rb{ctx, 2};
128 rb.Push(ErrCodes::NfcDisabled);
129 return;
130 }
131
132 // TODO(german77): Loop through all interfaces
133 if (device_handle == nfp_interface.GetHandle()) {
134 const auto result = nfp_interface.StartDetection(nfp_protocol);
135 IPC::ResponseBuilder rb{ctx, 2};
136 rb.Push(result);
137 return;
138 }
139
140 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
141
142 IPC::ResponseBuilder rb{ctx, 2};
143 rb.Push(ErrCodes::DeviceNotFound);
144}
145
146void IUser::StopDetection(Kernel::HLERequestContext& ctx) {
147 IPC::RequestParser rp{ctx};
148 const auto device_handle{rp.Pop<u64>()};
149 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
150
151 if (state == State::NonInitialized) {
152 IPC::ResponseBuilder rb{ctx, 2};
153 rb.Push(ErrCodes::NfcDisabled);
154 return;
155 }
156
157 // TODO(german77): Loop through all interfaces
158 if (device_handle == nfp_interface.GetHandle()) {
159 const auto result = nfp_interface.StopDetection();
160 IPC::ResponseBuilder rb{ctx, 2};
161 rb.Push(result);
162 return;
163 }
164
165 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
166
167 IPC::ResponseBuilder rb{ctx, 2};
168 rb.Push(ErrCodes::DeviceNotFound);
169}
170
171void IUser::Mount(Kernel::HLERequestContext& ctx) {
172 IPC::RequestParser rp{ctx};
173 const auto device_handle{rp.Pop<u64>()};
174 const auto model_type{rp.PopEnum<ModelType>()};
175 const auto mount_target{rp.PopEnum<MountTarget>()};
176 LOG_INFO(Service_NFP, "called, device_handle={}, model_type={}, mount_target={}", device_handle,
177 model_type, mount_target);
178
179 if (state == State::NonInitialized) {
180 IPC::ResponseBuilder rb{ctx, 2};
181 rb.Push(ErrCodes::NfcDisabled);
182 return;
183 }
184
185 // TODO(german77): Loop through all interfaces
186 if (device_handle == nfp_interface.GetHandle()) {
187 const auto result = nfp_interface.Mount();
188 IPC::ResponseBuilder rb{ctx, 2};
189 rb.Push(result);
190 return;
191 }
192
193 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
194
195 IPC::ResponseBuilder rb{ctx, 2};
196 rb.Push(ErrCodes::DeviceNotFound);
197}
198
199void IUser::Unmount(Kernel::HLERequestContext& ctx) {
200 IPC::RequestParser rp{ctx};
201 const auto device_handle{rp.Pop<u64>()};
202 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
203
204 if (state == State::NonInitialized) {
205 IPC::ResponseBuilder rb{ctx, 2};
206 rb.Push(ErrCodes::NfcDisabled);
207 return;
208 }
209
210 // TODO(german77): Loop through all interfaces
211 if (device_handle == nfp_interface.GetHandle()) {
212 const auto result = nfp_interface.Unmount();
213 IPC::ResponseBuilder rb{ctx, 2};
214 rb.Push(result);
215 return;
216 }
217
218 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
219
220 IPC::ResponseBuilder rb{ctx, 2};
221 rb.Push(ErrCodes::DeviceNotFound);
222}
223
224void IUser::OpenApplicationArea(Kernel::HLERequestContext& ctx) {
225 IPC::RequestParser rp{ctx};
226 const auto device_handle{rp.Pop<u64>()};
227 const auto access_id{rp.Pop<u32>()};
228 LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, access_id={}", device_handle,
229 access_id);
230
231 if (state == State::NonInitialized) {
232 IPC::ResponseBuilder rb{ctx, 2};
233 rb.Push(ErrCodes::NfcDisabled);
234 return;
235 }
236
237 // TODO(german77): Loop through all interfaces
238 if (device_handle == nfp_interface.GetHandle()) {
239 const auto result = nfp_interface.OpenApplicationArea(access_id);
240 IPC::ResponseBuilder rb{ctx, 2};
241 rb.Push(result);
242 return;
243 }
244
245 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
246
247 IPC::ResponseBuilder rb{ctx, 2};
248 rb.Push(ErrCodes::DeviceNotFound);
249}
250
251void IUser::GetApplicationArea(Kernel::HLERequestContext& ctx) {
252 IPC::RequestParser rp{ctx};
253 const auto device_handle{rp.Pop<u64>()};
254 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
255
256 if (state == State::NonInitialized) {
257 IPC::ResponseBuilder rb{ctx, 2};
258 rb.Push(ErrCodes::NfcDisabled);
259 return;
260 }
261
262 // TODO(german77): Loop through all interfaces
263 if (device_handle == nfp_interface.GetHandle()) {
264 ApplicationArea data{};
265 const auto result = nfp_interface.GetApplicationArea(data);
266 ctx.WriteBuffer(data);
267 IPC::ResponseBuilder rb{ctx, 3};
268 rb.Push(result);
269 rb.Push(static_cast<u32>(data.size()));
270 return;
271 }
272
273 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
274
275 IPC::ResponseBuilder rb{ctx, 2};
276 rb.Push(ErrCodes::DeviceNotFound);
277}
278
279void IUser::SetApplicationArea(Kernel::HLERequestContext& ctx) {
280 IPC::RequestParser rp{ctx};
281 const auto device_handle{rp.Pop<u64>()};
282 const auto data{ctx.ReadBuffer()};
283 LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, data_size={}", device_handle,
284 data.size());
285
286 if (state == State::NonInitialized) {
287 IPC::ResponseBuilder rb{ctx, 2};
288 rb.Push(ErrCodes::NfcDisabled);
289 return;
290 }
291
292 // TODO(german77): Loop through all interfaces
293 if (device_handle == nfp_interface.GetHandle()) {
294 const auto result = nfp_interface.SetApplicationArea(data);
295 IPC::ResponseBuilder rb{ctx, 2};
296 rb.Push(result);
297 return;
298 }
299
300 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
301
302 IPC::ResponseBuilder rb{ctx, 2};
303 rb.Push(ErrCodes::DeviceNotFound);
304}
305
306void IUser::Flush(Kernel::HLERequestContext& ctx) {
307 IPC::RequestParser rp{ctx};
308 const auto device_handle{rp.Pop<u64>()};
309 LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}", device_handle);
310
311 if (state == State::NonInitialized) {
312 IPC::ResponseBuilder rb{ctx, 2};
313 rb.Push(ErrCodes::NfcDisabled);
314 return;
315 }
316
317 // TODO(german77): Loop through all interfaces
318 if (device_handle == nfp_interface.GetHandle()) {
319 const auto result = nfp_interface.Flush();
320 IPC::ResponseBuilder rb{ctx, 2};
321 rb.Push(result);
322 return;
323 }
324
325 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
326
327 IPC::ResponseBuilder rb{ctx, 2};
328 rb.Push(ErrCodes::DeviceNotFound);
329}
330
331void IUser::CreateApplicationArea(Kernel::HLERequestContext& ctx) {
332 IPC::RequestParser rp{ctx};
333 const auto device_handle{rp.Pop<u64>()};
334 const auto access_id{rp.Pop<u32>()};
335 const auto data{ctx.ReadBuffer()};
336    LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, access_id={}, data_size={}",
337                device_handle, access_id, data.size());
338
339 if (state == State::NonInitialized) {
340 IPC::ResponseBuilder rb{ctx, 2};
341 rb.Push(ErrCodes::NfcDisabled);
342 return;
343 }
344
345 // TODO(german77): Loop through all interfaces
346 if (device_handle == nfp_interface.GetHandle()) {
347 const auto result = nfp_interface.CreateApplicationArea(access_id, data);
348 IPC::ResponseBuilder rb{ctx, 2};
349 rb.Push(result);
350 return;
351 }
352
353 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
354
355 IPC::ResponseBuilder rb{ctx, 2};
356 rb.Push(ErrCodes::DeviceNotFound);
357}
358
359void IUser::GetTagInfo(Kernel::HLERequestContext& ctx) {
360 IPC::RequestParser rp{ctx};
361 const auto device_handle{rp.Pop<u64>()};
362 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
363
364 if (state == State::NonInitialized) {
365 IPC::ResponseBuilder rb{ctx, 2};
366 rb.Push(ErrCodes::NfcDisabled);
367 return;
368 }
369
370 // TODO(german77): Loop through all interfaces
371 if (device_handle == nfp_interface.GetHandle()) {
372 TagInfo tag_info{};
373 const auto result = nfp_interface.GetTagInfo(tag_info);
374 ctx.WriteBuffer(tag_info);
375 IPC::ResponseBuilder rb{ctx, 2};
376 rb.Push(result);
377 return;
378 }
379
380 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
381
382 IPC::ResponseBuilder rb{ctx, 2};
383 rb.Push(ErrCodes::DeviceNotFound);
384}
385
386void IUser::GetRegisterInfo(Kernel::HLERequestContext& ctx) {
387 IPC::RequestParser rp{ctx};
388 const auto device_handle{rp.Pop<u64>()};
389 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
390
391 if (state == State::NonInitialized) {
392 IPC::ResponseBuilder rb{ctx, 2};
393 rb.Push(ErrCodes::NfcDisabled);
394 return;
395 }
396
397 // TODO(german77): Loop through all interfaces
398 if (device_handle == nfp_interface.GetHandle()) {
399 RegisterInfo register_info{};
400 const auto result = nfp_interface.GetRegisterInfo(register_info);
401 ctx.WriteBuffer(register_info);
402 IPC::ResponseBuilder rb{ctx, 2};
403 rb.Push(result);
404 return;
405 }
406
407 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
408
409 IPC::ResponseBuilder rb{ctx, 2};
410 rb.Push(ErrCodes::DeviceNotFound);
411}
412
413void IUser::GetCommonInfo(Kernel::HLERequestContext& ctx) {
414 IPC::RequestParser rp{ctx};
415 const auto device_handle{rp.Pop<u64>()};
416 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
417
418 if (state == State::NonInitialized) {
419 IPC::ResponseBuilder rb{ctx, 2};
420 rb.Push(ErrCodes::NfcDisabled);
421 return;
422 }
423
424 // TODO(german77): Loop through all interfaces
425 if (device_handle == nfp_interface.GetHandle()) {
426 CommonInfo common_info{};
427 const auto result = nfp_interface.GetCommonInfo(common_info);
428 ctx.WriteBuffer(common_info);
429 IPC::ResponseBuilder rb{ctx, 2};
430 rb.Push(result);
431 return;
432 }
433
434 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
435
436 IPC::ResponseBuilder rb{ctx, 2};
437 rb.Push(ErrCodes::DeviceNotFound);
438}
439
440void IUser::GetModelInfo(Kernel::HLERequestContext& ctx) {
441 IPC::RequestParser rp{ctx};
442 const auto device_handle{rp.Pop<u64>()};
443 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
444
445 if (state == State::NonInitialized) {
446 IPC::ResponseBuilder rb{ctx, 2};
447 rb.Push(ErrCodes::NfcDisabled);
448 return;
449 }
450
451 // TODO(german77): Loop through all interfaces
452 if (device_handle == nfp_interface.GetHandle()) {
453 ModelInfo model_info{};
454 const auto result = nfp_interface.GetModelInfo(model_info);
455 ctx.WriteBuffer(model_info);
456 IPC::ResponseBuilder rb{ctx, 2};
457 rb.Push(result);
458 return;
459 }
460
461 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
462
463 IPC::ResponseBuilder rb{ctx, 2};
464 rb.Push(ErrCodes::DeviceNotFound);
465}
466
467void IUser::AttachActivateEvent(Kernel::HLERequestContext& ctx) {
468 IPC::RequestParser rp{ctx};
469 const auto device_handle{rp.Pop<u64>()};
470 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
471
472 if (state == State::NonInitialized) {
473 IPC::ResponseBuilder rb{ctx, 2};
474 rb.Push(ErrCodes::NfcDisabled);
475 return;
476 }
477
478 // TODO(german77): Loop through all interfaces
479 if (device_handle == nfp_interface.GetHandle()) {
480 IPC::ResponseBuilder rb{ctx, 2, 1};
481 rb.Push(ResultSuccess);
482 rb.PushCopyObjects(nfp_interface.GetActivateEvent());
483 return;
484 }
485
486 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
487
488 IPC::ResponseBuilder rb{ctx, 2};
489 rb.Push(ErrCodes::DeviceNotFound);
490}
491
492void IUser::AttachDeactivateEvent(Kernel::HLERequestContext& ctx) {
493 IPC::RequestParser rp{ctx};
494 const auto device_handle{rp.Pop<u64>()};
495 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
496
497 if (state == State::NonInitialized) {
498 IPC::ResponseBuilder rb{ctx, 2};
499 rb.Push(ErrCodes::NfcDisabled);
500 return;
501 }
502
503 // TODO(german77): Loop through all interfaces
504 if (device_handle == nfp_interface.GetHandle()) {
505 IPC::ResponseBuilder rb{ctx, 2, 1};
506 rb.Push(ResultSuccess);
507 rb.PushCopyObjects(nfp_interface.GetDeactivateEvent());
508 return;
509 }
510
511 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
512
513 IPC::ResponseBuilder rb{ctx, 2};
514 rb.Push(ErrCodes::DeviceNotFound);
515}
516
517void IUser::GetState(Kernel::HLERequestContext& ctx) {
518    LOG_DEBUG(Service_NFP, "called");
519
520 IPC::ResponseBuilder rb{ctx, 3, 0};
521 rb.Push(ResultSuccess);
522 rb.PushEnum(state);
523}
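The word counts passed to IPC::ResponseBuilder throughout these handlers follow one convention (inferred from this diff, not from an authoritative IPC reference): the second argument is the response payload size in 32-bit words including the result code, and the optional third argument is the number of kernel handles copied back to the client. A minimal sketch of the three shapes used in this file:

    // Result code only (the common error path above):
    IPC::ResponseBuilder rb{ctx, 2};
    rb.Push(result);

    // Result code plus one 32-bit payload word:
    IPC::ResponseBuilder rb{ctx, 3};
    rb.Push(ResultSuccess);
    rb.PushEnum(state);

    // Result code plus one copied kernel handle:
    IPC::ResponseBuilder rb{ctx, 2, 1};
    rb.Push(ResultSuccess);
    rb.PushCopyObjects(event);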
524
525void IUser::GetDeviceState(Kernel::HLERequestContext& ctx) {
526 IPC::RequestParser rp{ctx};
527 const auto device_handle{rp.Pop<u64>()};
528 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
529
530 // TODO(german77): Loop through all interfaces
531 if (device_handle == nfp_interface.GetHandle()) {
532 IPC::ResponseBuilder rb{ctx, 3};
533 rb.Push(ResultSuccess);
534 rb.PushEnum(nfp_interface.GetCurrentState());
535 return;
536 }
537
538 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
539
540 IPC::ResponseBuilder rb{ctx, 2};
541 rb.Push(ErrCodes::DeviceNotFound);
542}
543
544void IUser::GetNpadId(Kernel::HLERequestContext& ctx) {
545 IPC::RequestParser rp{ctx};
546 const auto device_handle{rp.Pop<u64>()};
547 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
548
549 if (state == State::NonInitialized) {
550 IPC::ResponseBuilder rb{ctx, 2};
551 rb.Push(ErrCodes::NfcDisabled);
552 return;
553 }
554
555 // TODO(german77): Loop through all interfaces
556 if (device_handle == nfp_interface.GetHandle()) {
557 IPC::ResponseBuilder rb{ctx, 3};
558 rb.Push(ResultSuccess);
559 rb.PushEnum(nfp_interface.GetNpadId());
560 return;
561 }
562
563 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
564
565 IPC::ResponseBuilder rb{ctx, 2};
566 rb.Push(ErrCodes::DeviceNotFound);
567}
568
569void IUser::GetApplicationAreaSize(Kernel::HLERequestContext& ctx) {
570 IPC::RequestParser rp{ctx};
571 const auto device_handle{rp.Pop<u64>()};
572 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
573
574 // TODO(german77): Loop through all interfaces
575 if (device_handle == nfp_interface.GetHandle()) {
576 IPC::ResponseBuilder rb{ctx, 3};
577 rb.Push(ResultSuccess);
578        rb.Push(static_cast<u32>(sizeof(ApplicationArea)));
579 return;
580 }
581
582 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
583
584 IPC::ResponseBuilder rb{ctx, 2};
585 rb.Push(ErrCodes::DeviceNotFound);
586}
587
588void IUser::AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) {
589 LOG_DEBUG(Service_NFP, "(STUBBED) called");
590
591 if (state == State::NonInitialized) {
592 IPC::ResponseBuilder rb{ctx, 2};
593 rb.Push(ErrCodes::NfcDisabled);
594 return;
595 }
596
597 IPC::ResponseBuilder rb{ctx, 2, 1};
598 rb.Push(ResultSuccess);
599 rb.PushCopyObjects(availability_change_event->GetReadableEvent());
600}
601
602void IUser::RecreateApplicationArea(Kernel::HLERequestContext& ctx) {
603 IPC::RequestParser rp{ctx};
604 const auto device_handle{rp.Pop<u64>()};
605 const auto access_id{rp.Pop<u32>()};
606 const auto data{ctx.ReadBuffer()};
607    LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}, access_id={}, data_size={}",
608                device_handle, access_id, data.size());
609
610 if (state == State::NonInitialized) {
611 IPC::ResponseBuilder rb{ctx, 2};
612 rb.Push(ErrCodes::NfcDisabled);
613 return;
614 }
615
616 // TODO(german77): Loop through all interfaces
617 if (device_handle == nfp_interface.GetHandle()) {
618 const auto result = nfp_interface.RecreateApplicationArea(access_id, data);
619 IPC::ResponseBuilder rb{ctx, 2};
620 rb.Push(result);
621 return;
622 }
623
624 LOG_ERROR(Service_NFP, "Handle not found, device_handle={}", device_handle);
625
626 IPC::ResponseBuilder rb{ctx, 2};
627 rb.Push(ErrCodes::DeviceNotFound);
628}
629
630Module::Interface::Interface(std::shared_ptr<Module> module_, Core::System& system_,
631 const char* name)
632 : ServiceFramework{system_, name}, module{std::move(module_)},
633 npad_id{Core::HID::NpadIdType::Player1}, service_context{system_, service_name} {
634 activate_event = service_context.CreateEvent("IUser:NFPActivateEvent");
635 deactivate_event = service_context.CreateEvent("IUser:NFPDeactivateEvent");
636}
637
638Module::Interface::~Interface() = default;
639
640void Module::Interface::CreateUserInterface(Kernel::HLERequestContext& ctx) {
641 LOG_DEBUG(Service_NFP, "called");
642
643 IPC::ResponseBuilder rb{ctx, 2, 0, 1};
644 rb.Push(ResultSuccess);
645 rb.PushIpcInterface<IUser>(*this, system);
646}
647
648bool Module::Interface::LoadAmiiboFile(const std::string& filename) {
649 constexpr auto tag_size_without_password = sizeof(NTAG215File) - sizeof(NTAG215Password);
650 const Common::FS::IOFile amiibo_file{filename, Common::FS::FileAccessMode::Read,
651 Common::FS::FileType::BinaryFile};
652
653 if (!amiibo_file.IsOpen()) {
654        LOG_ERROR(Service_NFP, "Amiibo file is already in use");
655 return false;
656 }
657
658 // Workaround for files with missing password data
659 std::array<u8, sizeof(EncryptedNTAG215File)> buffer{};
660 if (amiibo_file.Read(buffer) < tag_size_without_password) {
661 LOG_ERROR(Service_NFP, "Failed to read amiibo file");
662 return false;
663 }
664 memcpy(&encrypted_tag_data, buffer.data(), sizeof(EncryptedNTAG215File));
665
666 if (!AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) {
667 LOG_INFO(Service_NFP, "Invalid amiibo");
668 return false;
669 }
670
671 file_path = filename;
672 return true;
673}
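The read above deliberately accepts dumps shorter than a full tag image so that amiibo files missing their password block still load. The concrete byte counts are assumptions based on the usual NTAG215 layout, not values stated in this diff:

    // Assumed sizes (illustrative only):
    //   sizeof(EncryptedNTAG215File) == 540   // full NTAG215 dump
    //   sizeof(NTAG215Password)      ==   8   // PWD + PACK + reserved
    // A 532-byte dump without the trailing password block still passes the
    // check, and the missing bytes stay zero because `buffer` is
    // value-initialized before the read.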
674
675bool Module::Interface::LoadAmiibo(const std::string& filename) {
676 if (device_state != DeviceState::SearchingForTag) {
677 LOG_ERROR(Service_NFP, "Game is not looking for amiibos, current state {}", device_state);
678 return false;
679 }
680
681 if (!LoadAmiiboFile(filename)) {
682 return false;
683 }
684
685 device_state = DeviceState::TagFound;
686 activate_event->GetWritableEvent().Signal();
687 return true;
688}
689
690void Module::Interface::CloseAmiibo() {
691    LOG_INFO(Service_NFP, "Removing amiibo");
692 device_state = DeviceState::TagRemoved;
693 is_data_decoded = false;
694 is_application_area_initialized = false;
695 encrypted_tag_data = {};
696 tag_data = {};
697 deactivate_event->GetWritableEvent().Signal();
698}
699
700Kernel::KReadableEvent& Module::Interface::GetActivateEvent() const {
701 return activate_event->GetReadableEvent();
702}
703
704Kernel::KReadableEvent& Module::Interface::GetDeactivateEvent() const {
705 return deactivate_event->GetReadableEvent();
706}
707
708void Module::Interface::Initialize() {
709 device_state = DeviceState::Initialized;
710 is_data_decoded = false;
711 is_application_area_initialized = false;
712 encrypted_tag_data = {};
713 tag_data = {};
714}
715
716void Module::Interface::Finalize() {
717 if (device_state == DeviceState::TagMounted) {
718 Unmount();
719 }
720 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) {
721 StopDetection();
722 }
723 device_state = DeviceState::Unaviable;
724}
725
726Result Module::Interface::StartDetection(s32 protocol_) {
727 auto npad_device = system.HIDCore().GetEmulatedController(npad_id);
728
729 // TODO(german77): Add callback for when nfc data is available
730
731 if (device_state == DeviceState::Initialized || device_state == DeviceState::TagRemoved) {
732 npad_device->SetPollingMode(Common::Input::PollingMode::NFC);
733 device_state = DeviceState::SearchingForTag;
734 protocol = protocol_;
735 return ResultSuccess;
736 }
737
738 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
739 return ErrCodes::WrongDeviceState;
740}
741
742Result Module::Interface::StopDetection() {
743 auto npad_device = system.HIDCore().GetEmulatedController(npad_id);
744 npad_device->SetPollingMode(Common::Input::PollingMode::Active);
745
746 if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) {
747 CloseAmiibo();
748 return ResultSuccess;
749 }
750 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) {
751 device_state = DeviceState::Initialized;
752 return ResultSuccess;
753 }
754
755 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
756 return ErrCodes::WrongDeviceState;
757}
758
759Result Module::Interface::Flush() {
760 // Ignore write command if we can't encrypt the data
761 if (!is_data_decoded) {
762 return ResultSuccess;
763 }
764
765 constexpr auto tag_size_without_password = sizeof(NTAG215File) - sizeof(NTAG215Password);
766 EncryptedNTAG215File tmp_encrypted_tag_data{};
767 const Common::FS::IOFile amiibo_file{file_path, Common::FS::FileAccessMode::ReadWrite,
768 Common::FS::FileType::BinaryFile};
769
770 if (!amiibo_file.IsOpen()) {
771        LOG_ERROR(Service_NFP, "Amiibo file is already in use");
772 return ErrCodes::WriteAmiiboFailed;
773 }
774
775 // Workaround for files with missing password data
776 std::array<u8, sizeof(EncryptedNTAG215File)> buffer{};
777 if (amiibo_file.Read(buffer) < tag_size_without_password) {
778        LOG_ERROR(Service_NFP, "Failed to read amiibo file");
779 return ErrCodes::WriteAmiiboFailed;
780 }
781 memcpy(&tmp_encrypted_tag_data, buffer.data(), sizeof(EncryptedNTAG215File));
782
783 if (!AmiiboCrypto::IsAmiiboValid(tmp_encrypted_tag_data)) {
784 LOG_INFO(Service_NFP, "Invalid amiibo");
785 return ErrCodes::WriteAmiiboFailed;
786 }
787
788 bool is_uuid_equal = memcmp(tmp_encrypted_tag_data.uuid.data(), tag_data.uuid.data(), 8) == 0;
789 bool is_character_equal = tmp_encrypted_tag_data.user_memory.model_info.character_id ==
790 tag_data.model_info.character_id;
791 if (!is_uuid_equal || !is_character_equal) {
792 LOG_ERROR(Service_NFP, "Not the same amiibo");
793 return ErrCodes::WriteAmiiboFailed;
794 }
795
796 if (!AmiiboCrypto::EncodeAmiibo(tag_data, encrypted_tag_data)) {
797 LOG_ERROR(Service_NFP, "Failed to encode data");
798 return ErrCodes::WriteAmiiboFailed;
799 }
800
801 // Return to the start of the file
802 if (!amiibo_file.Seek(0)) {
803        LOG_ERROR(Service_NFP, "Error writing to file");
804 return ErrCodes::WriteAmiiboFailed;
805 }
806
807 if (!amiibo_file.Write(encrypted_tag_data)) {
808        LOG_ERROR(Service_NFP, "Error writing to file");
809 return ErrCodes::WriteAmiiboFailed;
810 }
811
812 return ResultSuccess;
813}
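The uuid/character comparison above is what stops the decrypted data of one amiibo from being flushed over a different amiibo's file. The same guard, factored into a hypothetical helper (not part of this diff; field layout taken from the code above):

    // Hypothetical refactor of the same-amiibo check; mirrors the memcmp
    // above, which compares only the first 8 uuid bytes.
    bool IsSameAmiibo(const EncryptedNTAG215File& on_disk, const NTAG215File& mounted) {
        return std::memcmp(on_disk.uuid.data(), mounted.uuid.data(), 8) == 0 &&
               on_disk.user_memory.model_info.character_id ==
                   mounted.model_info.character_id;
    }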
814
815Result Module::Interface::Mount() {
816 if (device_state != DeviceState::TagFound) {
817 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
818 return ErrCodes::WrongDeviceState;
819 }
820
821    is_data_decoded = AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data);
822    LOG_INFO(Service_NFP, "Is amiibo decoded {}", is_data_decoded);
823
824    is_application_area_initialized = false;
825    device_state = DeviceState::TagMounted;
826    return ResultSuccess;
827}
828
829Result Module::Interface::Unmount() {
830 if (device_state != DeviceState::TagMounted) {
831 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
832 return ErrCodes::WrongDeviceState;
833 }
834
835 is_data_decoded = false;
836 is_application_area_initialized = false;
837 device_state = DeviceState::TagFound;
838 return ResultSuccess;
839}
840
841Result Module::Interface::GetTagInfo(TagInfo& tag_info) const {
842 if (device_state != DeviceState::TagFound && device_state != DeviceState::TagMounted) {
843 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
844 return ErrCodes::WrongDeviceState;
845 }
846
847 tag_info = {
848 .uuid = encrypted_tag_data.uuid,
849 .uuid_length = static_cast<u8>(encrypted_tag_data.uuid.size()),
850 .protocol = protocol,
851 .tag_type = static_cast<u32>(encrypted_tag_data.user_memory.model_info.amiibo_type),
852 };
853
854 return ResultSuccess;
855}
856
857Result Module::Interface::GetCommonInfo(CommonInfo& common_info) const {
858 if (device_state != DeviceState::TagMounted) {
859 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
860 return ErrCodes::WrongDeviceState;
861 }
862
863 if (is_data_decoded && tag_data.settings.settings.amiibo_initialized != 0) {
864 const auto& settings = tag_data.settings;
865 // TODO: Validate this data
866 common_info = {
867 .last_write_year = settings.write_date.GetYear(),
868 .last_write_month = settings.write_date.GetMonth(),
869 .last_write_day = settings.write_date.GetDay(),
870 .write_counter = settings.crc_counter,
871 .version = 1,
872 .application_area_size = sizeof(ApplicationArea),
873 };
874 return ResultSuccess;
875 }
876
877 // Generate a generic answer
878 common_info = {
879 .last_write_year = 2022,
880 .last_write_month = 2,
881 .last_write_day = 7,
882 .write_counter = 0,
883 .version = 1,
884 .application_area_size = sizeof(ApplicationArea),
885 };
886 return ResultSuccess;
887}
888
889Result Module::Interface::GetModelInfo(ModelInfo& model_info) const {
890 if (device_state != DeviceState::TagMounted) {
891 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
892 return ErrCodes::WrongDeviceState;
893 }
894
895 const auto& model_info_data = encrypted_tag_data.user_memory.model_info;
896 model_info = {
897 .character_id = model_info_data.character_id,
898 .character_variant = model_info_data.character_variant,
899 .amiibo_type = model_info_data.amiibo_type,
900 .model_number = model_info_data.model_number,
901 .series = model_info_data.series,
902 .constant_value = model_info_data.constant_value,
903 };
904 return ResultSuccess;
905}
906
907Result Module::Interface::GetRegisterInfo(RegisterInfo& register_info) const {
908 if (device_state != DeviceState::TagMounted) {
909 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
910 if (device_state == DeviceState::TagRemoved) {
911 return ErrCodes::TagRemoved;
912 }
913 return ErrCodes::WrongDeviceState;
914 }
915
916 Service::Mii::MiiManager manager;
917
918 if (is_data_decoded && tag_data.settings.settings.amiibo_initialized != 0) {
919 const auto& settings = tag_data.settings;
920
921 // TODO: Validate this data
922 register_info = {
923 .mii_char_info = manager.ConvertV3ToCharInfo(tag_data.owner_mii),
924 .first_write_year = settings.init_date.GetYear(),
925 .first_write_month = settings.init_date.GetMonth(),
926 .first_write_day = settings.init_date.GetDay(),
927 .amiibo_name = GetAmiiboName(settings),
928 .font_region = {},
929        };
930
931        return ResultSuccess;
932    }
933
934    // Generate a generic answer
935    register_info = {
936        .mii_char_info = manager.BuildDefault(0),
937 .first_write_year = 2022,
938 .first_write_month = 2,
939 .first_write_day = 7,
940 .amiibo_name = {'Y', 'u', 'z', 'u', 'A', 'm', 'i', 'i', 'b', 'o', 0},
941 .font_region = {},
942 };
943 return ResultSuccess;
944}
945
946 Result Module::Interface::OpenApplicationArea(u32 access_id) {
947    if (device_state != DeviceState::TagMounted) {
948 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
949 if (device_state == DeviceState::TagRemoved) {
950 return ErrCodes::TagRemoved;
951        }
952 return ErrCodes::WrongDeviceState;
953 }
954
955 // Fallback for lack of amiibo keys
956 if (!is_data_decoded) {
957 LOG_WARNING(Service_NFP, "Application area is not initialized");
958 return ErrCodes::ApplicationAreaIsNotInitialized;
959 }
960
961    if (tag_data.settings.settings.appdata_initialized == 0) {
962        LOG_WARNING(Service_NFP, "Application area is not initialized");
963        return ErrCodes::ApplicationAreaIsNotInitialized;
964 }
965
966 if (tag_data.application_area_id != access_id) {
967 LOG_WARNING(Service_NFP, "Wrong application area id");
968 return ErrCodes::WrongApplicationAreaId;
969 }
970
971 is_application_area_initialized = true;
972 return ResultSuccess;
973}
974
975Result Module::Interface::GetApplicationArea(ApplicationArea& data) const {
976 if (device_state != DeviceState::TagMounted) {
977 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
978 if (device_state == DeviceState::TagRemoved) {
979 return ErrCodes::TagRemoved;
980 }
981 return ErrCodes::WrongDeviceState;
982 }
983
984 if (!is_application_area_initialized) {
985 LOG_ERROR(Service_NFP, "Application area is not initialized");
986 return ErrCodes::ApplicationAreaIsNotInitialized;
987 }
988
989 data = tag_data.application_area;
990
991 return ResultSuccess;
992}
993
994Result Module::Interface::SetApplicationArea(const std::vector<u8>& data) {
995 if (device_state != DeviceState::TagMounted) {
996 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
997 if (device_state == DeviceState::TagRemoved) {
998 return ErrCodes::TagRemoved;
999 }
1000 return ErrCodes::WrongDeviceState;
1001 }
1002
1003 if (!is_application_area_initialized) {
1004 LOG_ERROR(Service_NFP, "Application area is not initialized");
1005 return ErrCodes::ApplicationAreaIsNotInitialized;
1006 }
1007
1008 if (data.size() != sizeof(ApplicationArea)) {
1009 LOG_ERROR(Service_NFP, "Wrong data size {}", data.size());
1010 return ResultUnknown;
1011 }
1012
1013 std::memcpy(&tag_data.application_area, data.data(), sizeof(ApplicationArea));
1014 return ResultSuccess;
1015}
1016
1017Result Module::Interface::CreateApplicationArea(u32 access_id, const std::vector<u8>& data) {
1018 if (device_state != DeviceState::TagMounted) {
1019 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
1020 if (device_state == DeviceState::TagRemoved) {
1021 return ErrCodes::TagRemoved;
1022 }
1023 return ErrCodes::WrongDeviceState;
1024 }
1025
1026 if (tag_data.settings.settings.appdata_initialized != 0) {
1027        LOG_ERROR(Service_NFP, "Application area already exists");
1028 return ErrCodes::ApplicationAreaExist;
1029 }
1030
1031 if (data.size() != sizeof(ApplicationArea)) {
1032 LOG_ERROR(Service_NFP, "Wrong data size {}", data.size());
1033 return ResultUnknown;
1034 }
1035
1036 std::memcpy(&tag_data.application_area, data.data(), sizeof(ApplicationArea));
1037 tag_data.application_area_id = access_id;
1038
1039 return ResultSuccess;
1040}
1041
1042Result Module::Interface::RecreateApplicationArea(u32 access_id, const std::vector<u8>& data) {
1043 if (device_state != DeviceState::TagMounted) {
1044 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
1045 if (device_state == DeviceState::TagRemoved) {
1046 return ErrCodes::TagRemoved;
1047 }
1048 return ErrCodes::WrongDeviceState;
1049 }
1050
1051 if (data.size() != sizeof(ApplicationArea)) {
1052 LOG_ERROR(Service_NFP, "Wrong data size {}", data.size());
1053 return ResultUnknown;
1054 }
1055
1056 std::memcpy(&tag_data.application_area, data.data(), sizeof(ApplicationArea));
1057 tag_data.application_area_id = access_id;
1058
1059 return ResultSuccess;
1060}
1061
1062u64 Module::Interface::GetHandle() const {
1063    // Generate a handle based on the npad id
1064 return static_cast<u64>(npad_id);
1065}
1066
1067DeviceState Module::Interface::GetCurrentState() const {
1068 return device_state;
1069}
1070
1071Core::HID::NpadIdType Module::Interface::GetNpadId() const {
1072 // Return first connected npad id as a workaround for lack of a single nfc interface per
1073 // controller
1074 return system.HIDCore().GetFirstNpadId();
1075}
1076
1077AmiiboName Module::Interface::GetAmiiboName(const AmiiboSettings& settings) const {
1078 std::array<char16_t, amiibo_name_length> settings_amiibo_name{};
1079 AmiiboName amiibo_name{};
1080
1081 // Convert from big endian to little endian
1082 for (std::size_t i = 0; i < amiibo_name_length; i++) {
1083 settings_amiibo_name[i] = static_cast<u16>(settings.amiibo_name[i]);
1084    }
1085
1086    // Convert from utf16 to utf8
1087    const auto amiibo_name_utf8 = Common::UTF16ToUTF8(settings_amiibo_name.data());
1088 memcpy(amiibo_name.data(), amiibo_name_utf8.data(), amiibo_name_utf8.size());
1089
1090 return amiibo_name;
1091}
1092
1093 void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) {
1094    auto module = std::make_shared<Module>();
1095    std::make_shared<NFP_User>(module, system)->InstallAsService(service_manager);
1096 }
1097
1098 } // namespace Service::NFP

10
11 class IUserManager final : public ServiceFramework<IUserManager> {
12 public:
13     explicit IUserManager(Core::System& system_) : ServiceFramework{system_, "nfp:user"} {
14         // clang-format off
15         static const FunctionInfo functions[] = {
16             {0, &IUserManager::CreateUserInterface, "CreateUserInterface"},
17         };
18         // clang-format on
19
20         RegisterHandlers(functions);
21     }
22
23 private:
24     void CreateUserInterface(Kernel::HLERequestContext& ctx) {
25         LOG_DEBUG(Service_NFP, "called");
26
27         if (user_interface == nullptr) {
28             user_interface = std::make_shared<IUser>(system);
29         }
30
31         IPC::ResponseBuilder rb{ctx, 2, 0, 1};
32         rb.Push(ResultSuccess);
33         rb.PushIpcInterface<IUser>(user_interface);
34     }
35
36     std::shared_ptr<IUser> user_interface;
37 };
38
39 void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system) {
40     std::make_shared<IUserManager>(system)->InstallAsService(service_manager);
41 }
42
43 } // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp.h b/src/core/hle/service/nfp/nfp.h
index 0de0b48e7..a25c362b8 100644
--- a/src/core/hle/service/nfp/nfp.h
+++ b/src/core/hle/service/nfp/nfp.h
@@ -3,170 +3,9 @@
3 3
4 4 #pragma once
5 5
6#include <array>
7#include <vector>
8
9#include "common/common_funcs.h"
10#include "core/hle/service/kernel_helpers.h"
11#include "core/hle/service/mii/types.h"
12#include "core/hle/service/nfp/amiibo_types.h"
13 6 #include "core/hle/service/service.h"
14 7
15namespace Kernel {
16class KEvent;
17class KReadableEvent;
18} // namespace Kernel
19
20namespace Core::HID {
21enum class NpadIdType : u32;
22} // namespace Core::HID
23
24 8 namespace Service::NFP {
25using AmiiboName = std::array<char, (amiibo_name_length * 4) + 1>;
26
27struct TagInfo {
28 TagUuid uuid;
29 u8 uuid_length;
30 INSERT_PADDING_BYTES(0x15);
31 s32 protocol;
32 u32 tag_type;
33 INSERT_PADDING_BYTES(0x30);
34};
35static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size");
36
37struct CommonInfo {
38 u16 last_write_year;
39 u8 last_write_month;
40 u8 last_write_day;
41 u16 write_counter;
42 u16 version;
43 u32 application_area_size;
44 INSERT_PADDING_BYTES(0x34);
45};
46static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size");
47
48struct ModelInfo {
49 u16 character_id;
50 u8 character_variant;
51 AmiiboType amiibo_type;
52 u16 model_number;
53 AmiiboSeries series;
54 u8 constant_value; // Must be 02
55 INSERT_PADDING_BYTES(0x38); // Unknown
56};
57static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size");
58
59struct RegisterInfo {
60 Service::Mii::CharInfo mii_char_info;
61 u16 first_write_year;
62 u8 first_write_month;
63 u8 first_write_day;
64 AmiiboName amiibo_name;
65 u8 font_region;
66 INSERT_PADDING_BYTES(0x7A);
67};
68static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size");
69
70class Module final {
71public:
72 class Interface : public ServiceFramework<Interface> {
73 public:
74 explicit Interface(std::shared_ptr<Module> module_, Core::System& system_,
75 const char* name);
76 ~Interface() override;
77
78 void CreateUserInterface(Kernel::HLERequestContext& ctx);
79 bool LoadAmiibo(const std::string& filename);
80 bool LoadAmiiboFile(const std::string& filename);
81 void CloseAmiibo();
82
83 void Initialize();
84 void Finalize();
85
86 Result StartDetection(s32 protocol_);
87 Result StopDetection();
88 Result Mount();
89 Result Unmount();
90 Result Flush();
91
92 Result GetTagInfo(TagInfo& tag_info) const;
93 Result GetCommonInfo(CommonInfo& common_info) const;
94 Result GetModelInfo(ModelInfo& model_info) const;
95 Result GetRegisterInfo(RegisterInfo& register_info) const;
96
97 Result OpenApplicationArea(u32 access_id);
98 Result GetApplicationArea(ApplicationArea& data) const;
99 Result SetApplicationArea(const std::vector<u8>& data);
100 Result CreateApplicationArea(u32 access_id, const std::vector<u8>& data);
101 Result RecreateApplicationArea(u32 access_id, const std::vector<u8>& data);
102
103 u64 GetHandle() const;
104 DeviceState GetCurrentState() const;
105 Core::HID::NpadIdType GetNpadId() const;
106
107 Kernel::KReadableEvent& GetActivateEvent() const;
108 Kernel::KReadableEvent& GetDeactivateEvent() const;
109
110 protected:
111 std::shared_ptr<Module> module;
112
113 private:
114 AmiiboName GetAmiiboName(const AmiiboSettings& settings) const;
115
116 const Core::HID::NpadIdType npad_id;
117
118 bool is_data_decoded{};
119 bool is_application_area_initialized{};
120 s32 protocol;
121 std::string file_path{};
122 Kernel::KEvent* activate_event;
123 Kernel::KEvent* deactivate_event;
124 DeviceState device_state{DeviceState::Unaviable};
125 KernelHelpers::ServiceContext service_context;
126
127 NTAG215File tag_data{};
128 EncryptedNTAG215File encrypted_tag_data{};
129 };
130};
131
132class IUser final : public ServiceFramework<IUser> {
133public:
134 explicit IUser(Module::Interface& nfp_interface_, Core::System& system_);
135
136private:
137 void Initialize(Kernel::HLERequestContext& ctx);
138 void Finalize(Kernel::HLERequestContext& ctx);
139 void ListDevices(Kernel::HLERequestContext& ctx);
140 void StartDetection(Kernel::HLERequestContext& ctx);
141 void StopDetection(Kernel::HLERequestContext& ctx);
142 void Mount(Kernel::HLERequestContext& ctx);
143 void Unmount(Kernel::HLERequestContext& ctx);
144 void OpenApplicationArea(Kernel::HLERequestContext& ctx);
145 void GetApplicationArea(Kernel::HLERequestContext& ctx);
146 void SetApplicationArea(Kernel::HLERequestContext& ctx);
147 void Flush(Kernel::HLERequestContext& ctx);
148 void CreateApplicationArea(Kernel::HLERequestContext& ctx);
149 void GetTagInfo(Kernel::HLERequestContext& ctx);
150 void GetRegisterInfo(Kernel::HLERequestContext& ctx);
151 void GetCommonInfo(Kernel::HLERequestContext& ctx);
152 void GetModelInfo(Kernel::HLERequestContext& ctx);
153 void AttachActivateEvent(Kernel::HLERequestContext& ctx);
154 void AttachDeactivateEvent(Kernel::HLERequestContext& ctx);
155 void GetState(Kernel::HLERequestContext& ctx);
156 void GetDeviceState(Kernel::HLERequestContext& ctx);
157 void GetNpadId(Kernel::HLERequestContext& ctx);
158 void GetApplicationAreaSize(Kernel::HLERequestContext& ctx);
159 void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx);
160 void RecreateApplicationArea(Kernel::HLERequestContext& ctx);
161
162 KernelHelpers::ServiceContext service_context;
163
164 // TODO(german77): We should have a vector of interfaces
165 Module::Interface& nfp_interface;
166
167 State state{State::NonInitialized};
168 Kernel::KEvent* availability_change_event;
169};
170 9
171 10 void InstallInterfaces(SM::ServiceManager& service_manager, Core::System& system);
172 11
diff --git a/src/core/hle/service/nfp/nfp_device.cpp b/src/core/hle/service/nfp/nfp_device.cpp
new file mode 100644
index 000000000..ec895ac01
--- /dev/null
+++ b/src/core/hle/service/nfp/nfp_device.cpp
@@ -0,0 +1,681 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include <array>
5#include <atomic>
6
7#include "common/fs/file.h"
8#include "common/fs/path_util.h"
9#include "common/input.h"
10#include "common/logging/log.h"
11#include "common/string_util.h"
12#include "common/tiny_mt.h"
13#include "core/core.h"
14#include "core/hid/emulated_controller.h"
15#include "core/hid/hid_core.h"
16#include "core/hid/hid_types.h"
17#include "core/hle/ipc_helpers.h"
18#include "core/hle/kernel/k_event.h"
19#include "core/hle/service/mii/mii_manager.h"
20#include "core/hle/service/nfp/amiibo_crypto.h"
21#include "core/hle/service/nfp/nfp.h"
22#include "core/hle/service/nfp/nfp_device.h"
23#include "core/hle/service/nfp/nfp_result.h"
24#include "core/hle/service/nfp/nfp_user.h"
25#include "core/hle/service/time/time_manager.h"
26#include "core/hle/service/time/time_zone_content_manager.h"
27#include "core/hle/service/time/time_zone_types.h"
28
29namespace Service::NFP {
30NfpDevice::NfpDevice(Core::HID::NpadIdType npad_id_, Core::System& system_,
31 KernelHelpers::ServiceContext& service_context_,
32 Kernel::KEvent* availability_change_event_)
33 : npad_id{npad_id_}, system{system_}, service_context{service_context_},
34 availability_change_event{availability_change_event_} {
35 activate_event = service_context.CreateEvent("IUser:NFPActivateEvent");
36 deactivate_event = service_context.CreateEvent("IUser:NFPDeactivateEvent");
37 npad_device = system.HIDCore().GetEmulatedController(npad_id);
38
39 Core::HID::ControllerUpdateCallback engine_callback{
40 .on_change = [this](Core::HID::ControllerTriggerType type) { NpadUpdate(type); },
41 .is_npad_service = false,
42 };
43 is_controller_set = true;
44 callback_key = npad_device->SetCallback(engine_callback);
45
46 auto& standard_steady_clock{system.GetTimeManager().GetStandardSteadyClockCore()};
47 current_posix_time = standard_steady_clock.GetCurrentTimePoint(system).time_point;
48}
49
50NfpDevice::~NfpDevice() {
51 if (!is_controller_set) {
52 return;
53 }
54 npad_device->DeleteCallback(callback_key);
55 is_controller_set = false;
56}
57
58void NfpDevice::NpadUpdate(Core::HID::ControllerTriggerType type) {
59 if (type == Core::HID::ControllerTriggerType::Connected ||
60 type == Core::HID::ControllerTriggerType::Disconnected) {
61 availability_change_event->GetWritableEvent().Signal();
62 return;
63 }
64
65 if (type != Core::HID::ControllerTriggerType::Nfc) {
66 return;
67 }
68
69 if (!npad_device->IsConnected()) {
70 return;
71 }
72
73 const auto nfc_status = npad_device->GetNfc();
74 switch (nfc_status.state) {
75 case Common::Input::NfcState::NewAmiibo:
76 LoadAmiibo(nfc_status.data);
77 break;
78 case Common::Input::NfcState::AmiiboRemoved:
79 if (device_state != DeviceState::SearchingForTag) {
80 CloseAmiibo();
81 }
82 break;
83 default:
84 break;
85 }
86}
87
88bool NfpDevice::LoadAmiibo(std::span<const u8> data) {
89 if (device_state != DeviceState::SearchingForTag) {
90 LOG_ERROR(Service_NFP, "Game is not looking for amiibos, current state {}", device_state);
91 return false;
92 }
93
94 if (data.size() != sizeof(EncryptedNTAG215File)) {
95 LOG_ERROR(Service_NFP, "Not an amiibo, size={}", data.size());
96 return false;
97 }
98
99 memcpy(&encrypted_tag_data, data.data(), sizeof(EncryptedNTAG215File));
100
101 device_state = DeviceState::TagFound;
102 deactivate_event->GetReadableEvent().Clear();
103 activate_event->GetWritableEvent().Signal();
104 return true;
105}
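LoadAmiibo now receives the raw tag image from the input stack instead of a file path, and rejects anything that is not exactly a full encrypted dump. A caller-side sketch (hypothetical; in this diff the only caller is NpadUpdate above, and access control is ignored):

    // Hypothetical feed of a tag image into the device:
    std::vector<u8> dump(sizeof(EncryptedNTAG215File)); // filled by the input backend
    if (nfp_device.LoadAmiibo(dump)) {
        // activate_event was signaled; the game can now mount the tag
    }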
106
107void NfpDevice::CloseAmiibo() {
108    LOG_INFO(Service_NFP, "Removing amiibo");
109
110 if (device_state == DeviceState::TagMounted) {
111 Unmount();
112 }
113
114 device_state = DeviceState::TagRemoved;
115 encrypted_tag_data = {};
116 tag_data = {};
117 activate_event->GetReadableEvent().Clear();
118 deactivate_event->GetWritableEvent().Signal();
119}
120
121Kernel::KReadableEvent& NfpDevice::GetActivateEvent() const {
122 return activate_event->GetReadableEvent();
123}
124
125Kernel::KReadableEvent& NfpDevice::GetDeactivateEvent() const {
126 return deactivate_event->GetReadableEvent();
127}
128
129void NfpDevice::Initialize() {
130 device_state = npad_device->HasNfc() ? DeviceState::Initialized : DeviceState::Unavailable;
131 encrypted_tag_data = {};
132 tag_data = {};
133}
134
135void NfpDevice::Finalize() {
136 if (device_state == DeviceState::TagMounted) {
137 Unmount();
138 }
139 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) {
140 StopDetection();
141 }
142 device_state = DeviceState::Unavailable;
143}
144
145Result NfpDevice::StartDetection(s32 protocol_) {
146 if (device_state != DeviceState::Initialized && device_state != DeviceState::TagRemoved) {
147 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
148 return WrongDeviceState;
149 }
150
151 if (!npad_device->SetPollingMode(Common::Input::PollingMode::NFC)) {
152 LOG_ERROR(Service_NFP, "Nfc not supported");
153 return NfcDisabled;
154 }
155
156 device_state = DeviceState::SearchingForTag;
157 protocol = protocol_;
158 return ResultSuccess;
159}
160
161Result NfpDevice::StopDetection() {
162 npad_device->SetPollingMode(Common::Input::PollingMode::Active);
163
164 if (device_state == DeviceState::Initialized) {
165 return ResultSuccess;
166 }
167
168 if (device_state == DeviceState::TagFound || device_state == DeviceState::TagMounted) {
169 CloseAmiibo();
170 return ResultSuccess;
171 }
172 if (device_state == DeviceState::SearchingForTag || device_state == DeviceState::TagRemoved) {
173 device_state = DeviceState::Initialized;
174 return ResultSuccess;
175 }
176
177 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
178 return WrongDeviceState;
179}
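Taken together, the methods above implement a small device state machine; the transitions below are reconstructed from the code in this file:

    // Unavailable       --Initialize()------> Initialized   (controller has NFC)
    // Initialized       --StartDetection()--> SearchingForTag
    // SearchingForTag   --LoadAmiibo()------> TagFound
    // TagFound          --Mount()-----------> TagMounted
    // TagMounted        --Unmount()---------> TagFound
    // TagFound/Mounted  --CloseAmiibo()-----> TagRemoved
    // Searching/Removed --StopDetection()---> Initialized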
180
181Result NfpDevice::Flush() {
182 if (device_state != DeviceState::TagMounted) {
183 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
184 if (device_state == DeviceState::TagRemoved) {
185 return TagRemoved;
186 }
187 return WrongDeviceState;
188 }
189
190 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
191        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
192 return WrongDeviceState;
193 }
194
195 auto& settings = tag_data.settings;
196
197 const auto& current_date = GetAmiiboDate(current_posix_time);
198 if (settings.write_date.raw_date != current_date.raw_date) {
199 settings.write_date = current_date;
200 settings.crc_counter++;
201 // TODO: Find how to calculate the crc check
202 // settings.crc = CalculateCRC(settings);
203 }
204
205 tag_data.write_counter++;
206
207 if (!AmiiboCrypto::EncodeAmiibo(tag_data, encrypted_tag_data)) {
208 LOG_ERROR(Service_NFP, "Failed to encode data");
209 return WriteAmiiboFailed;
210 }
211
212 std::vector<u8> data(sizeof(encrypted_tag_data));
213 memcpy(data.data(), &encrypted_tag_data, sizeof(encrypted_tag_data));
214
215 if (!npad_device->WriteNfc(data)) {
216        LOG_ERROR(Service_NFP, "Error writing NFC data");
217 return WriteAmiiboFailed;
218 }
219
220 is_data_moddified = false;
221
222 return ResultSuccess;
223}
224
225Result NfpDevice::Mount(MountTarget mount_target_) {
226 if (device_state != DeviceState::TagFound) {
227 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
228 return WrongDeviceState;
229 }
230
231 if (!AmiiboCrypto::IsAmiiboValid(encrypted_tag_data)) {
232 LOG_ERROR(Service_NFP, "Not an amiibo");
233 return NotAnAmiibo;
234 }
235
236 if (!AmiiboCrypto::DecodeAmiibo(encrypted_tag_data, tag_data)) {
237 LOG_ERROR(Service_NFP, "Can't decode amiibo {}", device_state);
238 return CorruptedData;
239 }
240
241 device_state = DeviceState::TagMounted;
242 mount_target = mount_target_;
243 return ResultSuccess;
244}
245
246Result NfpDevice::Unmount() {
247 if (device_state != DeviceState::TagMounted) {
248 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
249 if (device_state == DeviceState::TagRemoved) {
250 return TagRemoved;
251 }
252 return WrongDeviceState;
253 }
254
255 // Save data before unloading the amiibo
256 if (is_data_moddified) {
257 Flush();
258 }
259
260 device_state = DeviceState::TagFound;
261 mount_target = MountTarget::None;
262 is_app_area_open = false;
263
264 return ResultSuccess;
265}
266
267Result NfpDevice::GetTagInfo(TagInfo& tag_info) const {
268 if (device_state != DeviceState::TagFound && device_state != DeviceState::TagMounted) {
269 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
270 if (device_state == DeviceState::TagRemoved) {
271 return TagRemoved;
272 }
273 return WrongDeviceState;
274 }
275
276 tag_info = {
277 .uuid = encrypted_tag_data.uuid.uid,
278 .uuid_length = static_cast<u8>(encrypted_tag_data.uuid.uid.size()),
279 .protocol = TagProtocol::TypeA,
280 .tag_type = TagType::Type2,
281 };
282
283 return ResultSuccess;
284}
285
286Result NfpDevice::GetCommonInfo(CommonInfo& common_info) const {
287 if (device_state != DeviceState::TagMounted) {
288 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
289 if (device_state == DeviceState::TagRemoved) {
290 return TagRemoved;
291 }
292 return WrongDeviceState;
293 }
294
295 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
296        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
297 return WrongDeviceState;
298 }
299
300 const auto& settings = tag_data.settings;
301
302 // TODO: Validate this data
303 common_info = {
304 .last_write_date = settings.write_date.GetWriteDate(),
305 .write_counter = tag_data.write_counter,
306 .version = 0,
307 .application_area_size = sizeof(ApplicationArea),
308 };
309 return ResultSuccess;
310}
311
312Result NfpDevice::GetModelInfo(ModelInfo& model_info) const {
313 if (device_state != DeviceState::TagMounted) {
314 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
315 if (device_state == DeviceState::TagRemoved) {
316 return TagRemoved;
317 }
318 return WrongDeviceState;
319 }
320
321 const auto& model_info_data = encrypted_tag_data.user_memory.model_info;
322 model_info = {
323 .character_id = model_info_data.character_id,
324 .character_variant = model_info_data.character_variant,
325 .amiibo_type = model_info_data.amiibo_type,
326 .model_number = model_info_data.model_number,
327 .series = model_info_data.series,
328 };
329 return ResultSuccess;
330}
331
332Result NfpDevice::GetRegisterInfo(RegisterInfo& register_info) const {
333 if (device_state != DeviceState::TagMounted) {
334 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
335 if (device_state == DeviceState::TagRemoved) {
336 return TagRemoved;
337 }
338 return WrongDeviceState;
339 }
340
341 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
342        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
343 return WrongDeviceState;
344 }
345
346 if (tag_data.settings.settings.amiibo_initialized == 0) {
347 return RegistrationIsNotInitialized;
348 }
349
350 Service::Mii::MiiManager manager;
351 const auto& settings = tag_data.settings;
352
353 // TODO: Validate this data
354 register_info = {
355 .mii_char_info = manager.ConvertV3ToCharInfo(tag_data.owner_mii),
356 .creation_date = settings.init_date.GetWriteDate(),
357 .amiibo_name = GetAmiiboName(settings),
358 .font_region = {},
359 };
360
361 return ResultSuccess;
362}
363
364Result NfpDevice::SetNicknameAndOwner(const AmiiboName& amiibo_name) {
365 if (device_state != DeviceState::TagMounted) {
366 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
367 if (device_state == DeviceState::TagRemoved) {
368 return TagRemoved;
369 }
370 return WrongDeviceState;
371 }
372
373 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
374        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
375 return WrongDeviceState;
376 }
377
378 Service::Mii::MiiManager manager;
379 auto& settings = tag_data.settings;
380
381 settings.init_date = GetAmiiboDate(current_posix_time);
382 settings.write_date = GetAmiiboDate(current_posix_time);
383 settings.crc_counter++;
384 // TODO: Find how to calculate the crc check
385 // settings.crc = CalculateCRC(settings);
386
387 SetAmiiboName(settings, amiibo_name);
388 tag_data.owner_mii = manager.ConvertCharInfoToV3(manager.BuildDefault(0));
389 settings.settings.amiibo_initialized.Assign(1);
390
391 return Flush();
392}
393
394Result NfpDevice::RestoreAmiibo() {
395 if (device_state != DeviceState::TagMounted) {
396 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
397 if (device_state == DeviceState::TagRemoved) {
398 return TagRemoved;
399 }
400 return WrongDeviceState;
401 }
402
403 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
404        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
405 return WrongDeviceState;
406 }
407
408 // TODO: Load amiibo from backup on system
409 LOG_ERROR(Service_NFP, "Not Implemented");
410 return ResultSuccess;
411}
412
413Result NfpDevice::DeleteAllData() {
414 const auto result = DeleteApplicationArea();
415 if (result.IsError()) {
416 return result;
417 }
418
419 if (device_state != DeviceState::TagMounted) {
420 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
421 if (device_state == DeviceState::TagRemoved) {
422 return TagRemoved;
423 }
424 return WrongDeviceState;
425 }
426
427 Common::TinyMT rng{};
428 rng.GenerateRandomBytes(&tag_data.owner_mii, sizeof(tag_data.owner_mii));
429 tag_data.settings.settings.amiibo_initialized.Assign(0);
430
431 return Flush();
432}
433
434Result NfpDevice::OpenApplicationArea(u32 access_id) {
435 if (device_state != DeviceState::TagMounted) {
436 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
437 if (device_state == DeviceState::TagRemoved) {
438 return TagRemoved;
439 }
440 return WrongDeviceState;
441 }
442
443 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
444        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
445 return WrongDeviceState;
446 }
447
448 if (tag_data.settings.settings.appdata_initialized.Value() == 0) {
449 LOG_WARNING(Service_NFP, "Application area is not initialized");
450 return ApplicationAreaIsNotInitialized;
451 }
452
453 if (tag_data.application_area_id != access_id) {
454 LOG_WARNING(Service_NFP, "Wrong application area id");
455 return WrongApplicationAreaId;
456 }
457
458 is_app_area_open = true;
459
460 return ResultSuccess;
461}
462
463Result NfpDevice::GetApplicationArea(std::vector<u8>& data) const {
464 if (device_state != DeviceState::TagMounted) {
465 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
466 if (device_state == DeviceState::TagRemoved) {
467 return TagRemoved;
468 }
469 return WrongDeviceState;
470 }
471
472 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
473        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
474 return WrongDeviceState;
475 }
476
477 if (!is_app_area_open) {
478 LOG_ERROR(Service_NFP, "Application area is not open");
479 return WrongDeviceState;
480 }
481
482 if (tag_data.settings.settings.appdata_initialized.Value() == 0) {
483 LOG_ERROR(Service_NFP, "Application area is not initialized");
484 return ApplicationAreaIsNotInitialized;
485 }
486
487 if (data.size() > sizeof(ApplicationArea)) {
488 data.resize(sizeof(ApplicationArea));
489 }
490
491 memcpy(data.data(), tag_data.application_area.data(), data.size());
492
493 return ResultSuccess;
494}
495
496Result NfpDevice::SetApplicationArea(std::span<const u8> data) {
497 if (device_state != DeviceState::TagMounted) {
498 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
499 if (device_state == DeviceState::TagRemoved) {
500 return TagRemoved;
501 }
502 return WrongDeviceState;
503 }
504
505 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
506        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
507 return WrongDeviceState;
508 }
509
510 if (!is_app_area_open) {
511 LOG_ERROR(Service_NFP, "Application area is not open");
512 return WrongDeviceState;
513 }
514
515 if (tag_data.settings.settings.appdata_initialized.Value() == 0) {
516 LOG_ERROR(Service_NFP, "Application area is not initialized");
517 return ApplicationAreaIsNotInitialized;
518 }
519
520 if (data.size() > sizeof(ApplicationArea)) {
521 LOG_ERROR(Service_NFP, "Wrong data size {}", data.size());
522 return ResultUnknown;
523 }
524
525 Common::TinyMT rng{};
526 std::memcpy(tag_data.application_area.data(), data.data(), data.size());
527 // Fill remaining data with random numbers
528 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(),
529 sizeof(ApplicationArea) - data.size());
530
531 tag_data.applicaton_write_counter++;
532 is_data_moddified = true;
533
534 return ResultSuccess;
535}
536
537Result NfpDevice::CreateApplicationArea(u32 access_id, std::span<const u8> data) {
538 if (device_state != DeviceState::TagMounted) {
539 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
540 if (device_state == DeviceState::TagRemoved) {
541 return TagRemoved;
542 }
543 return WrongDeviceState;
544 }
545
546 if (tag_data.settings.settings.appdata_initialized.Value() != 0) {
547        LOG_ERROR(Service_NFP, "Application area already exists");
548 return ApplicationAreaExist;
549 }
550
551 return RecreateApplicationArea(access_id, data);
552}
553
554Result NfpDevice::RecreateApplicationArea(u32 access_id, std::span<const u8> data) {
555 if (device_state != DeviceState::TagMounted) {
556 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
557 if (device_state == DeviceState::TagRemoved) {
558 return TagRemoved;
559 }
560 return WrongDeviceState;
561 }
562
563 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
564        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
565 return WrongDeviceState;
566 }
567
568 if (data.size() > sizeof(ApplicationArea)) {
569 LOG_ERROR(Service_NFP, "Wrong data size {}", data.size());
570 return WrongApplicationAreaSize;
571 }
572
573 Common::TinyMT rng{};
574 std::memcpy(tag_data.application_area.data(), data.data(), data.size());
575 // Fill remaining data with random numbers
576 rng.GenerateRandomBytes(tag_data.application_area.data() + data.size(),
577 sizeof(ApplicationArea) - data.size());
578
579    // TODO: Investigate why the title id needs to be modified
580 tag_data.title_id = system.GetCurrentProcessProgramID();
581 tag_data.title_id = tag_data.title_id | 0x30000000ULL;
582 tag_data.settings.settings.appdata_initialized.Assign(1);
583 tag_data.application_area_id = access_id;
584 tag_data.applicaton_write_counter++;
585 tag_data.unknown = {};
586
587 return Flush();
588}
589
590Result NfpDevice::DeleteApplicationArea() {
591 if (device_state != DeviceState::TagMounted) {
592 LOG_ERROR(Service_NFP, "Wrong device state {}", device_state);
593 if (device_state == DeviceState::TagRemoved) {
594 return TagRemoved;
595 }
596 return WrongDeviceState;
597 }
598
599 if (mount_target == MountTarget::None || mount_target == MountTarget::Rom) {
600        LOG_ERROR(Service_NFP, "Amiibo is read only, device state {}", device_state);
601 return WrongDeviceState;
602 }
603
604 Common::TinyMT rng{};
605 rng.GenerateRandomBytes(tag_data.application_area.data(), sizeof(ApplicationArea));
606 rng.GenerateRandomBytes(&tag_data.title_id, sizeof(u64));
607 rng.GenerateRandomBytes(&tag_data.application_area_id, sizeof(u32));
608 tag_data.settings.settings.appdata_initialized.Assign(0);
609 tag_data.applicaton_write_counter++;
610 tag_data.unknown = {};
611
612 return Flush();
613}
614
615u64 NfpDevice::GetHandle() const {
616 // Generate a handle based of the npad id
617 return static_cast<u64>(npad_id);
618}
619
620u32 NfpDevice::GetApplicationAreaSize() const {
621 return sizeof(ApplicationArea);
622}
623
624DeviceState NfpDevice::GetCurrentState() const {
625 return device_state;
626}
627
628Core::HID::NpadIdType NfpDevice::GetNpadId() const {
629 return npad_id;
630}
631
632AmiiboName NfpDevice::GetAmiiboName(const AmiiboSettings& settings) const {
633 std::array<char16_t, amiibo_name_length> settings_amiibo_name{};
634 AmiiboName amiibo_name{};
635
636 // Convert from big endian to little endian
637 for (std::size_t i = 0; i < amiibo_name_length; i++) {
638 settings_amiibo_name[i] = static_cast<u16>(settings.amiibo_name[i]);
639 }
640
641 // Convert from utf16 to utf8
642 const auto amiibo_name_utf8 = Common::UTF16ToUTF8(settings_amiibo_name.data());
643 memcpy(amiibo_name.data(), amiibo_name_utf8.data(), amiibo_name_utf8.size());
644
645 return amiibo_name;
646}
647
648void NfpDevice::SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name) {
649 std::array<char16_t, amiibo_name_length> settings_amiibo_name{};
650
651 // Convert from utf8 to utf16
652 const auto amiibo_name_utf16 = Common::UTF8ToUTF16(amiibo_name.data());
653 memcpy(settings_amiibo_name.data(), amiibo_name_utf16.data(),
654 amiibo_name_utf16.size() * sizeof(char16_t));
655
656 // Convert from little endian to big endian
657 for (std::size_t i = 0; i < amiibo_name_length; i++) {
658 settings.amiibo_name[i] = static_cast<u16_be>(settings_amiibo_name[i]);
659 }
660}
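GetAmiiboName and SetAmiiboName are intended as inverses across the UTF-8/UTF-16 and little/big endian conversions. A round-trip sketch (hypothetical; access control is ignored and the name is assumed to fit in amiibo_name_length UTF-16 code units):

    AmiiboSettings settings{};
    AmiiboName name{};                        // zero-initialized, so NUL-padded
    std::memcpy(name.data(), "YuzuAmiibo", 10);
    SetAmiiboName(settings, name);            // UTF-8 -> UTF-16, LE -> BE
    const AmiiboName round_trip = GetAmiiboName(settings);
    // round_trip compares equal to name; longer names would be truncated.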
661
662AmiiboDate NfpDevice::GetAmiiboDate(s64 posix_time) const {
663 const auto& time_zone_manager =
664 system.GetTimeManager().GetTimeZoneContentManager().GetTimeZoneManager();
665 Time::TimeZone::CalendarInfo calendar_info{};
666 AmiiboDate amiibo_date{};
667
668 amiibo_date.SetYear(2000);
669 amiibo_date.SetMonth(1);
670 amiibo_date.SetDay(1);
671
672 if (time_zone_manager.ToCalendarTime({}, posix_time, calendar_info) == ResultSuccess) {
673 amiibo_date.SetYear(calendar_info.time.year);
674 amiibo_date.SetMonth(calendar_info.time.month);
675 amiibo_date.SetDay(calendar_info.time.day);
676 }
677
678 return amiibo_date;
679}
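GetAmiiboDate never fails: the 2000-01-01 default set up front is only overwritten when the time zone conversion succeeds, so missing or broken time zone data silently yields the default date instead of an error. A behavior sketch:

    const AmiiboDate date = GetAmiiboDate(0);
    // With a working time zone configuration this is the POSIX epoch
    // rendered in the configured zone; if ToCalendarTime() fails, the
    // returned date is the 2000-01-01 fallback.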
680
681} // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp_device.h b/src/core/hle/service/nfp/nfp_device.h
new file mode 100644
index 000000000..a5b72cf19
--- /dev/null
+++ b/src/core/hle/service/nfp/nfp_device.h
@@ -0,0 +1,101 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include <array>
7#include <vector>
8
9#include "common/common_funcs.h"
10#include "core/hle/service/kernel_helpers.h"
11#include "core/hle/service/mii/types.h"
12#include "core/hle/service/nfp/nfp_types.h"
13#include "core/hle/service/service.h"
14
15namespace Kernel {
16class KEvent;
17class KReadableEvent;
18} // namespace Kernel
19
20namespace Core {
21class System;
22} // namespace Core
23
24namespace Core::HID {
25class EmulatedController;
26enum class ControllerTriggerType;
27enum class NpadIdType : u32;
28} // namespace Core::HID
29
30namespace Service::NFP {
31class NfpDevice {
32public:
33 NfpDevice(Core::HID::NpadIdType npad_id_, Core::System& system_,
34 KernelHelpers::ServiceContext& service_context_,
35 Kernel::KEvent* availability_change_event_);
36 ~NfpDevice();
37
38 void Initialize();
39 void Finalize();
40
41 Result StartDetection(s32 protocol_);
42 Result StopDetection();
43 Result Mount(MountTarget mount_target);
44 Result Unmount();
45 Result Flush();
46
47 Result GetTagInfo(TagInfo& tag_info) const;
48 Result GetCommonInfo(CommonInfo& common_info) const;
49 Result GetModelInfo(ModelInfo& model_info) const;
50 Result GetRegisterInfo(RegisterInfo& register_info) const;
51
52 Result SetNicknameAndOwner(const AmiiboName& amiibo_name);
53 Result RestoreAmiibo();
54 Result DeleteAllData();
55
56 Result OpenApplicationArea(u32 access_id);
57 Result GetApplicationArea(std::vector<u8>& data) const;
58 Result SetApplicationArea(std::span<const u8> data);
59 Result CreateApplicationArea(u32 access_id, std::span<const u8> data);
60 Result RecreateApplicationArea(u32 access_id, std::span<const u8> data);
61 Result DeleteApplicationArea();
62
63 u64 GetHandle() const;
64 u32 GetApplicationAreaSize() const;
65 DeviceState GetCurrentState() const;
66 Core::HID::NpadIdType GetNpadId() const;
67
68 Kernel::KReadableEvent& GetActivateEvent() const;
69 Kernel::KReadableEvent& GetDeactivateEvent() const;
70
71private:
72 void NpadUpdate(Core::HID::ControllerTriggerType type);
73 bool LoadAmiibo(std::span<const u8> data);
74 void CloseAmiibo();
75
76 AmiiboName GetAmiiboName(const AmiiboSettings& settings) const;
77 void SetAmiiboName(AmiiboSettings& settings, const AmiiboName& amiibo_name);
78 AmiiboDate GetAmiiboDate(s64 posix_time) const;
79
80 bool is_controller_set{};
81 int callback_key;
82 const Core::HID::NpadIdType npad_id;
83 Core::System& system;
84 Core::HID::EmulatedController* npad_device = nullptr;
85 KernelHelpers::ServiceContext& service_context;
86 Kernel::KEvent* activate_event = nullptr;
87 Kernel::KEvent* deactivate_event = nullptr;
88 Kernel::KEvent* availability_change_event = nullptr;
89
90 bool is_data_moddified{};
91 bool is_app_area_open{};
92 s32 protocol{};
93 s64 current_posix_time{};
94 MountTarget mount_target{MountTarget::None};
95 DeviceState device_state{DeviceState::Unavailable};
96
97 NTAG215File tag_data{};
98 EncryptedNTAG215File encrypted_tag_data{};
99};
100
101} // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp_result.h b/src/core/hle/service/nfp/nfp_result.h
new file mode 100644
index 000000000..d8e4cf094
--- /dev/null
+++ b/src/core/hle/service/nfp/nfp_result.h
@@ -0,0 +1,24 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include "core/hle/result.h"
7
8namespace Service::NFP {
9
10constexpr Result DeviceNotFound(ErrorModule::NFP, 64);
11constexpr Result InvalidArgument(ErrorModule::NFP, 65);
12constexpr Result WrongApplicationAreaSize(ErrorModule::NFP, 68);
13constexpr Result WrongDeviceState(ErrorModule::NFP, 73);
14constexpr Result NfcDisabled(ErrorModule::NFP, 80);
15constexpr Result WriteAmiiboFailed(ErrorModule::NFP, 88);
16constexpr Result TagRemoved(ErrorModule::NFP, 97);
17constexpr Result RegistrationIsNotInitialized(ErrorModule::NFP, 120);
18constexpr Result ApplicationAreaIsNotInitialized(ErrorModule::NFP, 128);
19constexpr Result CorruptedData(ErrorModule::NFP, 144);
20constexpr Result WrongApplicationAreaId(ErrorModule::NFP, 152);
21constexpr Result ApplicationAreaExist(ErrorModule::NFP, 168);
22constexpr Result NotAnAmiibo(ErrorModule::NFP, 178);
23
24} // namespace Service::NFP
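// Editor's note: under the common Result encoding of (2000 + module)-(description),
// with NFP as module 115, these would surface on a console as 2115-XXXX codes,
// e.g. DeviceNotFound as 2115-0064. This mapping is inferred, not stated here.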
diff --git a/src/core/hle/service/nfp/amiibo_types.h b/src/core/hle/service/nfp/nfp_types.h
index bf2de811a..c09f9ddb6 100644
--- a/src/core/hle/service/nfp/amiibo_types.h
+++ b/src/core/hle/service/nfp/nfp_types.h
@@ -5,6 +5,7 @@
5 5
6#include <array> 6#include <array>
7 7
8#include "common/swap.h"
8#include "core/hle/service/mii/types.h" 9#include "core/hle/service/mii/types.h"
9 10
10namespace Service::NFP { 11namespace Service::NFP {
@@ -27,7 +28,7 @@ enum class DeviceState : u32 {
27 TagFound, 28 TagFound,
28 TagRemoved, 29 TagRemoved,
29 TagMounted, 30 TagMounted,
30 Unaviable, 31 Unavailable,
31 Finalized, 32 Finalized,
32}; 33};
33 34
@@ -36,6 +37,7 @@ enum class ModelType : u32 {
36}; 37};
37 38
38enum class MountTarget : u32 { 39enum class MountTarget : u32 {
40 None,
39 Rom, 41 Rom,
40 Ram, 42 Ram,
41 All, 43 All,
@@ -73,21 +75,101 @@ enum class AmiiboSeries : u8 {
73 Diablo, 75 Diablo,
74}; 76};
75 77
76using TagUuid = std::array<u8, 10>; 78enum class TagType : u32 {
79 None,
80 Type1, // ISO14443A RW 96-2k bytes 106kbit/s
81 Type2, // ISO14443A RW/RO 540 bytes 106kbit/s
82 Type3, // Sony Felica RW/RO 2k bytes 212kbit/s
83 Type4, // ISO14443A RW/RO 4k-32k bytes 424kbit/s
84 Type5, // ISO15693 RW/RO 540 bytes 106kbit/s
85};
86
87enum class PackedTagType : u8 {
88 None,
89 Type1, // ISO14443A RW 96-2k bytes 106kbit/s
90 Type2, // ISO14443A RW/RO 540 bytes 106kbit/s
91 Type3, // Sony Felica RW/RO 2k bytes 212kbit/s
92 Type4, // ISO14443A RW/RO 4k-32k bytes 424kbit/s
93 Type5, // ISO15693 RW/RO 540 bytes 106kbit/s
94};
95
96enum class TagProtocol : u32 {
97 None,
98 TypeA, // ISO14443A
99 TypeB, // ISO14443B
100 TypeF, // Sony Felica
101};
102
103using UniqueSerialNumber = std::array<u8, 7>;
104using LockBytes = std::array<u8, 2>;
77using HashData = std::array<u8, 0x20>; 105using HashData = std::array<u8, 0x20>;
78using ApplicationArea = std::array<u8, 0xD8>; 106using ApplicationArea = std::array<u8, 0xD8>;
107using AmiiboName = std::array<char, (amiibo_name_length * 4) + 1>;
108
109struct TagUuid {
110 UniqueSerialNumber uid;
111 u8 nintendo_id;
112 LockBytes lock_bytes;
113};
114static_assert(sizeof(TagUuid) == 10, "TagUuid is an invalid size");
115
116struct WriteDate {
117 u16 year;
118 u8 month;
119 u8 day;
120};
121static_assert(sizeof(WriteDate) == 0x4, "WriteDate is an invalid size");
79 122
80struct AmiiboDate { 123struct AmiiboDate {
81 u16 raw_date{}; 124 u16 raw_date{};
82 125
126 u16 GetValue() const {
127 return Common::swap16(raw_date);
128 }
129
83 u16 GetYear() const { 130 u16 GetYear() const {
84 return static_cast<u16>(((raw_date & 0xFE00) >> 9) + 2000); 131 return static_cast<u16>(((GetValue() & 0xFE00) >> 9) + 2000);
85 } 132 }
86 u8 GetMonth() const { 133 u8 GetMonth() const {
87 return static_cast<u8>(((raw_date & 0x01E0) >> 5) - 1); 134 return static_cast<u8>((GetValue() & 0x01E0) >> 5);
88 } 135 }
89 u8 GetDay() const { 136 u8 GetDay() const {
90 return static_cast<u8>(raw_date & 0x001F); 137 return static_cast<u8>(GetValue() & 0x001F);
138 }
139
140 WriteDate GetWriteDate() const {
141 if (!IsValidDate()) {
142 return {
143 .year = 2000,
144 .month = 1,
145 .day = 1,
146 };
147 }
148 return {
149 .year = GetYear(),
150 .month = GetMonth(),
151 .day = GetDay(),
152 };
153 }
154
155 void SetYear(u16 year) {
156 const u16 year_converted = static_cast<u16>((year - 2000) << 9);
157 raw_date = Common::swap16((GetValue() & ~0xFE00) | year_converted);
158 }
159 void SetMonth(u8 month) {
160 const u16 month_converted = static_cast<u16>(month << 5);
161 raw_date = Common::swap16((GetValue() & ~0x01E0) | month_converted);
162 }
163 void SetDay(u8 day) {
164 const u16 day_converted = static_cast<u16>(day);
165 raw_date = Common::swap16((GetValue() & ~0x001F) | day_converted);
166 }
167
168 bool IsValidDate() const {
169 const bool is_day_valid = GetDay() > 0 && GetDay() < 32;
170 const bool is_month_valid = GetMonth() > 0 && GetMonth() < 13;
171 const bool is_year_valid = GetYear() >= 2000;
172 return is_year_valid && is_month_valid && is_day_valid;
91 } 173 }
92}; 174};
93static_assert(sizeof(AmiiboDate) == 2, "AmiiboDate is an invalid size"); 175static_assert(sizeof(AmiiboDate) == 2, "AmiiboDate is an invalid size");
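// Editor's sketch (not part of the change): a worked example of the packed
// layout above (year: 7 bits << 9, month: 4 bits << 5, day: 5 bits, stored
// big-endian on the tag).
//     AmiiboDate date{};
//     date.SetYear(2022); // (2022 - 2000) << 9 = 0x2C00
//     date.SetMonth(10);  // 10 << 5            = 0x0140
//     date.SetDay(15);    //                      0x000F
//     // date.GetValue() == 0x2D4F; raw_date holds the byte-swapped 0x4F2D,
//     // matching the big-endian u16 written to the physical tag.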
@@ -117,9 +199,9 @@ struct AmiiboModelInfo {
117 u16 character_id; 199 u16 character_id;
118 u8 character_variant; 200 u8 character_variant;
119 AmiiboType amiibo_type; 201 AmiiboType amiibo_type;
120 u16 model_number; 202 u16_be model_number;
121 AmiiboSeries series; 203 AmiiboSeries series;
122 u8 constant_value; // Must be 02 204 PackedTagType tag_type;
123 INSERT_PADDING_BYTES(0x4); // Unknown 205 INSERT_PADDING_BYTES(0x4); // Unknown
124}; 206};
125static_assert(sizeof(AmiiboModelInfo) == 0xC, "AmiiboModelInfo is an invalid size"); 207static_assert(sizeof(AmiiboModelInfo) == 0xC, "AmiiboModelInfo is an invalid size");
@@ -134,7 +216,7 @@ static_assert(sizeof(NTAG215Password) == 0x8, "NTAG215Password is an invalid siz
134#pragma pack(1) 216#pragma pack(1)
135struct EncryptedAmiiboFile { 217struct EncryptedAmiiboFile {
136 u8 constant_value; // Must be A5 218 u8 constant_value; // Must be A5
137 u16 write_counter; // Number of times the amiibo has been written? 219 u16_be write_counter; // Number of times the amiibo has been written?
138 INSERT_PADDING_BYTES(0x1); // Unknown 1 220 INSERT_PADDING_BYTES(0x1); // Unknown 1
139 AmiiboSettings settings; // Encrypted amiibo settings 221 AmiiboSettings settings; // Encrypted amiibo settings
140 HashData hmac_tag; // Hash 222 HashData hmac_tag; // Hash
@@ -146,18 +228,18 @@ struct EncryptedAmiiboFile {
146 u16_be applicaton_write_counter; // Encrypted Counter 228 u16_be applicaton_write_counter; // Encrypted Counter
147 u32_be application_area_id; // Encrypted Game id 229 u32_be application_area_id; // Encrypted Game id
148 std::array<u8, 0x2> unknown; 230 std::array<u8, 0x2> unknown;
149 HashData hash; // Probably a SHA256-HMAC hash? 231 std::array<u32, 0x8> unknown2;
150 ApplicationArea application_area; // Encrypted Game data 232 ApplicationArea application_area; // Encrypted Game data
151}; 233};
152static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size"); 234static_assert(sizeof(EncryptedAmiiboFile) == 0x1F8, "AmiiboFile is an invalid size");
153 235
154struct NTAG215File { 236struct NTAG215File {
155 std::array<u8, 0x2> uuid2; 237 LockBytes lock_bytes; // Tag UUID
156 u16 static_lock; // Set defined pages as read only 238 u16 static_lock; // Set defined pages as read only
157 u32 compability_container; // Defines available memory 239 u32 compability_container; // Defines available memory
158 HashData hmac_data; // Hash 240 HashData hmac_data; // Hash
159 u8 constant_value; // Must be A5 241 u8 constant_value; // Must be A5
160 u16 write_counter; // Number of times the amiibo has been written? 242 u16_be write_counter; // Number of times the amiibo has been written?
161 INSERT_PADDING_BYTES(0x1); // Unknown 1 243 INSERT_PADDING_BYTES(0x1); // Unknown 1
162 AmiiboSettings settings; 244 AmiiboSettings settings;
163 Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data 245 Service::Mii::Ver3StoreData owner_mii; // Encrypted Mii data
@@ -165,10 +247,11 @@ struct NTAG215File {
165 u16_be applicaton_write_counter; // Encrypted Counter 247 u16_be applicaton_write_counter; // Encrypted Counter
166 u32_be application_area_id; 248 u32_be application_area_id;
167 std::array<u8, 0x2> unknown; 249 std::array<u8, 0x2> unknown;
168 HashData hash; // Probably a SHA256-HMAC hash? 250 std::array<u32, 0x8> unknown2;
169 ApplicationArea application_area; // Encrypted Game data 251 ApplicationArea application_area; // Encrypted Game data
170 HashData hmac_tag; // Hash 252 HashData hmac_tag; // Hash
171 std::array<u8, 0x8> uuid; 253 UniqueSerialNumber uid; // Unique serial number
254 u8 nintendo_id; // Tag UUID
172 AmiiboModelInfo model_info; 255 AmiiboModelInfo model_info;
173 HashData keygen_salt; // Salt 256 HashData keygen_salt; // Salt
174 u32 dynamic_lock; // Dynamic lock 257 u32 dynamic_lock; // Dynamic lock
@@ -194,4 +277,44 @@ static_assert(sizeof(EncryptedNTAG215File) == 0x21C, "EncryptedNTAG215File is an
194static_assert(std::is_trivially_copyable_v<EncryptedNTAG215File>, 277static_assert(std::is_trivially_copyable_v<EncryptedNTAG215File>,
195 "EncryptedNTAG215File must be trivially copyable."); 278 "EncryptedNTAG215File must be trivially copyable.");
196 279
280struct TagInfo {
281 UniqueSerialNumber uuid;
282 INSERT_PADDING_BYTES(0x3);
283 u8 uuid_length;
284 INSERT_PADDING_BYTES(0x15);
285 TagProtocol protocol;
286 TagType tag_type;
287 INSERT_PADDING_BYTES(0x30);
288};
289static_assert(sizeof(TagInfo) == 0x58, "TagInfo is an invalid size");
290
291struct CommonInfo {
292 WriteDate last_write_date;
293 u16 write_counter;
294 u8 version;
295 INSERT_PADDING_BYTES(0x1);
296 u32 application_area_size;
297 INSERT_PADDING_BYTES(0x34);
298};
299static_assert(sizeof(CommonInfo) == 0x40, "CommonInfo is an invalid size");
300
301struct ModelInfo {
302 u16 character_id;
303 u8 character_variant;
304 AmiiboType amiibo_type;
305 u16 model_number;
306 AmiiboSeries series;
307 INSERT_PADDING_BYTES(0x39); // Unknown
308};
309static_assert(sizeof(ModelInfo) == 0x40, "ModelInfo is an invalid size");
310
311struct RegisterInfo {
312 Service::Mii::CharInfo mii_char_info;
313 WriteDate creation_date;
314 AmiiboName amiibo_name;
315 u8 font_region;
316 INSERT_PADDING_BYTES(0x7A);
317};
318static_assert(sizeof(RegisterInfo) == 0x100, "RegisterInfo is an invalid size");
319
197} // namespace Service::NFP 320} // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp_user.cpp b/src/core/hle/service/nfp/nfp_user.cpp
index 2d7b156cf..4ed53b534 100644
--- a/src/core/hle/service/nfp/nfp_user.cpp
+++ b/src/core/hle/service/nfp/nfp_user.cpp
@@ -1,18 +1,674 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-License-Identifier: GPL-2.0-or-later
3 3
4#include <array>
5#include <atomic>
6
7#include "common/logging/log.h"
8#include "core/core.h"
9#include "core/hid/emulated_controller.h"
10#include "core/hid/hid_core.h"
11#include "core/hid/hid_types.h"
12#include "core/hle/ipc_helpers.h"
13#include "core/hle/kernel/k_event.h"
14#include "core/hle/service/mii/mii_manager.h"
15#include "core/hle/service/nfp/nfp_device.h"
16#include "core/hle/service/nfp/nfp_result.h"
4#include "core/hle/service/nfp/nfp_user.h" 17#include "core/hle/service/nfp/nfp_user.h"
5 18
6namespace Service::NFP { 19namespace Service::NFP {
7 20
8NFP_User::NFP_User(std::shared_ptr<Module> module_, Core::System& system_) 21IUser::IUser(Core::System& system_)
9 : Interface(std::move(module_), system_, "nfp:user") { 22 : ServiceFramework{system_, "NFP::IUser"}, service_context{system_, service_name} {
10 static const FunctionInfo functions[] = { 23 static const FunctionInfo functions[] = {
11 {0, &NFP_User::CreateUserInterface, "CreateUserInterface"}, 24 {0, &IUser::Initialize, "Initialize"},
25 {1, &IUser::Finalize, "Finalize"},
26 {2, &IUser::ListDevices, "ListDevices"},
27 {3, &IUser::StartDetection, "StartDetection"},
28 {4, &IUser::StopDetection, "StopDetection"},
29 {5, &IUser::Mount, "Mount"},
30 {6, &IUser::Unmount, "Unmount"},
31 {7, &IUser::OpenApplicationArea, "OpenApplicationArea"},
32 {8, &IUser::GetApplicationArea, "GetApplicationArea"},
33 {9, &IUser::SetApplicationArea, "SetApplicationArea"},
34 {10, &IUser::Flush, "Flush"},
35 {11, &IUser::Restore, "Restore"},
36 {12, &IUser::CreateApplicationArea, "CreateApplicationArea"},
37 {13, &IUser::GetTagInfo, "GetTagInfo"},
38 {14, &IUser::GetRegisterInfo, "GetRegisterInfo"},
39 {15, &IUser::GetCommonInfo, "GetCommonInfo"},
40 {16, &IUser::GetModelInfo, "GetModelInfo"},
41 {17, &IUser::AttachActivateEvent, "AttachActivateEvent"},
42 {18, &IUser::AttachDeactivateEvent, "AttachDeactivateEvent"},
43 {19, &IUser::GetState, "GetState"},
44 {20, &IUser::GetDeviceState, "GetDeviceState"},
45 {21, &IUser::GetNpadId, "GetNpadId"},
46 {22, &IUser::GetApplicationAreaSize, "GetApplicationAreaSize"},
47 {23, &IUser::AttachAvailabilityChangeEvent, "AttachAvailabilityChangeEvent"},
48 {24, &IUser::RecreateApplicationArea, "RecreateApplicationArea"},
12 }; 49 };
13 RegisterHandlers(functions); 50 RegisterHandlers(functions);
51
52 availability_change_event = service_context.CreateEvent("IUser:AvailabilityChangeEvent");
53
54 for (u32 device_index = 0; device_index < 10; device_index++) {
55 devices[device_index] =
56 std::make_shared<NfpDevice>(Core::HID::IndexToNpadIdType(device_index), system,
57 service_context, availability_change_event);
58 }
59}
60
61void IUser::Initialize(Kernel::HLERequestContext& ctx) {
62    LOG_INFO(Service_NFP, "called");
63
64 state = State::Initialized;
65
66 for (auto& device : devices) {
67 device->Initialize();
68 }
69
70 IPC::ResponseBuilder rb{ctx, 2, 0};
71 rb.Push(ResultSuccess);
72}
73
74void IUser::Finalize(Kernel::HLERequestContext& ctx) {
75 LOG_INFO(Service_NFP, "called");
76
77 state = State::NonInitialized;
78
79 for (auto& device : devices) {
80 device->Finalize();
81 }
82
83 IPC::ResponseBuilder rb{ctx, 2};
84 rb.Push(ResultSuccess);
85}
86
87void IUser::ListDevices(Kernel::HLERequestContext& ctx) {
88 LOG_INFO(Service_NFP, "called");
89
90 if (state == State::NonInitialized) {
91 IPC::ResponseBuilder rb{ctx, 2};
92 rb.Push(NfcDisabled);
93 return;
94 }
95
96 if (!ctx.CanWriteBuffer()) {
97 IPC::ResponseBuilder rb{ctx, 2};
98 rb.Push(InvalidArgument);
99 return;
100 }
101
102 if (ctx.GetWriteBufferSize() == 0) {
103 IPC::ResponseBuilder rb{ctx, 2};
104 rb.Push(InvalidArgument);
105 return;
106 }
107
108 std::vector<u64> nfp_devices;
109 const std::size_t max_allowed_devices = ctx.GetWriteBufferSize() / sizeof(u64);
110
111 for (auto& device : devices) {
112 if (nfp_devices.size() >= max_allowed_devices) {
113 continue;
114 }
115 if (device->GetCurrentState() != DeviceState::Unavailable) {
116 nfp_devices.push_back(device->GetHandle());
117 }
118 }
119
120 if (nfp_devices.size() == 0) {
121 IPC::ResponseBuilder rb{ctx, 2};
122 rb.Push(DeviceNotFound);
123 return;
124 }
125
126 ctx.WriteBuffer(nfp_devices);
127
128 IPC::ResponseBuilder rb{ctx, 3};
129 rb.Push(ResultSuccess);
130 rb.Push(static_cast<s32>(nfp_devices.size()));
131}
132
133void IUser::StartDetection(Kernel::HLERequestContext& ctx) {
134 IPC::RequestParser rp{ctx};
135 const auto device_handle{rp.Pop<u64>()};
136 const auto nfp_protocol{rp.Pop<s32>()};
137 LOG_INFO(Service_NFP, "called, device_handle={}, nfp_protocol={}", device_handle, nfp_protocol);
138
139 if (state == State::NonInitialized) {
140 IPC::ResponseBuilder rb{ctx, 2};
141 rb.Push(NfcDisabled);
142 return;
143 }
144
145 auto device = GetNfpDevice(device_handle);
146
147 if (!device.has_value()) {
148 IPC::ResponseBuilder rb{ctx, 2};
149 rb.Push(DeviceNotFound);
150 return;
151 }
152
153 const auto result = device.value()->StartDetection(nfp_protocol);
154 IPC::ResponseBuilder rb{ctx, 2};
155 rb.Push(result);
156}
157
158void IUser::StopDetection(Kernel::HLERequestContext& ctx) {
159 IPC::RequestParser rp{ctx};
160 const auto device_handle{rp.Pop<u64>()};
161 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
162
163 if (state == State::NonInitialized) {
164 IPC::ResponseBuilder rb{ctx, 2};
165 rb.Push(NfcDisabled);
166 return;
167 }
168
169 auto device = GetNfpDevice(device_handle);
170
171 if (!device.has_value()) {
172 IPC::ResponseBuilder rb{ctx, 2};
173 rb.Push(DeviceNotFound);
174 return;
175 }
176
177 const auto result = device.value()->StopDetection();
178 IPC::ResponseBuilder rb{ctx, 2};
179 rb.Push(result);
180}
181
182void IUser::Mount(Kernel::HLERequestContext& ctx) {
183 IPC::RequestParser rp{ctx};
184 const auto device_handle{rp.Pop<u64>()};
185 const auto model_type{rp.PopEnum<ModelType>()};
186 const auto mount_target{rp.PopEnum<MountTarget>()};
187 LOG_INFO(Service_NFP, "called, device_handle={}, model_type={}, mount_target={}", device_handle,
188 model_type, mount_target);
189
190 if (state == State::NonInitialized) {
191 IPC::ResponseBuilder rb{ctx, 2};
192 rb.Push(NfcDisabled);
193 return;
194 }
195
196 auto device = GetNfpDevice(device_handle);
197
198 if (!device.has_value()) {
199 IPC::ResponseBuilder rb{ctx, 2};
200 rb.Push(DeviceNotFound);
201 return;
202 }
203
204 const auto result = device.value()->Mount(mount_target);
205 IPC::ResponseBuilder rb{ctx, 2};
206 rb.Push(result);
207}
208
209void IUser::Unmount(Kernel::HLERequestContext& ctx) {
210 IPC::RequestParser rp{ctx};
211 const auto device_handle{rp.Pop<u64>()};
212 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
213
214 if (state == State::NonInitialized) {
215 IPC::ResponseBuilder rb{ctx, 2};
216 rb.Push(NfcDisabled);
217 return;
218 }
219
220 auto device = GetNfpDevice(device_handle);
221
222 if (!device.has_value()) {
223 IPC::ResponseBuilder rb{ctx, 2};
224 rb.Push(DeviceNotFound);
225 return;
226 }
227
228 const auto result = device.value()->Unmount();
229 IPC::ResponseBuilder rb{ctx, 2};
230 rb.Push(result);
231}
232
233void IUser::OpenApplicationArea(Kernel::HLERequestContext& ctx) {
234 IPC::RequestParser rp{ctx};
235 const auto device_handle{rp.Pop<u64>()};
236 const auto access_id{rp.Pop<u32>()};
237 LOG_INFO(Service_NFP, "called, device_handle={}, access_id={}", device_handle, access_id);
238
239 if (state == State::NonInitialized) {
240 IPC::ResponseBuilder rb{ctx, 2};
241 rb.Push(NfcDisabled);
242 return;
243 }
244
245 auto device = GetNfpDevice(device_handle);
246
247 if (!device.has_value()) {
248 IPC::ResponseBuilder rb{ctx, 2};
249 rb.Push(DeviceNotFound);
250 return;
251 }
252
253 const auto result = device.value()->OpenApplicationArea(access_id);
254 IPC::ResponseBuilder rb{ctx, 2};
255 rb.Push(result);
256}
257
258void IUser::GetApplicationArea(Kernel::HLERequestContext& ctx) {
259 IPC::RequestParser rp{ctx};
260 const auto device_handle{rp.Pop<u64>()};
261 const auto data_size = ctx.GetWriteBufferSize();
262 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
263
264 if (state == State::NonInitialized) {
265 IPC::ResponseBuilder rb{ctx, 2};
266 rb.Push(NfcDisabled);
267 return;
268 }
269
270 if (!ctx.CanWriteBuffer()) {
271 IPC::ResponseBuilder rb{ctx, 2};
272 rb.Push(InvalidArgument);
273 return;
274 }
275
276 auto device = GetNfpDevice(device_handle);
277
278 if (!device.has_value()) {
279 IPC::ResponseBuilder rb{ctx, 2};
280 rb.Push(DeviceNotFound);
281 return;
282 }
283
284 std::vector<u8> data(data_size);
285 const auto result = device.value()->GetApplicationArea(data);
286 ctx.WriteBuffer(data);
287 IPC::ResponseBuilder rb{ctx, 3};
288 rb.Push(result);
289 rb.Push(static_cast<u32>(data_size));
290}
291
292void IUser::SetApplicationArea(Kernel::HLERequestContext& ctx) {
293 IPC::RequestParser rp{ctx};
294 const auto device_handle{rp.Pop<u64>()};
295 const auto data{ctx.ReadBuffer()};
296 LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}", device_handle, data.size());
297
298 if (state == State::NonInitialized) {
299 IPC::ResponseBuilder rb{ctx, 2};
300 rb.Push(NfcDisabled);
301 return;
302 }
303
304 if (!ctx.CanReadBuffer()) {
305 IPC::ResponseBuilder rb{ctx, 2};
306 rb.Push(InvalidArgument);
307 return;
308 }
309
310 auto device = GetNfpDevice(device_handle);
311
312 if (!device.has_value()) {
313 IPC::ResponseBuilder rb{ctx, 2};
314 rb.Push(DeviceNotFound);
315 return;
316 }
317
318 const auto result = device.value()->SetApplicationArea(data);
319 IPC::ResponseBuilder rb{ctx, 2};
320 rb.Push(result);
321}
322
323void IUser::Flush(Kernel::HLERequestContext& ctx) {
324 IPC::RequestParser rp{ctx};
325 const auto device_handle{rp.Pop<u64>()};
326 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
327
328 if (state == State::NonInitialized) {
329 IPC::ResponseBuilder rb{ctx, 2};
330 rb.Push(NfcDisabled);
331 return;
332 }
333
334 auto device = GetNfpDevice(device_handle);
335
336 if (!device.has_value()) {
337 IPC::ResponseBuilder rb{ctx, 2};
338 rb.Push(DeviceNotFound);
339 return;
340 }
341
342 const auto result = device.value()->Flush();
343 IPC::ResponseBuilder rb{ctx, 2};
344 rb.Push(result);
345}
346
347void IUser::Restore(Kernel::HLERequestContext& ctx) {
348 IPC::RequestParser rp{ctx};
349 const auto device_handle{rp.Pop<u64>()};
350 LOG_WARNING(Service_NFP, "(STUBBED) called, device_handle={}", device_handle);
351
352 if (state == State::NonInitialized) {
353 IPC::ResponseBuilder rb{ctx, 2};
354 rb.Push(NfcDisabled);
355 return;
356 }
357
358 auto device = GetNfpDevice(device_handle);
359
360 if (!device.has_value()) {
361 IPC::ResponseBuilder rb{ctx, 2};
362 rb.Push(DeviceNotFound);
363 return;
364 }
365
366 const auto result = device.value()->RestoreAmiibo();
367 IPC::ResponseBuilder rb{ctx, 2};
368 rb.Push(result);
369}
370
371void IUser::CreateApplicationArea(Kernel::HLERequestContext& ctx) {
372 IPC::RequestParser rp{ctx};
373 const auto device_handle{rp.Pop<u64>()};
374 const auto access_id{rp.Pop<u32>()};
375 const auto data{ctx.ReadBuffer()};
376 LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}, access_id={}", device_handle,
377 access_id, data.size());
378
379 if (state == State::NonInitialized) {
380 IPC::ResponseBuilder rb{ctx, 2};
381 rb.Push(NfcDisabled);
382 return;
383 }
384
385 if (!ctx.CanReadBuffer()) {
386 IPC::ResponseBuilder rb{ctx, 2};
387 rb.Push(InvalidArgument);
388 return;
389 }
390
391 auto device = GetNfpDevice(device_handle);
392
393 if (!device.has_value()) {
394 IPC::ResponseBuilder rb{ctx, 2};
395 rb.Push(DeviceNotFound);
396 return;
397 }
398
399 const auto result = device.value()->CreateApplicationArea(access_id, data);
400 IPC::ResponseBuilder rb{ctx, 2};
401 rb.Push(result);
402}
403
404void IUser::GetTagInfo(Kernel::HLERequestContext& ctx) {
405 IPC::RequestParser rp{ctx};
406 const auto device_handle{rp.Pop<u64>()};
407 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
408
409 if (state == State::NonInitialized) {
410 IPC::ResponseBuilder rb{ctx, 2};
411 rb.Push(NfcDisabled);
412 return;
413 }
414
415 auto device = GetNfpDevice(device_handle);
416
417 if (!device.has_value()) {
418 IPC::ResponseBuilder rb{ctx, 2};
419 rb.Push(DeviceNotFound);
420 return;
421 }
422
423 TagInfo tag_info{};
424 const auto result = device.value()->GetTagInfo(tag_info);
425 ctx.WriteBuffer(tag_info);
426 IPC::ResponseBuilder rb{ctx, 2};
427 rb.Push(result);
428}
429
430void IUser::GetRegisterInfo(Kernel::HLERequestContext& ctx) {
431 IPC::RequestParser rp{ctx};
432 const auto device_handle{rp.Pop<u64>()};
433 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
434
435 if (state == State::NonInitialized) {
436 IPC::ResponseBuilder rb{ctx, 2};
437 rb.Push(NfcDisabled);
438 return;
439 }
440
441 auto device = GetNfpDevice(device_handle);
442
443 if (!device.has_value()) {
444 IPC::ResponseBuilder rb{ctx, 2};
445 rb.Push(DeviceNotFound);
446 return;
447 }
448
449 RegisterInfo register_info{};
450 const auto result = device.value()->GetRegisterInfo(register_info);
451 ctx.WriteBuffer(register_info);
452 IPC::ResponseBuilder rb{ctx, 2};
453 rb.Push(result);
454}
455
456void IUser::GetCommonInfo(Kernel::HLERequestContext& ctx) {
457 IPC::RequestParser rp{ctx};
458 const auto device_handle{rp.Pop<u64>()};
459 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
460
461 if (state == State::NonInitialized) {
462 IPC::ResponseBuilder rb{ctx, 2};
463 rb.Push(NfcDisabled);
464 return;
465 }
466
467 auto device = GetNfpDevice(device_handle);
468
469 if (!device.has_value()) {
470 IPC::ResponseBuilder rb{ctx, 2};
471 rb.Push(DeviceNotFound);
472 return;
473 }
474
475 CommonInfo common_info{};
476 const auto result = device.value()->GetCommonInfo(common_info);
477 ctx.WriteBuffer(common_info);
478 IPC::ResponseBuilder rb{ctx, 2};
479 rb.Push(result);
480}
481
482void IUser::GetModelInfo(Kernel::HLERequestContext& ctx) {
483 IPC::RequestParser rp{ctx};
484 const auto device_handle{rp.Pop<u64>()};
485 LOG_INFO(Service_NFP, "called, device_handle={}", device_handle);
486
487 if (state == State::NonInitialized) {
488 IPC::ResponseBuilder rb{ctx, 2};
489 rb.Push(NfcDisabled);
490 return;
491 }
492
493 auto device = GetNfpDevice(device_handle);
494
495 if (!device.has_value()) {
496 IPC::ResponseBuilder rb{ctx, 2};
497 rb.Push(DeviceNotFound);
498 return;
499 }
500
501 ModelInfo model_info{};
502 const auto result = device.value()->GetModelInfo(model_info);
503 ctx.WriteBuffer(model_info);
504 IPC::ResponseBuilder rb{ctx, 2};
505 rb.Push(result);
506}
507
508void IUser::AttachActivateEvent(Kernel::HLERequestContext& ctx) {
509 IPC::RequestParser rp{ctx};
510 const auto device_handle{rp.Pop<u64>()};
511 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
512
513 if (state == State::NonInitialized) {
514 IPC::ResponseBuilder rb{ctx, 2};
515 rb.Push(NfcDisabled);
516 return;
517 }
518
519 auto device = GetNfpDevice(device_handle);
520
521 if (!device.has_value()) {
522 IPC::ResponseBuilder rb{ctx, 2};
523 rb.Push(DeviceNotFound);
524 return;
525 }
526
527 IPC::ResponseBuilder rb{ctx, 2, 1};
528 rb.Push(ResultSuccess);
529 rb.PushCopyObjects(device.value()->GetActivateEvent());
530}
531
532void IUser::AttachDeactivateEvent(Kernel::HLERequestContext& ctx) {
533 IPC::RequestParser rp{ctx};
534 const auto device_handle{rp.Pop<u64>()};
535 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
536
537 if (state == State::NonInitialized) {
538 IPC::ResponseBuilder rb{ctx, 2};
539 rb.Push(NfcDisabled);
540 return;
541 }
542
543 auto device = GetNfpDevice(device_handle);
544
545 if (!device.has_value()) {
546 IPC::ResponseBuilder rb{ctx, 2};
547 rb.Push(DeviceNotFound);
548 return;
549 }
550
551 IPC::ResponseBuilder rb{ctx, 2, 1};
552 rb.Push(ResultSuccess);
553 rb.PushCopyObjects(device.value()->GetDeactivateEvent());
554}
555
556void IUser::GetState(Kernel::HLERequestContext& ctx) {
557    LOG_DEBUG(Service_NFP, "called");
558
559 IPC::ResponseBuilder rb{ctx, 3, 0};
560 rb.Push(ResultSuccess);
561 rb.PushEnum(state);
562}
563
564void IUser::GetDeviceState(Kernel::HLERequestContext& ctx) {
565 IPC::RequestParser rp{ctx};
566 const auto device_handle{rp.Pop<u64>()};
567 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
568
569 auto device = GetNfpDevice(device_handle);
570
571 if (!device.has_value()) {
572 IPC::ResponseBuilder rb{ctx, 2};
573 rb.Push(DeviceNotFound);
574 return;
575 }
576
577 IPC::ResponseBuilder rb{ctx, 3};
578 rb.Push(ResultSuccess);
579 rb.PushEnum(device.value()->GetCurrentState());
580}
581
582void IUser::GetNpadId(Kernel::HLERequestContext& ctx) {
583 IPC::RequestParser rp{ctx};
584 const auto device_handle{rp.Pop<u64>()};
585 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
586
587 if (state == State::NonInitialized) {
588 IPC::ResponseBuilder rb{ctx, 2};
589 rb.Push(NfcDisabled);
590 return;
591 }
592
593 auto device = GetNfpDevice(device_handle);
594
595 if (!device.has_value()) {
596 IPC::ResponseBuilder rb{ctx, 2};
597 rb.Push(DeviceNotFound);
598 return;
599 }
600
601 IPC::ResponseBuilder rb{ctx, 3};
602 rb.Push(ResultSuccess);
603 rb.PushEnum(device.value()->GetNpadId());
604}
605
606void IUser::GetApplicationAreaSize(Kernel::HLERequestContext& ctx) {
607 IPC::RequestParser rp{ctx};
608 const auto device_handle{rp.Pop<u64>()};
609 LOG_DEBUG(Service_NFP, "called, device_handle={}", device_handle);
610
611 auto device = GetNfpDevice(device_handle);
612
613 if (!device.has_value()) {
614 IPC::ResponseBuilder rb{ctx, 2};
615 rb.Push(DeviceNotFound);
616 return;
617 }
618
619 IPC::ResponseBuilder rb{ctx, 3};
620 rb.Push(ResultSuccess);
621 rb.Push(device.value()->GetApplicationAreaSize());
14} 622}
15 623
16NFP_User::~NFP_User() = default; 624void IUser::AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx) {
625 LOG_INFO(Service_NFP, "called");
626
627 if (state == State::NonInitialized) {
628 IPC::ResponseBuilder rb{ctx, 2};
629 rb.Push(NfcDisabled);
630 return;
631 }
632
633 IPC::ResponseBuilder rb{ctx, 2, 1};
634 rb.Push(ResultSuccess);
635 rb.PushCopyObjects(availability_change_event->GetReadableEvent());
636}
637
638void IUser::RecreateApplicationArea(Kernel::HLERequestContext& ctx) {
639 IPC::RequestParser rp{ctx};
640 const auto device_handle{rp.Pop<u64>()};
641 const auto access_id{rp.Pop<u32>()};
642 const auto data{ctx.ReadBuffer()};
643 LOG_INFO(Service_NFP, "called, device_handle={}, data_size={}, access_id={}", device_handle,
644 access_id, data.size());
645
646 if (state == State::NonInitialized) {
647 IPC::ResponseBuilder rb{ctx, 2};
648 rb.Push(NfcDisabled);
649 return;
650 }
651
652 auto device = GetNfpDevice(device_handle);
653
654 if (!device.has_value()) {
655 IPC::ResponseBuilder rb{ctx, 2};
656 rb.Push(DeviceNotFound);
657 return;
658 }
659
660 const auto result = device.value()->RecreateApplicationArea(access_id, data);
661 IPC::ResponseBuilder rb{ctx, 2};
662 rb.Push(result);
663}
664
665std::optional<std::shared_ptr<NfpDevice>> IUser::GetNfpDevice(u64 handle) {
666 for (auto& device : devices) {
667 if (device->GetHandle() == handle) {
668 return device;
669 }
670 }
671 return std::nullopt;
672}
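// Editor's note: handles are just the npad id widened to u64 (see
// NfpDevice::GetHandle), so this lookup is an id match over the fixed device
// array. A hedged fragment mirroring the handlers above:
//     auto device = GetNfpDevice(device_handle);
//     if (!device.has_value()) {
//         rb.Push(DeviceNotFound);      // no device advertises this handle
//     } else {
//         rb.Push((*device)->Flush());  // forward the call to the match
//     }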
17 673
18} // namespace Service::NFP 674} // namespace Service::NFP
diff --git a/src/core/hle/service/nfp/nfp_user.h b/src/core/hle/service/nfp/nfp_user.h
index 519ff56ee..68c60ae82 100644
--- a/src/core/hle/service/nfp/nfp_user.h
+++ b/src/core/hle/service/nfp/nfp_user.h
@@ -3,14 +3,52 @@
3 3
4#pragma once 4#pragma once
5 5
6#include "core/hle/service/kernel_helpers.h"
6#include "core/hle/service/nfp/nfp.h" 7#include "core/hle/service/nfp/nfp.h"
8#include "core/hle/service/nfp/nfp_types.h"
7 9
8namespace Service::NFP { 10namespace Service::NFP {
11class NfpDevice;
9 12
10class NFP_User final : public Module::Interface { 13class IUser final : public ServiceFramework<IUser> {
11public: 14public:
12 explicit NFP_User(std::shared_ptr<Module> module_, Core::System& system_); 15 explicit IUser(Core::System& system_);
13 ~NFP_User() override; 16
17private:
18 void Initialize(Kernel::HLERequestContext& ctx);
19 void Finalize(Kernel::HLERequestContext& ctx);
20 void ListDevices(Kernel::HLERequestContext& ctx);
21 void StartDetection(Kernel::HLERequestContext& ctx);
22 void StopDetection(Kernel::HLERequestContext& ctx);
23 void Mount(Kernel::HLERequestContext& ctx);
24 void Unmount(Kernel::HLERequestContext& ctx);
25 void OpenApplicationArea(Kernel::HLERequestContext& ctx);
26 void GetApplicationArea(Kernel::HLERequestContext& ctx);
27 void SetApplicationArea(Kernel::HLERequestContext& ctx);
28 void Flush(Kernel::HLERequestContext& ctx);
29 void Restore(Kernel::HLERequestContext& ctx);
30 void CreateApplicationArea(Kernel::HLERequestContext& ctx);
31 void GetTagInfo(Kernel::HLERequestContext& ctx);
32 void GetRegisterInfo(Kernel::HLERequestContext& ctx);
33 void GetCommonInfo(Kernel::HLERequestContext& ctx);
34 void GetModelInfo(Kernel::HLERequestContext& ctx);
35 void AttachActivateEvent(Kernel::HLERequestContext& ctx);
36 void AttachDeactivateEvent(Kernel::HLERequestContext& ctx);
37 void GetState(Kernel::HLERequestContext& ctx);
38 void GetDeviceState(Kernel::HLERequestContext& ctx);
39 void GetNpadId(Kernel::HLERequestContext& ctx);
40 void GetApplicationAreaSize(Kernel::HLERequestContext& ctx);
41 void AttachAvailabilityChangeEvent(Kernel::HLERequestContext& ctx);
42 void RecreateApplicationArea(Kernel::HLERequestContext& ctx);
43
44 std::optional<std::shared_ptr<NfpDevice>> GetNfpDevice(u64 handle);
45
46 KernelHelpers::ServiceContext service_context;
47
48 std::array<std::shared_ptr<NfpDevice>, 10> devices{};
49
50 State state{State::NonInitialized};
51 Kernel::KEvent* availability_change_event;
14}; 52};
15 53
16} // namespace Service::NFP 54} // namespace Service::NFP
diff --git a/src/core/hle/service/nvdrv/core/container.cpp b/src/core/hle/service/nvdrv/core/container.cpp
new file mode 100644
index 000000000..37ca24f5d
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.cpp
@@ -0,0 +1,50 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#include "core/hle/service/nvdrv/core/container.h"
6#include "core/hle/service/nvdrv/core/nvmap.h"
7#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
8#include "video_core/host1x/host1x.h"
9
10namespace Service::Nvidia::NvCore {
11
12struct ContainerImpl {
13 explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
14 : file{host1x_}, manager{host1x_}, device_file_data{} {}
15 NvMap file;
16 SyncpointManager manager;
17 Container::Host1xDeviceFileData device_file_data;
18};
19
20Container::Container(Tegra::Host1x::Host1x& host1x_) {
21 impl = std::make_unique<ContainerImpl>(host1x_);
22}
23
24Container::~Container() = default;
25
26NvMap& Container::GetNvMapFile() {
27 return impl->file;
28}
29
30const NvMap& Container::GetNvMapFile() const {
31 return impl->file;
32}
33
34Container::Host1xDeviceFileData& Container::Host1xDeviceFile() {
35 return impl->device_file_data;
36}
37
38const Container::Host1xDeviceFileData& Container::Host1xDeviceFile() const {
39 return impl->device_file_data;
40}
41
42SyncpointManager& Container::GetSyncpointManager() {
43 return impl->manager;
44}
45
46const SyncpointManager& Container::GetSyncpointManager() const {
47 return impl->manager;
48}
49
50} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/container.h b/src/core/hle/service/nvdrv/core/container.h
new file mode 100644
index 000000000..b4b63ac90
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/container.h
@@ -0,0 +1,52 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#pragma once
6
7#include <deque>
8#include <memory>
9#include <unordered_map>
10
11#include "core/hle/service/nvdrv/nvdata.h"
12
13namespace Tegra::Host1x {
14class Host1x;
15} // namespace Tegra::Host1x
16
17namespace Service::Nvidia::NvCore {
18
19class NvMap;
20class SyncpointManager;
21
22struct ContainerImpl;
23
24class Container {
25public:
26 explicit Container(Tegra::Host1x::Host1x& host1x);
27 ~Container();
28
29 NvMap& GetNvMapFile();
30
31 const NvMap& GetNvMapFile() const;
32
33 SyncpointManager& GetSyncpointManager();
34
35 const SyncpointManager& GetSyncpointManager() const;
36
37 struct Host1xDeviceFileData {
38 std::unordered_map<DeviceFD, u32> fd_to_id{};
39 std::deque<u32> syncpts_accumulated{};
40 u32 nvdec_next_id{};
41 u32 vic_next_id{};
42 };
43
44 Host1xDeviceFileData& Host1xDeviceFile();
45
46 const Host1xDeviceFileData& Host1xDeviceFile() const;
47
48private:
49 std::unique_ptr<ContainerImpl> impl;
50};
51
52} // namespace Service::Nvidia::NvCore
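// Editor's sketch (assumptions: an existing Tegra::Host1x::Host1x `host1x`
// plus illustrative `fd`/`handle_id` values): the pimpl keeps nvmap, syncpoint
// state, and per-fd bookkeeping owned together so every nvdrv device file
// shares one view.
//     Service::Nvidia::NvCore::Container container{host1x};
//     auto& nvmap = container.GetNvMapFile();
//     auto& syncpoints = container.GetSyncpointManager();
//     container.Host1xDeviceFile().fd_to_id[fd] = handle_id;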
diff --git a/src/core/hle/service/nvdrv/core/nvmap.cpp b/src/core/hle/service/nvdrv/core/nvmap.cpp
new file mode 100644
index 000000000..fbd8a74a5
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.cpp
@@ -0,0 +1,272 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#include "common/alignment.h"
6#include "common/assert.h"
7#include "common/logging/log.h"
8#include "core/hle/service/nvdrv/core/nvmap.h"
9#include "core/memory.h"
10#include "video_core/host1x/host1x.h"
11
12using Core::Memory::YUZU_PAGESIZE;
13
14namespace Service::Nvidia::NvCore {
15NvMap::Handle::Handle(u64 size_, Id id_)
16 : size(size_), aligned_size(size), orig_size(size), id(id_) {
17 flags.raw = 0;
18}
19
20NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
21 std::scoped_lock lock(mutex);
22
23 // Handles cannot be allocated twice
24 if (allocated) {
25 return NvResult::AccessDenied;
26 }
27
28 flags = pFlags;
29 kind = pKind;
30 align = pAlign < YUZU_PAGESIZE ? YUZU_PAGESIZE : pAlign;
31
32 // This flag is only applicable for handles with an address passed
33 if (pAddress) {
34 flags.keep_uncached_after_free.Assign(0);
35 } else {
36 LOG_CRITICAL(Service_NVDRV,
37 "Mapping nvmap handles without a CPU side address is unimplemented!");
38 }
39
40 size = Common::AlignUp(size, YUZU_PAGESIZE);
41 aligned_size = Common::AlignUp(size, align);
42 address = pAddress;
43 allocated = true;
44
45 return NvResult::Success;
46}
47
48NvResult NvMap::Handle::Duplicate(bool internal_session) {
49 std::scoped_lock lock(mutex);
50 // Unallocated handles cannot be duplicated as duplication requires memory accounting (in HOS)
51 if (!allocated) [[unlikely]] {
52 return NvResult::BadValue;
53 }
54
55    // If we internally use FromId, the duplication tracking of handles won't work accurately
56    // because we don't implement per-process handle refs.
57 if (internal_session) {
58 internal_dupes++;
59 } else {
60 dupes++;
61 }
62
63 return NvResult::Success;
64}
65
66NvMap::NvMap(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {}
67
68void NvMap::AddHandle(std::shared_ptr<Handle> handle_description) {
69 std::scoped_lock lock(handles_lock);
70
71 handles.emplace(handle_description->id, std::move(handle_description));
72}
73
74void NvMap::UnmapHandle(Handle& handle_description) {
75 // Remove pending unmap queue entry if needed
76 if (handle_description.unmap_queue_entry) {
77 unmap_queue.erase(*handle_description.unmap_queue_entry);
78 handle_description.unmap_queue_entry.reset();
79 }
80
81 // Free and unmap the handle from the SMMU
82 host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
83 handle_description.aligned_size);
84 host1x.Allocator().Free(handle_description.pin_virt_address,
85 static_cast<u32>(handle_description.aligned_size));
86 handle_description.pin_virt_address = 0;
87}
88
89bool NvMap::TryRemoveHandle(const Handle& handle_description) {
90 // No dupes left, we can remove from handle map
91 if (handle_description.dupes == 0 && handle_description.internal_dupes == 0) {
92 std::scoped_lock lock(handles_lock);
93
94 auto it{handles.find(handle_description.id)};
95 if (it != handles.end()) {
96 handles.erase(it);
97 }
98
99 return true;
100 } else {
101 return false;
102 }
103}
104
105NvResult NvMap::CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out) {
106 if (!size) [[unlikely]] {
107 return NvResult::BadValue;
108 }
109
110 u32 id{next_handle_id.fetch_add(HandleIdIncrement, std::memory_order_relaxed)};
111 auto handle_description{std::make_shared<Handle>(size, id)};
112 AddHandle(handle_description);
113
114 result_out = handle_description;
115 return NvResult::Success;
116}
117
118std::shared_ptr<NvMap::Handle> NvMap::GetHandle(Handle::Id handle) {
119 std::scoped_lock lock(handles_lock);
120 try {
121 return handles.at(handle);
122 } catch (std::out_of_range&) {
123 return nullptr;
124 }
125}
126
127VAddr NvMap::GetHandleAddress(Handle::Id handle) {
128 std::scoped_lock lock(handles_lock);
129 try {
130 return handles.at(handle)->address;
131 } catch (std::out_of_range&) {
132 return 0;
133 }
134}
135
136u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
137 auto handle_description{GetHandle(handle)};
138 if (!handle_description) [[unlikely]] {
139 return 0;
140 }
141
142 std::scoped_lock lock(handle_description->mutex);
143 if (!handle_description->pins) {
144 // If we're in the unmap queue we can just remove ourselves and return since we're already
145 // mapped
146 {
147 // Lock now to prevent our queue entry from being removed for allocation in-between the
148 // following check and erase
149 std::scoped_lock queueLock(unmap_queue_lock);
150 if (handle_description->unmap_queue_entry) {
151 unmap_queue.erase(*handle_description->unmap_queue_entry);
152 handle_description->unmap_queue_entry.reset();
153
154 handle_description->pins++;
155 return handle_description->pin_virt_address;
156 }
157 }
158
159 // If not then allocate some space and map it
160 u32 address{};
161 auto& smmu_allocator = host1x.Allocator();
162 auto& smmu_memory_manager = host1x.MemoryManager();
163 while (!(address =
164 smmu_allocator.Allocate(static_cast<u32>(handle_description->aligned_size)))) {
165 // Free handles until the allocation succeeds
166 std::scoped_lock queueLock(unmap_queue_lock);
167            if (auto freeHandleDesc{unmap_queue.empty() ? std::shared_ptr<Handle>{} : unmap_queue.front()}) {
168 // Handles in the unmap queue are guaranteed not to be pinned so don't bother
169 // checking if they are before unmapping
170 std::scoped_lock freeLock(freeHandleDesc->mutex);
171                if (freeHandleDesc->pin_virt_address)
172 UnmapHandle(*freeHandleDesc);
173 } else {
174 LOG_CRITICAL(Service_NVDRV, "Ran out of SMMU address space!");
175 }
176 }
177
178 smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
179 handle_description->aligned_size);
180 handle_description->pin_virt_address = address;
181 }
182
183 handle_description->pins++;
184 return handle_description->pin_virt_address;
185}
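// Editor's sketch: pinning is refcounted, so every PinHandle call must
// eventually be paired with an UnpinHandle call before the SMMU mapping can be
// reclaimed. Assuming an NvMap `nvmap` and a valid `id`:
//     const u32 smmu_addr = nvmap.PinHandle(id); // maps on the first pin
//     SubmitWorkReferencing(smmu_addr);          // hypothetical submission
//     nvmap.UnpinHandle(id); // the last unpin queues the mapping for lazy reuse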
186
187void NvMap::UnpinHandle(Handle::Id handle) {
188 auto handle_description{GetHandle(handle)};
189 if (!handle_description) {
190 return;
191 }
192
193 std::scoped_lock lock(handle_description->mutex);
194 if (--handle_description->pins < 0) {
195 LOG_WARNING(Service_NVDRV, "Pin count imbalance detected!");
196 } else if (!handle_description->pins) {
197 std::scoped_lock queueLock(unmap_queue_lock);
198
199 // Add to the unmap queue allowing this handle's memory to be freed if needed
200 unmap_queue.push_back(handle_description);
201 handle_description->unmap_queue_entry = std::prev(unmap_queue.end());
202 }
203}
204
205void NvMap::DuplicateHandle(Handle::Id handle, bool internal_session) {
206 auto handle_description{GetHandle(handle)};
207 if (!handle_description) {
208 LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
209 return;
210 }
211
212 auto result = handle_description->Duplicate(internal_session);
213 if (result != NvResult::Success) {
214 LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
215 }
216}
217
218std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool internal_session) {
219 std::weak_ptr<Handle> hWeak{GetHandle(handle)};
220 FreeInfo freeInfo;
221
222    // We use a weak ptr here so we can tell when the handle has been freed and report that back
223    // to the guest
224 if (auto handle_description = hWeak.lock()) {
225 std::scoped_lock lock(handle_description->mutex);
226
227 if (internal_session) {
228 if (--handle_description->internal_dupes < 0)
229 LOG_WARNING(Service_NVDRV, "Internal duplicate count imbalance detected!");
230 } else {
231 if (--handle_description->dupes < 0) {
232 LOG_WARNING(Service_NVDRV, "User duplicate count imbalance detected!");
233 } else if (handle_description->dupes == 0) {
234 // Force unmap the handle
235 if (handle_description->pin_virt_address) {
236 std::scoped_lock queueLock(unmap_queue_lock);
237 UnmapHandle(*handle_description);
238 }
239
240 handle_description->pins = 0;
241 }
242 }
243
244 // Try to remove the shared ptr to the handle from the map, if nothing else is using the
245 // handle then it will now be freed when `handle_description` goes out of scope
246 if (TryRemoveHandle(*handle_description)) {
247 LOG_DEBUG(Service_NVDRV, "Removed nvmap handle: {}", handle);
248 } else {
249 LOG_DEBUG(Service_NVDRV,
250 "Tried to free nvmap handle: {} but didn't as it still has duplicates",
251 handle);
252 }
253
254 freeInfo = {
255 .address = handle_description->address,
256 .size = handle_description->size,
257 .was_uncached = handle_description->flags.map_uncached.Value() != 0,
258 };
259 } else {
260 return std::nullopt;
261 }
262
263 // Handle hasn't been freed from memory, set address to 0 to mark that the handle wasn't freed
264 if (!hWeak.expired()) {
265 LOG_DEBUG(Service_NVDRV, "nvmap handle: {} wasn't freed as it is still in use", handle);
266 freeInfo.address = 0;
267 }
268
269 return freeInfo;
270}
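// Editor's sketch of consuming FreeInfo, assuming an NvMap `nvmap` and a
// hypothetical ReclaimGuestMemory helper: an address of 0 means the handle is
// still referenced elsewhere and was not actually freed.
//     if (const auto info = nvmap.FreeHandle(id, false)) {
//         if (info->address != 0) {
//             ReclaimGuestMemory(info->address, info->size);
//         }
//     } // std::nullopt: `id` never named a live handle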
271
272} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/nvmap.h b/src/core/hle/service/nvdrv/core/nvmap.h
new file mode 100644
index 000000000..b9dd3801f
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/nvmap.h
@@ -0,0 +1,175 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#pragma once
6
7#include <atomic>
8#include <list>
9#include <memory>
10#include <mutex>
11#include <optional>
12#include <unordered_map>
13#include <assert.h>
14
15#include "common/bit_field.h"
16#include "common/common_types.h"
17#include "core/hle/service/nvdrv/nvdata.h"
18
19namespace Tegra {
20
21namespace Host1x {
22class Host1x;
23} // namespace Host1x
24
25} // namespace Tegra
26
27namespace Service::Nvidia::NvCore {
28/**
29 * @brief The nvmap core class holds the global state for nvmap and provides methods to manage
30 * handles
31 */
32class NvMap {
33public:
34 /**
35 * @brief A handle to a contiguous block of memory in an application's address space
36 */
37 struct Handle {
38 std::mutex mutex;
39
40 u64 align{}; //!< The alignment to use when pinning the handle onto the SMMU
41 u64 size; //!< Page-aligned size of the memory the handle refers to
42 u64 aligned_size; //!< `align`-aligned size of the memory the handle refers to
43 u64 orig_size; //!< Original unaligned size of the memory this handle refers to
44
45 s32 dupes{1}; //!< How many guest references there are to this handle
46 s32 internal_dupes{0}; //!< How many emulator-internal references there are to this handle
47
48 using Id = u32;
49 Id id; //!< A globally unique identifier for this handle
50
51 s32 pins{};
52 u32 pin_virt_address{};
53 std::optional<typename std::list<std::shared_ptr<Handle>>::iterator> unmap_queue_entry{};
54
55 union Flags {
56 u32 raw;
57 BitField<0, 1, u32> map_uncached; //!< If the handle should be mapped as uncached
58 BitField<2, 1, u32> keep_uncached_after_free; //!< Only applicable when the handle was
59 //!< allocated with a fixed address
60 BitField<4, 1, u32> _unk0_; //!< Passed to IOVMM for pins
61 } flags{};
62 static_assert(sizeof(Flags) == sizeof(u32));
63
64        u64 address{}; //!< The memory location in the guest's AS that this handle corresponds to;
65 //!< this can also be in the nvdrv tmem
66 bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
67 //!< call
68
69 u8 kind{}; //!< Used for memory compression
70 bool allocated{}; //!< If the handle has been allocated with `Alloc`
71
72 u64 dma_map_addr{}; //! remove me after implementing pinning.
73
74 Handle(u64 size, Id id);
75
76 /**
77         * @brief Sets up the handle with the given memory config; it can allocate memory from the tmem
78 * if a 0 address is passed
79 */
80 [[nodiscard]] NvResult Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress);
81
82 /**
83 * @brief Increases the dupe counter of the handle for the given session
84 */
85 [[nodiscard]] NvResult Duplicate(bool internal_session);
86
87 /**
88         * @brief Obtains a pointer to the handle's memory and marks it as having been
89 * mapped
90 */
91 u8* GetPointer() {
92 if (!address) {
93 return nullptr;
94 }
95
96 is_shared_mem_mapped = true;
97 return reinterpret_cast<u8*>(address);
98 }
99 };
100
101 /**
102 * @brief Encapsulates the result of a FreeHandle operation
103 */
104 struct FreeInfo {
105 u64 address; //!< Address the handle referred to before deletion
106 u64 size; //!< Page-aligned handle size
107 bool was_uncached; //!< If the handle was allocated as uncached
108 };
109
110 explicit NvMap(Tegra::Host1x::Host1x& host1x);
111
112 /**
113 * @brief Creates an unallocated handle of the given size
114 */
115 [[nodiscard]] NvResult CreateHandle(u64 size, std::shared_ptr<NvMap::Handle>& result_out);
116
117 std::shared_ptr<Handle> GetHandle(Handle::Id handle);
118
119 VAddr GetHandleAddress(Handle::Id handle);
120
121 /**
122 * @brief Maps a handle into the SMMU address space
123     * @note This operation is refcounted; the number of calls to this must eventually match the
124 * number of calls to `UnpinHandle`
125 * @return The SMMU virtual address that the handle has been mapped to
126 */
127 u32 PinHandle(Handle::Id handle);
128
129 /**
130 * @brief When this has been called an equal number of times to `PinHandle` for the supplied
131     * handle, it will be added to a list of handles to be freed when necessary
132 */
133 void UnpinHandle(Handle::Id handle);
134
135 /**
136 * @brief Tries to duplicate a handle
137 */
138 void DuplicateHandle(Handle::Id handle, bool internal_session = false);
139
140 /**
141 * @brief Tries to free a handle and remove a single dupe
142     * @note If a handle has no dupes left and has no other users, a FreeInfo struct will be returned
143 * describing the prior state of the handle
144 */
145 std::optional<FreeInfo> FreeHandle(Handle::Id handle, bool internal_session);
146
147private:
148 std::list<std::shared_ptr<Handle>> unmap_queue{};
149 std::mutex unmap_queue_lock{}; //!< Protects access to `unmap_queue`
150
151 std::unordered_map<Handle::Id, std::shared_ptr<Handle>>
152 handles{}; //!< Main owning map of handles
153 std::mutex handles_lock; //!< Protects access to `handles`
154
155 static constexpr u32 HandleIdIncrement{
156 4}; //!< Each new handle ID is an increment of 4 from the previous
157 std::atomic<u32> next_handle_id{HandleIdIncrement};
158 Tegra::Host1x::Host1x& host1x;
159
160 void AddHandle(std::shared_ptr<Handle> handle);
161
162 /**
163 * @brief Unmaps and frees the SMMU memory region a handle is mapped to
164 * @note Both `unmap_queue_lock` and `handle_description.mutex` MUST be locked when calling this
165 */
166 void UnmapHandle(Handle& handle_description);
167
168 /**
169 * @brief Removes a handle from the map taking its dupes into account
170 * @note handle_description.mutex MUST be locked when calling this
171 * @return If the handle was removed from the map
172 */
173 bool TryRemoveHandle(const Handle& handle_description);
174};
175} // namespace Service::Nvidia::NvCore
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
new file mode 100644
index 000000000..eda2041a0
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.cpp
@@ -0,0 +1,121 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#include "common/assert.h"
6#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
7#include "video_core/host1x/host1x.h"
8
9namespace Service::Nvidia::NvCore {
10
11SyncpointManager::SyncpointManager(Tegra::Host1x::Host1x& host1x_) : host1x{host1x_} {
12 constexpr u32 VBlank0SyncpointId{26};
13 constexpr u32 VBlank1SyncpointId{27};
14
15 // Reserve both vblank syncpoints as client managed as they use Continuous Mode
16 // Refer to section 14.3.5.3 of the TRM for more information on Continuous Mode
17 // https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/drm/dc.c#L660
18 ReserveSyncpoint(VBlank0SyncpointId, true);
19 ReserveSyncpoint(VBlank1SyncpointId, true);
20
21 for (u32 syncpoint_id : channel_syncpoints) {
22 if (syncpoint_id) {
23 ReserveSyncpoint(syncpoint_id, false);
24 }
25 }
26}
27
28SyncpointManager::~SyncpointManager() = default;
29
30u32 SyncpointManager::ReserveSyncpoint(u32 id, bool client_managed) {
31 if (syncpoints.at(id).reserved) {
32 ASSERT_MSG(false, "Requested syncpoint is in use");
33 return 0;
34 }
35
36 syncpoints.at(id).reserved = true;
37 syncpoints.at(id).interface_managed = client_managed;
38
39 return id;
40}
41
42u32 SyncpointManager::FindFreeSyncpoint() {
43 for (u32 i{1}; i < syncpoints.size(); i++) {
44 if (!syncpoints[i].reserved) {
45 return i;
46 }
47 }
48 ASSERT_MSG(false, "Failed to find a free syncpoint!");
49 return 0;
50}
51
52u32 SyncpointManager::AllocateSyncpoint(bool client_managed) {
53 std::lock_guard lock(reservation_lock);
54 return ReserveSyncpoint(FindFreeSyncpoint(), client_managed);
55}
56
57void SyncpointManager::FreeSyncpoint(u32 id) {
58 std::lock_guard lock(reservation_lock);
59 ASSERT(syncpoints.at(id).reserved);
60 syncpoints.at(id).reserved = false;
61}
62
63bool SyncpointManager::IsSyncpointAllocated(u32 id) {
64    return (id < SyncpointCount) && syncpoints[id].reserved;
65}
66
67bool SyncpointManager::HasSyncpointExpired(u32 id, u32 threshold) const {
68 const SyncpointInfo& syncpoint{syncpoints.at(id)};
69
70 if (!syncpoint.reserved) {
71 ASSERT(false);
72 return 0;
73 }
74
75    // If the interface manages counters then we don't keep track of the maximum value, as the
76    // interface itself handles sanity checking the values
77 if (syncpoint.interface_managed) {
78 return static_cast<s32>(syncpoint.counter_min - threshold) >= 0;
79 } else {
80 return (syncpoint.counter_max - threshold) >= (syncpoint.counter_min - threshold);
81 }
82}
83
84u32 SyncpointManager::IncrementSyncpointMaxExt(u32 id, u32 amount) {
85 if (!syncpoints.at(id).reserved) {
86 ASSERT(false);
87 return 0;
88 }
89
90 return syncpoints.at(id).counter_max += amount;
91}
92
93u32 SyncpointManager::ReadSyncpointMinValue(u32 id) {
94 if (!syncpoints.at(id).reserved) {
95 ASSERT(false);
96 return 0;
97 }
98
99 return syncpoints.at(id).counter_min;
100}
101
102u32 SyncpointManager::UpdateMin(u32 id) {
103 if (!syncpoints.at(id).reserved) {
104 ASSERT(false);
105 return 0;
106 }
107
108 syncpoints.at(id).counter_min = host1x.GetSyncpointManager().GetHostSyncpointValue(id);
109 return syncpoints.at(id).counter_min;
110}
111
112NvFence SyncpointManager::GetSyncpointFence(u32 id) {
113 if (!syncpoints.at(id).reserved) {
114 ASSERT(false);
115 return NvFence{};
116 }
117
118 return {.id = static_cast<s32>(id), .value = syncpoints.at(id).counter_max};
119}
120
121} // namespace Service::Nvidia::NvCore
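HasSyncpointExpired above relies on wraparound-safe unsigned arithmetic rather than a plain
comparison of raw counters. A minimal standalone sketch of the same idiom (illustrative names
only, not part of this change; correct as long as the counters are less than 2^31 apart):

    #include <cstdint>

    // "Has counter_min reached threshold?" expressed as a signed distance, so the
    // answer stays correct even after the u32 counter wraps past zero.
    inline bool HasExpired(std::uint32_t counter_min, std::uint32_t threshold) {
        return static_cast<std::int32_t>(counter_min - threshold) >= 0;
    }

    // Example: the counter advanced from 0xFFFFFFFF past zero to 0x1; the threshold
    // 0xFFFFFFFF is still correctly reported as expired.
    static_assert(static_cast<std::int32_t>(0x1u - 0xFFFFFFFFu) >= 0);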
diff --git a/src/core/hle/service/nvdrv/core/syncpoint_manager.h b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
new file mode 100644
index 000000000..b76ef9032
--- /dev/null
+++ b/src/core/hle/service/nvdrv/core/syncpoint_manager.h
@@ -0,0 +1,134 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
4
5#pragma once
6
7#include <array>
8#include <atomic>
9#include <mutex>
10
11#include "common/common_types.h"
12#include "core/hle/service/nvdrv/nvdata.h"
13
14namespace Tegra::Host1x {
15class Host1x;
16} // namespace Tegra::Host1x
17
18namespace Service::Nvidia::NvCore {
19
20enum class ChannelType : u32 {
21 MsEnc = 0,
22 VIC = 1,
23 GPU = 2,
24 NvDec = 3,
25 Display = 4,
26 NvJpg = 5,
27 TSec = 6,
28 Max = 7
29};
30
31/**
32 * @brief SyncpointManager handles allocating and accessing host1x syncpoints; these are cached
33 * versions of the HW syncpoints, which are intermittently synced
34 * @note Refer to Chapter 14 of the Tegra X1 TRM for an exhaustive overview of them
35 * @url https://http.download.nvidia.com/tegra-public-appnotes/host1x.html
36 * @url
37 * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/jetson-tx1/drivers/video/tegra/host/nvhost_syncpt.c
38 */
39class SyncpointManager final {
40public:
41 explicit SyncpointManager(Tegra::Host1x::Host1x& host1x);
42 ~SyncpointManager();
43
44 /**
45 * @brief Checks if the given syncpoint is both allocated and below the number of HW syncpoints
46 */
47 bool IsSyncpointAllocated(u32 id);
48
49 /**
50 * @brief Finds a free syncpoint and reserves it
51 * @return The ID of the reserved syncpoint
52 */
53 u32 AllocateSyncpoint(bool client_managed);
54
55 /**
56 * @url
57 * https://github.com/Jetson-TX1-AndroidTV/android_kernel_jetson_tx1_hdmi_primary/blob/8f74a72394efb871cb3f886a3de2998cd7ff2990/drivers/gpu/host1x/syncpt.c#L259
58 */
59 bool HasSyncpointExpired(u32 id, u32 threshold) const;
60
61 bool IsFenceSignalled(NvFence fence) const {
62 return HasSyncpointExpired(fence.id, fence.value);
63 }
64
65 /**
66 * @brief Atomically increments the maximum value of a syncpoint by the given amount
67 * @return The new max value of the syncpoint
68 */
69 u32 IncrementSyncpointMaxExt(u32 id, u32 amount);
70
71 /**
72 * @return The minimum value of the syncpoint
73 */
74 u32 ReadSyncpointMinValue(u32 id);
75
76 /**
77 * @brief Synchronises the minimum value of the syncpoint with the GPU
78 * @return The new minimum value of the syncpoint
79 */
80 u32 UpdateMin(u32 id);
81
82 /**
83 * @brief Frees a previously reserved syncpoint.
84 */
85 void FreeSyncpoint(u32 id);
86
87 /**
88 * @return A fence that will be signalled once this syncpoint hits its maximum value
89 */
90 NvFence GetSyncpointFence(u32 id);
91
92 static constexpr std::array<u32, static_cast<u32>(ChannelType::Max)> channel_syncpoints{
93 0x0, // `MsEnc` is unimplemented
94 0xC, // `VIC`
95 0x0, // `GPU` syncpoints are allocated per-channel instead
96 0x36, // `NvDec`
97 0x0, // `Display` is unimplemented
98 0x37, // `NvJpg`
99 0x0, // `TSec` is unimplemented
100 }; //!< Maps each channel ID to a constant syncpoint
101
102private:
103 /**
104 * @note reservation_lock should be locked when calling this
105 */
106 u32 ReserveSyncpoint(u32 id, bool client_managed);
107
108 /**
109 * @return The ID of the first free syncpoint
110 */
111 u32 FindFreeSyncpoint();
112
113 struct SyncpointInfo {
114 std::atomic<u32> counter_min; //!< The lowest value the syncpoint can currently have (the
115 //!< value it had when last synchronized with host1x)
116 std::atomic<u32> counter_max; //!< The maximum value the syncpoint can reach according to
117 //!< the current usage
118 bool interface_managed; //!< Whether the syncpoint is managed by a host1x client interface,
119 //!< i.e. a HW block that can handle host1x transactions on behalf
120 //!< of a host1x client (which would otherwise have to be synced
121 //!< manually using PIO, which is synchronous and requires direct
122 //!< cooperation of the CPU)
123 bool reserved; //!< Whether the syncpoint is reserved; not to be confused with a reserved
124 //!< value
125 };
126
127 static constexpr std::size_t SyncpointCount{192};
128 std::array<SyncpointInfo, SyncpointCount> syncpoints{};
129 std::mutex reservation_lock;
130
131 Tegra::Host1x::Host1x& host1x;
132};
133
134} // namespace Service::Nvidia::NvCore
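Taken together, the interface above implies a simple fence lifecycle: reserve a syncpoint,
advance its max when submitting work, then poll the cached minimum until the fence signals.
A hypothetical caller, sketched under the assumption that a SyncpointManager reference is at
hand (the busy-wait is purely illustrative):

    #include "core/hle/service/nvdrv/core/syncpoint_manager.h"

    void SubmitAndWait(Service::Nvidia::NvCore::SyncpointManager& syncpoints) {
        const u32 id = syncpoints.AllocateSyncpoint(false); // host-managed syncpoint
        syncpoints.IncrementSyncpointMaxExt(id, 1);         // one increment will be submitted
        const Service::Nvidia::NvFence fence =
            syncpoints.GetSyncpointFence(id);               // signals at the new max

        // ... submit work that increments the syncpoint once ...

        while (!syncpoints.IsFenceSignalled(fence)) {
            syncpoints.UpdateMin(id); // refresh the cached minimum from host1x
        }

        syncpoints.FreeSyncpoint(id);
    }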
diff --git a/src/core/hle/service/nvdrv/devices/nvdevice.h b/src/core/hle/service/nvdrv/devices/nvdevice.h
index 696e8121e..204b0e757 100644
--- a/src/core/hle/service/nvdrv/devices/nvdevice.h
+++ b/src/core/hle/service/nvdrv/devices/nvdevice.h
@@ -11,6 +11,10 @@ namespace Core {
11class System; 11class System;
12} 12}
13 13
14namespace Kernel {
15class KEvent;
16}
17
14namespace Service::Nvidia::Devices { 18namespace Service::Nvidia::Devices {
15 19
16/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to 20/// Represents an abstract nvidia device node. It is to be subclassed by concrete device nodes to
@@ -64,6 +68,10 @@ public:
64 */ 68 */
65 virtual void OnClose(DeviceFD fd) = 0; 69 virtual void OnClose(DeviceFD fd) = 0;
66 70
71 virtual Kernel::KEvent* QueryEvent(u32 event_id) {
72 return nullptr;
73 }
74
67protected: 75protected:
68 Core::System& system; 76 Core::System& system;
69}; 77};
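The new virtual defaults to "no event", so existing devices need no changes; a device that does
expose events simply overrides it, as the devices further down in this change do. A simplified,
standalone illustration of the pattern (types here stand in for the real Kernel::KEvent and
nvdevice; they are not part of the diff):

    // Standalone illustration of the QueryEvent override pattern.
    struct KEvent {};

    struct Device {
        // Base default: devices expose no events unless they override this.
        virtual KEvent* QueryEvent(unsigned event_id) {
            return nullptr;
        }
        virtual ~Device() = default;
    };

    struct DisplayDevice final : Device {
        KEvent* QueryEvent(unsigned event_id) override {
            // Hand back the one event this device knows about; everything else
            // falls through to the base behaviour.
            return event_id == 0 ? &vsync_event : nullptr;
        }
        KEvent vsync_event;
    };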
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
index 604711914..4122fc98d 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.cpp
@@ -5,15 +5,16 @@
5#include "common/logging/log.h" 5#include "common/logging/log.h"
6#include "core/core.h" 6#include "core/core.h"
7#include "core/core_timing.h" 7#include "core/core_timing.h"
8#include "core/hle/service/nvdrv/core/container.h"
9#include "core/hle/service/nvdrv/core/nvmap.h"
8#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h" 10#include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
9#include "core/hle/service/nvdrv/devices/nvmap.h"
10#include "core/perf_stats.h" 11#include "core/perf_stats.h"
11#include "video_core/gpu.h" 12#include "video_core/gpu.h"
12 13
13namespace Service::Nvidia::Devices { 14namespace Service::Nvidia::Devices {
14 15
15nvdisp_disp0::nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) 16nvdisp_disp0::nvdisp_disp0(Core::System& system_, NvCore::Container& core)
16 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} 17 : nvdevice{system_}, container{core}, nvmap{core.GetNvMapFile()} {}
17nvdisp_disp0::~nvdisp_disp0() = default; 18nvdisp_disp0::~nvdisp_disp0() = default;
18 19
19NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 20NvResult nvdisp_disp0::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -39,8 +40,9 @@ void nvdisp_disp0::OnClose(DeviceFD fd) {}
39 40
40void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, 41void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,
41 u32 height, u32 stride, android::BufferTransformFlags transform, 42 u32 height, u32 stride, android::BufferTransformFlags transform,
42 const Common::Rectangle<int>& crop_rect) { 43 const Common::Rectangle<int>& crop_rect,
43 const VAddr addr = nvmap_dev->GetObjectAddress(buffer_handle); 44 std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences) {
45 const VAddr addr = nvmap.GetHandleAddress(buffer_handle);
44 LOG_TRACE(Service, 46 LOG_TRACE(Service,
45 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}", 47 "Drawing from address {:X} offset {:08X} Width {} Height {} Stride {} Format {}",
46 addr, offset, width, height, stride, format); 48 addr, offset, width, height, stride, format);
@@ -48,10 +50,15 @@ void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat form
48 const Tegra::FramebufferConfig framebuffer{addr, offset, width, height, 50 const Tegra::FramebufferConfig framebuffer{addr, offset, width, height,
49 stride, format, transform, crop_rect}; 51 stride, format, transform, crop_rect};
50 52
53 system.GPU().RequestSwapBuffers(&framebuffer, fences, num_fences);
51 system.GetPerfStats().EndSystemFrame(); 54 system.GetPerfStats().EndSystemFrame();
52 system.GPU().SwapBuffers(&framebuffer);
53 system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs()); 55 system.SpeedLimiter().DoSpeedLimiting(system.CoreTiming().GetGlobalTimeUs());
54 system.GetPerfStats().BeginSystemFrame(); 56 system.GetPerfStats().BeginSystemFrame();
55} 57}
56 58
59Kernel::KEvent* nvdisp_disp0::QueryEvent(u32 event_id) {
60 LOG_CRITICAL(Service_NVDRV, "Unknown DISP Event {}", event_id);
61 return nullptr;
62}
63
57} // namespace Service::Nvidia::Devices 64} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
index 67b105e02..04217ab12 100644
--- a/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
+++ b/src/core/hle/service/nvdrv/devices/nvdisp_disp0.h
@@ -11,13 +11,18 @@
11#include "core/hle/service/nvflinger/buffer_transform_flags.h" 11#include "core/hle/service/nvflinger/buffer_transform_flags.h"
12#include "core/hle/service/nvflinger/pixel_format.h" 12#include "core/hle/service/nvflinger/pixel_format.h"
13 13
14namespace Service::Nvidia::NvCore {
15class Container;
16class NvMap;
17} // namespace Service::Nvidia::NvCore
18
14namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
15 20
16class nvmap; 21class nvmap;
17 22
18class nvdisp_disp0 final : public nvdevice { 23class nvdisp_disp0 final : public nvdevice {
19public: 24public:
20 explicit nvdisp_disp0(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); 25 explicit nvdisp_disp0(Core::System& system_, NvCore::Container& core);
21 ~nvdisp_disp0() override; 26 ~nvdisp_disp0() override;
22 27
23 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 28 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -33,10 +38,14 @@ public:
33 /// Performs a screen flip, drawing the buffer pointed to by the handle. 38 /// Performs a screen flip, drawing the buffer pointed to by the handle.
34 void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height, 39 void flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width, u32 height,
35 u32 stride, android::BufferTransformFlags transform, 40 u32 stride, android::BufferTransformFlags transform,
36 const Common::Rectangle<int>& crop_rect); 41 const Common::Rectangle<int>& crop_rect,
42 std::array<Service::Nvidia::NvFence, 4>& fences, u32 num_fences);
43
44 Kernel::KEvent* QueryEvent(u32 event_id) override;
37 45
38private: 46private:
39 std::shared_ptr<nvmap> nvmap_dev; 47 NvCore::Container& container;
48 NvCore::NvMap& nvmap;
40}; 49};
41 50
42} // namespace Service::Nvidia::Devices 51} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
index 9867a648d..6411dbf43 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.cpp
@@ -1,21 +1,30 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
3 4
4#include <cstring> 5#include <cstring>
5#include <utility> 6#include <utility>
6 7
8#include "common/alignment.h"
7#include "common/assert.h" 9#include "common/assert.h"
8#include "common/logging/log.h" 10#include "common/logging/log.h"
9#include "core/core.h" 11#include "core/core.h"
12#include "core/hle/service/nvdrv/core/container.h"
13#include "core/hle/service/nvdrv/core/nvmap.h"
10#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h" 14#include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
11#include "core/hle/service/nvdrv/devices/nvmap.h" 15#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
16#include "core/hle/service/nvdrv/nvdrv.h"
17#include "video_core/control/channel_state.h"
18#include "video_core/gpu.h"
12#include "video_core/memory_manager.h" 19#include "video_core/memory_manager.h"
13#include "video_core/rasterizer_interface.h" 20#include "video_core/rasterizer_interface.h"
14 21
15namespace Service::Nvidia::Devices { 22namespace Service::Nvidia::Devices {
16 23
17nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_) 24nvhost_as_gpu::nvhost_as_gpu(Core::System& system_, Module& module_, NvCore::Container& core)
18 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)} {} 25 : nvdevice{system_}, module{module_}, container{core}, nvmap{core.GetNvMapFile()}, vm{},
26 gmmu{} {}
27
19nvhost_as_gpu::~nvhost_as_gpu() = default; 28nvhost_as_gpu::~nvhost_as_gpu() = default;
20 29
21NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 30NvResult nvhost_as_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -82,12 +91,52 @@ NvResult nvhost_as_gpu::AllocAsEx(const std::vector<u8>& input, std::vector<u8>&
82 IoctlAllocAsEx params{}; 91 IoctlAllocAsEx params{};
83 std::memcpy(&params, input.data(), input.size()); 92 std::memcpy(&params, input.data(), input.size());
84 93
85 LOG_WARNING(Service_NVDRV, "(STUBBED) called, big_page_size=0x{:X}", params.big_page_size); 94 LOG_DEBUG(Service_NVDRV, "called, big_page_size=0x{:X}", params.big_page_size);
86 if (params.big_page_size == 0) { 95
87 params.big_page_size = DEFAULT_BIG_PAGE_SIZE; 96 std::scoped_lock lock(mutex);
97
98 if (vm.initialised) {
99 ASSERT_MSG(false, "Cannot initialise an address space twice!");
100 return NvResult::InvalidState;
101 }
102
103 if (params.big_page_size) {
104 if (!std::has_single_bit(params.big_page_size)) {
105 LOG_ERROR(Service_NVDRV, "Non power-of-2 big page size: 0x{:X}!", params.big_page_size);
106 return NvResult::BadValue;
107 }
108
109 if ((params.big_page_size & VM::SUPPORTED_BIG_PAGE_SIZES) == 0) {
110 LOG_ERROR(Service_NVDRV, "Unsupported big page size: 0x{:X}!", params.big_page_size);
111 return NvResult::BadValue;
112 }
113
114 vm.big_page_size = params.big_page_size;
115 vm.big_page_size_bits = static_cast<u32>(std::countr_zero(params.big_page_size));
116
117 vm.va_range_start = params.big_page_size << VM::VA_START_SHIFT;
118 }
119
120 // If va_range_start is unspecified then the default values set above are kept
121 if (params.va_range_start) {
122 vm.va_range_start = params.va_range_start;
123 vm.va_range_split = params.va_range_split;
124 vm.va_range_end = params.va_range_end;
88 } 125 }
89 126
90 big_page_size = params.big_page_size; 127 const auto start_pages{static_cast<u32>(vm.va_range_start >> VM::PAGE_SIZE_BITS)};
128 const auto end_pages{static_cast<u32>(vm.va_range_split >> VM::PAGE_SIZE_BITS)};
129 vm.small_page_allocator = std::make_shared<VM::Allocator>(start_pages, end_pages);
130
131 const auto start_big_pages{static_cast<u32>(vm.va_range_split >> vm.big_page_size_bits)};
132 const auto end_big_pages{
133 static_cast<u32>((vm.va_range_end - vm.va_range_split) >> vm.big_page_size_bits)};
134 vm.big_page_allocator = std::make_unique<VM::Allocator>(start_big_pages, end_big_pages);
135
136 gmmu = std::make_shared<Tegra::MemoryManager>(system, 40, vm.big_page_size_bits,
137 VM::PAGE_SIZE_BITS);
138 system.GPU().InitAddressSpace(*gmmu);
139 vm.initialised = true;
91 140
92 return NvResult::Success; 141 return NvResult::Success;
93} 142}
@@ -99,21 +148,76 @@ NvResult nvhost_as_gpu::AllocateSpace(const std::vector<u8>& input, std::vector<
99 LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages, 148 LOG_DEBUG(Service_NVDRV, "called, pages={:X}, page_size={:X}, flags={:X}", params.pages,
100 params.page_size, params.flags); 149 params.page_size, params.flags);
101 150
102 const auto size{static_cast<u64>(params.pages) * static_cast<u64>(params.page_size)}; 151 std::scoped_lock lock(mutex);
103 if ((params.flags & AddressSpaceFlags::FixedOffset) != AddressSpaceFlags::None) { 152
104 params.offset = *system.GPU().MemoryManager().AllocateFixed(params.offset, size); 153 if (!vm.initialised) {
154 return NvResult::BadValue;
155 }
156
157 if (params.page_size != VM::YUZU_PAGESIZE && params.page_size != vm.big_page_size) {
158 return NvResult::BadValue;
159 }
160
161 if (params.page_size != vm.big_page_size &&
162 ((params.flags & MappingFlags::Sparse) != MappingFlags::None)) {
163 UNIMPLEMENTED_MSG("Sparse small pages are not implemented!");
164 return NvResult::NotImplemented;
165 }
166
167 const u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
168 : vm.big_page_size_bits};
169
170 auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
171 : *vm.big_page_allocator};
172
173 if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
174 allocator.AllocateFixed(static_cast<u32>(params.offset >> page_size_bits), params.pages);
105 } else { 175 } else {
106 params.offset = system.GPU().MemoryManager().Allocate(size, params.align); 176 params.offset = static_cast<u64>(allocator.Allocate(params.pages)) << page_size_bits;
177 if (!params.offset) {
178 ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
179 return NvResult::InsufficientMemory;
180 }
107 } 181 }
108 182
109 auto result = NvResult::Success; 183 u64 size{static_cast<u64>(params.pages) * params.page_size};
110 if (!params.offset) { 184
111 LOG_CRITICAL(Service_NVDRV, "allocation failed for size {}", size); 185 if ((params.flags & MappingFlags::Sparse) != MappingFlags::None) {
112 result = NvResult::InsufficientMemory; 186 gmmu->MapSparse(params.offset, size);
113 } 187 }
114 188
189 allocation_map[params.offset] = {
190 .size = size,
191 .mappings{},
192 .page_size = params.page_size,
193 .sparse = (params.flags & MappingFlags::Sparse) != MappingFlags::None,
194 .big_pages = params.page_size != VM::YUZU_PAGESIZE,
195 };
196
115 std::memcpy(output.data(), &params, output.size()); 197 std::memcpy(output.data(), &params, output.size());
116 return result; 198 return NvResult::Success;
199}
200
201void nvhost_as_gpu::FreeMappingLocked(u64 offset) {
202 auto mapping{mapping_map.at(offset)};
203
204 if (!mapping->fixed) {
205 auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
206 u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
207
208 allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
209 static_cast<u32>(mapping->size >> page_size_bits));
210 }
211
212 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
213 // Only FreeSpace can unmap them fully
214 if (mapping->sparse_alloc) {
215 gmmu->MapSparse(offset, mapping->size, mapping->big_page);
216 } else {
217 gmmu->Unmap(offset, mapping->size);
218 }
219
220 mapping_map.erase(offset);
117} 221}
118 222
119NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) { 223NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -123,8 +227,40 @@ NvResult nvhost_as_gpu::FreeSpace(const std::vector<u8>& input, std::vector<u8>&
123 LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset, 227 LOG_DEBUG(Service_NVDRV, "called, offset={:X}, pages={:X}, page_size={:X}", params.offset,
124 params.pages, params.page_size); 228 params.pages, params.page_size);
125 229
126 system.GPU().MemoryManager().Unmap(params.offset, 230 std::scoped_lock lock(mutex);
127 static_cast<std::size_t>(params.pages) * params.page_size); 231
232 if (!vm.initialised) {
233 return NvResult::BadValue;
234 }
235
236 try {
237 auto allocation{allocation_map.at(params.offset)};
238
239 if (allocation.page_size != params.page_size ||
240 allocation.size != (static_cast<u64>(params.pages) * params.page_size)) {
241 return NvResult::BadValue;
242 }
243
244 for (const auto& mapping : allocation.mappings) {
245 FreeMappingLocked(mapping->offset);
246 }
247
248 // Unset sparse flag if required
249 if (allocation.sparse) {
250 gmmu->Unmap(params.offset, allocation.size);
251 }
252
253 auto& allocator{params.page_size == VM::YUZU_PAGESIZE ? *vm.small_page_allocator
254 : *vm.big_page_allocator};
255 u32 page_size_bits{params.page_size == VM::YUZU_PAGESIZE ? VM::PAGE_SIZE_BITS
256 : vm.big_page_size_bits};
257
258 allocator.Free(static_cast<u32>(params.offset >> page_size_bits),
259 static_cast<u32>(allocation.size >> page_size_bits));
260 allocation_map.erase(params.offset);
261 } catch (const std::out_of_range&) {
262 return NvResult::BadValue;
263 }
128 264
129 std::memcpy(output.data(), &params, output.size()); 265 std::memcpy(output.data(), &params, output.size());
130 return NvResult::Success; 266 return NvResult::Success;
@@ -135,35 +271,52 @@ NvResult nvhost_as_gpu::Remap(const std::vector<u8>& input, std::vector<u8>& out
135 271
136 LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries); 272 LOG_DEBUG(Service_NVDRV, "called, num_entries=0x{:X}", num_entries);
137 273
138 auto result = NvResult::Success;
139 std::vector<IoctlRemapEntry> entries(num_entries); 274 std::vector<IoctlRemapEntry> entries(num_entries);
140 std::memcpy(entries.data(), input.data(), input.size()); 275 std::memcpy(entries.data(), input.data(), input.size());
141 276
277 std::scoped_lock lock(mutex);
278
279 if (!vm.initialised) {
280 return NvResult::BadValue;
281 }
282
142 for (const auto& entry : entries) { 283 for (const auto& entry : entries) {
143 LOG_DEBUG(Service_NVDRV, "remap entry, offset=0x{:X} handle=0x{:X} pages=0x{:X}", 284 GPUVAddr virtual_address{static_cast<u64>(entry.as_offset_big_pages)
144 entry.offset, entry.nvmap_handle, entry.pages); 285 << vm.big_page_size_bits};
286 u64 size{static_cast<u64>(entry.big_pages) << vm.big_page_size_bits};
145 287
146 const auto object{nvmap_dev->GetObject(entry.nvmap_handle)}; 288 auto alloc{allocation_map.upper_bound(virtual_address)};
147 if (!object) { 289
148 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", entry.nvmap_handle); 290 if (alloc-- == allocation_map.begin() ||
149 result = NvResult::InvalidState; 291 (virtual_address - alloc->first) + size > alloc->second.size) {
150 break; 292 LOG_WARNING(Service_NVDRV, "Cannot remap into an unallocated region!");
293 return NvResult::BadValue;
151 } 294 }
152 295
153 const auto offset{static_cast<GPUVAddr>(entry.offset) << 0x10}; 296 if (!alloc->second.sparse) {
154 const auto size{static_cast<u64>(entry.pages) << 0x10}; 297 LOG_WARNING(Service_NVDRV, "Cannot remap a non-sparse mapping!");
155 const auto map_offset{static_cast<u64>(entry.map_offset) << 0x10}; 298 return NvResult::BadValue;
156 const auto addr{system.GPU().MemoryManager().Map(object->addr + map_offset, offset, size)}; 299 }
157 300
158 if (!addr) { 301 const bool use_big_pages = alloc->second.big_pages;
159 LOG_CRITICAL(Service_NVDRV, "map returned an invalid address!"); 302 if (!entry.handle) {
160 result = NvResult::InvalidState; 303 gmmu->MapSparse(virtual_address, size, use_big_pages);
161 break; 304 } else {
305 auto handle{nvmap.GetHandle(entry.handle)};
306 if (!handle) {
307 return NvResult::BadValue;
308 }
309
310 VAddr cpu_address{static_cast<VAddr>(
311 handle->address +
312 (static_cast<u64>(entry.handle_offset_big_pages) << vm.big_page_size_bits))};
313
314 gmmu->Map(virtual_address, cpu_address, size, use_big_pages);
162 } 315 }
163 } 316 }
164 317
165 std::memcpy(output.data(), entries.data(), output.size()); 318 std::memcpy(output.data(), entries.data(), output.size());
166 return result; 319 return NvResult::Success;
167} 320}
168 321
169NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) { 322NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -173,79 +326,98 @@ NvResult nvhost_as_gpu::MapBufferEx(const std::vector<u8>& input, std::vector<u8
173 LOG_DEBUG(Service_NVDRV, 326 LOG_DEBUG(Service_NVDRV,
174 "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}" 327 "called, flags={:X}, nvmap_handle={:X}, buffer_offset={}, mapping_size={}"
175 ", offset={}", 328 ", offset={}",
176 params.flags, params.nvmap_handle, params.buffer_offset, params.mapping_size, 329 params.flags, params.handle, params.buffer_offset, params.mapping_size,
177 params.offset); 330 params.offset);
178 331
179 const auto object{nvmap_dev->GetObject(params.nvmap_handle)}; 332 std::scoped_lock lock(mutex);
180 if (!object) {
181 LOG_CRITICAL(Service_NVDRV, "invalid nvmap_handle={:X}", params.nvmap_handle);
182 std::memcpy(output.data(), &params, output.size());
183 return NvResult::InvalidState;
184 }
185
186 // The real nvservices doesn't make a distinction between handles and ids, and
187 // object can only have one handle and it will be the same as its id. Assert that this is the
188 // case to prevent unexpected behavior.
189 ASSERT(object->id == params.nvmap_handle);
190 auto& gpu = system.GPU();
191 333
192 u64 page_size{params.page_size}; 334 if (!vm.initialised) {
193 if (!page_size) { 335 return NvResult::BadValue;
194 page_size = object->align;
195 } 336 }
196 337
197 if ((params.flags & AddressSpaceFlags::Remap) != AddressSpaceFlags::None) { 338 // Remaps a subregion of an existing mapping to a different PA
198 if (const auto buffer_map{FindBufferMap(params.offset)}; buffer_map) { 339 if ((params.flags & MappingFlags::Remap) != MappingFlags::None) {
199 const auto cpu_addr{static_cast<VAddr>(buffer_map->CpuAddr() + params.buffer_offset)}; 340 try {
200 const auto gpu_addr{static_cast<GPUVAddr>(params.offset + params.buffer_offset)}; 341 auto mapping{mapping_map.at(params.offset)};
201 342
202 if (!gpu.MemoryManager().Map(cpu_addr, gpu_addr, params.mapping_size)) { 343 if (mapping->size < params.mapping_size) {
203 LOG_CRITICAL(Service_NVDRV, 344 LOG_WARNING(Service_NVDRV,
204 "remap failed, flags={:X}, nvmap_handle={:X}, buffer_offset={}, " 345 "Cannot remap a partially mapped GPU address space region: 0x{:X}",
205 "mapping_size = {}, offset={}", 346 params.offset);
206 params.flags, params.nvmap_handle, params.buffer_offset, 347 return NvResult::BadValue;
207 params.mapping_size, params.offset);
208
209 std::memcpy(output.data(), &params, output.size());
210 return NvResult::InvalidState;
211 } 348 }
212 349
213 std::memcpy(output.data(), &params, output.size()); 350 u64 gpu_address{static_cast<u64>(params.offset + params.buffer_offset)};
214 return NvResult::Success; 351 VAddr cpu_address{mapping->ptr + params.buffer_offset};
215 } else { 352
216 LOG_CRITICAL(Service_NVDRV, "address not mapped offset={}", params.offset); 353 gmmu->Map(gpu_address, cpu_address, params.mapping_size, mapping->big_page);
217 354
218 std::memcpy(output.data(), &params, output.size()); 355 return NvResult::Success;
219 return NvResult::InvalidState; 356 } catch (const std::out_of_range&) {
357 LOG_WARNING(Service_NVDRV, "Cannot remap an unmapped GPU address space region: 0x{:X}",
358 params.offset);
359 return NvResult::BadValue;
220 } 360 }
221 } 361 }
222 362
223 // We can only map objects that have already been assigned a CPU address. 363 auto handle{nvmap.GetHandle(params.handle)};
224 ASSERT(object->status == nvmap::Object::Status::Allocated); 364 if (!handle) {
225 365 return NvResult::BadValue;
226 const auto physical_address{object->addr + params.buffer_offset};
227 u64 size{params.mapping_size};
228 if (!size) {
229 size = object->size;
230 } 366 }
231 367
232 const bool is_alloc{(params.flags & AddressSpaceFlags::FixedOffset) == AddressSpaceFlags::None}; 368 VAddr cpu_address{static_cast<VAddr>(handle->address + params.buffer_offset)};
233 if (is_alloc) { 369 u64 size{params.mapping_size ? params.mapping_size : handle->orig_size};
234 params.offset = gpu.MemoryManager().MapAllocate(physical_address, size, page_size); 370
235 } else { 371 bool big_page{[&]() {
236 params.offset = gpu.MemoryManager().Map(physical_address, params.offset, size); 372 if (Common::IsAligned(handle->align, vm.big_page_size)) {
237 } 373 return true;
374 } else if (Common::IsAligned(handle->align, VM::YUZU_PAGESIZE)) {
375 return false;
376 } else {
377 ASSERT(false);
378 return false;
379 }
380 }()};
381
382 if ((params.flags & MappingFlags::Fixed) != MappingFlags::None) {
383 auto alloc{allocation_map.upper_bound(params.offset)};
238 384
239 auto result = NvResult::Success; 385 if (alloc-- == allocation_map.begin() ||
240 if (!params.offset) { 386 (params.offset - alloc->first) + size > alloc->second.size) {
241 LOG_CRITICAL(Service_NVDRV, "failed to map size={}", size); 387 ASSERT_MSG(false, "Cannot perform a fixed mapping into an unallocated region!");
242 result = NvResult::InvalidState; 388 return NvResult::BadValue;
389 }
390
391 const bool use_big_pages = alloc->second.big_pages && big_page;
392 gmmu->Map(params.offset, cpu_address, size, use_big_pages);
393
394 auto mapping{std::make_shared<Mapping>(cpu_address, params.offset, size, true,
395 use_big_pages, alloc->second.sparse)};
396 alloc->second.mappings.push_back(mapping);
397 mapping_map[params.offset] = mapping;
243 } else { 398 } else {
244 AddBufferMap(params.offset, size, physical_address, is_alloc); 399
400 auto& allocator{big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
401 u32 page_size{big_page ? vm.big_page_size : VM::YUZU_PAGESIZE};
402 u32 page_size_bits{big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
403
404 params.offset = static_cast<u64>(allocator.Allocate(
405 static_cast<u32>(Common::AlignUp(size, page_size) >> page_size_bits)))
406 << page_size_bits;
407 if (!params.offset) {
408 ASSERT_MSG(false, "Failed to allocate free space in the GPU AS!");
409 return NvResult::InsufficientMemory;
410 }
411
412 gmmu->Map(params.offset, cpu_address, Common::AlignUp(size, page_size), big_page);
413
414 auto mapping{
415 std::make_shared<Mapping>(cpu_address, params.offset, size, false, big_page, false)};
416 mapping_map[params.offset] = mapping;
245 } 417 }
246 418
247 std::memcpy(output.data(), &params, output.size()); 419 std::memcpy(output.data(), &params, output.size());
248 return result; 420 return NvResult::Success;
249} 421}
250 422
251NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { 423NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -254,47 +426,82 @@ NvResult nvhost_as_gpu::UnmapBuffer(const std::vector<u8>& input, std::vector<u8
254 426
255 LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset); 427 LOG_DEBUG(Service_NVDRV, "called, offset=0x{:X}", params.offset);
256 428
257 if (const auto size{RemoveBufferMap(params.offset)}; size) { 429 std::scoped_lock lock(mutex);
258 system.GPU().MemoryManager().Unmap(params.offset, *size); 430
259 } else { 431 if (!vm.initialised) {
260 LOG_ERROR(Service_NVDRV, "invalid offset=0x{:X}", params.offset); 432 return NvResult::BadValue;
433 }
434
435 try {
436 auto mapping{mapping_map.at(params.offset)};
437
438 if (!mapping->fixed) {
439 auto& allocator{mapping->big_page ? *vm.big_page_allocator : *vm.small_page_allocator};
440 u32 page_size_bits{mapping->big_page ? vm.big_page_size_bits : VM::PAGE_SIZE_BITS};
441
442 allocator.Free(static_cast<u32>(mapping->offset >> page_size_bits),
443 static_cast<u32>(mapping->size >> page_size_bits));
444 }
445
446 // Sparse mappings shouldn't be fully unmapped, just returned to their sparse state
447 // Only FreeSpace can unmap them fully
448 if (mapping->sparse_alloc) {
449 gmmu->MapSparse(params.offset, mapping->size, mapping->big_page);
450 } else {
451 gmmu->Unmap(params.offset, mapping->size);
452 }
453
454 mapping_map.erase(params.offset);
455 } catch (const std::out_of_range&) {
456 LOG_WARNING(Service_NVDRV, "Couldn't find region to unmap at 0x{:X}", params.offset);
261 } 457 }
262 458
263 std::memcpy(output.data(), &params, output.size());
264 return NvResult::Success; 459 return NvResult::Success;
265} 460}
266 461
267NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) { 462NvResult nvhost_as_gpu::BindChannel(const std::vector<u8>& input, std::vector<u8>& output) {
268 IoctlBindChannel params{}; 463 IoctlBindChannel params{};
269 std::memcpy(&params, input.data(), input.size()); 464 std::memcpy(&params, input.data(), input.size());
270 LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}", params.fd); 465 LOG_DEBUG(Service_NVDRV, "called, fd={:X}", params.fd);
271 466
272 channel = params.fd; 467 auto gpu_channel_device = module.GetDevice<nvhost_gpu>(params.fd);
468 gpu_channel_device->channel_state->memory_manager = gmmu;
273 return NvResult::Success; 469 return NvResult::Success;
274} 470}
275 471
472void nvhost_as_gpu::GetVARegionsImpl(IoctlGetVaRegions& params) {
473 params.buf_size = 2 * sizeof(VaRegion);
474
475 params.regions = std::array<VaRegion, 2>{
476 VaRegion{
477 .offset = vm.small_page_allocator->GetVAStart() << VM::PAGE_SIZE_BITS,
478 .page_size = VM::YUZU_PAGESIZE,
479 ._pad0_{},
480 .pages = vm.small_page_allocator->GetVALimit() - vm.small_page_allocator->GetVAStart(),
481 },
482 VaRegion{
483 .offset = vm.big_page_allocator->GetVAStart() << vm.big_page_size_bits,
484 .page_size = vm.big_page_size,
485 ._pad0_{},
486 .pages = vm.big_page_allocator->GetVALimit() - vm.big_page_allocator->GetVAStart(),
487 },
488 };
489}
490
276NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) { 491NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u8>& output) {
277 IoctlGetVaRegions params{}; 492 IoctlGetVaRegions params{};
278 std::memcpy(&params, input.data(), input.size()); 493 std::memcpy(&params, input.data(), input.size());
279 494
280 LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, 495 LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
281 params.buf_size); 496 params.buf_size);
282
283 params.buf_size = 0x30;
284 497
285 params.small = IoctlVaRegion{ 498 std::scoped_lock lock(mutex);
286 .offset = 0x04000000,
287 .page_size = DEFAULT_SMALL_PAGE_SIZE,
288 .pages = 0x3fbfff,
289 };
290 499
291 params.big = IoctlVaRegion{ 500 if (!vm.initialised) {
292 .offset = 0x04000000, 501 return NvResult::BadValue;
293 .page_size = big_page_size, 502 }
294 .pages = 0x1bffff,
295 };
296 503
297 // TODO(ogniK): This probably can stay stubbed but should add support way way later 504 GetVARegionsImpl(params);
298 505
299 std::memcpy(output.data(), &params, output.size()); 506 std::memcpy(output.data(), &params, output.size());
300 return NvResult::Success; 507 return NvResult::Success;
@@ -305,62 +512,27 @@ NvResult nvhost_as_gpu::GetVARegions(const std::vector<u8>& input, std::vector<u
305 IoctlGetVaRegions params{}; 512 IoctlGetVaRegions params{};
306 std::memcpy(&params, input.data(), input.size()); 513 std::memcpy(&params, input.data(), input.size());
307 514
308 LOG_WARNING(Service_NVDRV, "(STUBBED) called, buf_addr={:X}, buf_size={:X}", params.buf_addr, 515 LOG_DEBUG(Service_NVDRV, "called, buf_addr={:X}, buf_size={:X}", params.buf_addr,
309 params.buf_size); 516 params.buf_size);
310
311 params.buf_size = 0x30;
312 517
313 params.small = IoctlVaRegion{ 518 std::scoped_lock lock(mutex);
314 .offset = 0x04000000,
315 .page_size = 0x1000,
316 .pages = 0x3fbfff,
317 };
318 519
319 params.big = IoctlVaRegion{ 520 if (!vm.initialised) {
320 .offset = 0x04000000, 521 return NvResult::BadValue;
321 .page_size = big_page_size, 522 }
322 .pages = 0x1bffff,
323 };
324 523
325 // TODO(ogniK): This probably can stay stubbed but should add support way way later 524 GetVARegionsImpl(params);
326 525
327 std::memcpy(output.data(), &params, output.size()); 526 std::memcpy(output.data(), &params, output.size());
328 std::memcpy(inline_output.data(), &params.small, sizeof(IoctlVaRegion)); 527 std::memcpy(inline_output.data(), &params.regions[0], sizeof(VaRegion));
329 std::memcpy(inline_output.data() + sizeof(IoctlVaRegion), &params.big, sizeof(IoctlVaRegion)); 528 std::memcpy(inline_output.data() + sizeof(VaRegion), &params.regions[1], sizeof(VaRegion));
330 529
331 return NvResult::Success; 530 return NvResult::Success;
332} 531}
333 532
334std::optional<nvhost_as_gpu::BufferMap> nvhost_as_gpu::FindBufferMap(GPUVAddr gpu_addr) const { 533Kernel::KEvent* nvhost_as_gpu::QueryEvent(u32 event_id) {
335 const auto end{buffer_mappings.upper_bound(gpu_addr)}; 534 LOG_CRITICAL(Service_NVDRV, "Unknown AS GPU Event {}", event_id);
336 for (auto iter{buffer_mappings.begin()}; iter != end; ++iter) { 535 return nullptr;
337 if (gpu_addr >= iter->second.StartAddr() && gpu_addr < iter->second.EndAddr()) {
338 return iter->second;
339 }
340 }
341
342 return std::nullopt;
343}
344
345void nvhost_as_gpu::AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr,
346 bool is_allocated) {
347 buffer_mappings[gpu_addr] = {gpu_addr, size, cpu_addr, is_allocated};
348}
349
350std::optional<std::size_t> nvhost_as_gpu::RemoveBufferMap(GPUVAddr gpu_addr) {
351 if (const auto iter{buffer_mappings.find(gpu_addr)}; iter != buffer_mappings.end()) {
352 std::size_t size{};
353
354 if (iter->second.IsAllocated()) {
355 size = iter->second.Size();
356 }
357
358 buffer_mappings.erase(iter);
359
360 return size;
361 }
362
363 return std::nullopt;
364} 536}
365 537
366} // namespace Service::Nvidia::Devices 538} // namespace Service::Nvidia::Devices
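Remap and MapBufferEx above both locate the allocation containing a given GPU VA with the same
std::map idiom: upper_bound, then step back one entry and bounds-check. A standalone sketch of
that lookup (simplified types, not the diff's own; it checks begin() before decrementing):

    #include <cstdint>
    #include <map>
    #include <optional>

    struct Range {
        std::uint64_t size;
    };

    // Returns the base of the range containing [address, address + size), if any.
    std::optional<std::uint64_t> FindContaining(const std::map<std::uint64_t, Range>& ranges,
                                                std::uint64_t address, std::uint64_t size) {
        auto it = ranges.upper_bound(address); // first range based strictly above address
        if (it == ranges.begin()) {
            return std::nullopt;               // nothing is based at or below address
        }
        --it;                                  // candidate range based at or below address
        if ((address - it->first) + size > it->second.size) {
            return std::nullopt;               // requested span runs past the candidate
        }
        return it->first;
    }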
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
index 555843a6f..86fe71c75 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_as_gpu.h
@@ -1,35 +1,50 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
3 4
4#pragma once 5#pragma once
5 6
7#include <bit>
8#include <list>
6#include <map> 9#include <map>
7#include <memory> 10#include <memory>
11#include <mutex>
8#include <optional> 12#include <optional>
9#include <vector> 13#include <vector>
10 14
15#include "common/address_space.h"
11#include "common/common_funcs.h" 16#include "common/common_funcs.h"
12#include "common/common_types.h" 17#include "common/common_types.h"
13#include "common/swap.h" 18#include "common/swap.h"
19#include "core/hle/service/nvdrv/core/nvmap.h"
14#include "core/hle/service/nvdrv/devices/nvdevice.h" 20#include "core/hle/service/nvdrv/devices/nvdevice.h"
15 21
16namespace Service::Nvidia::Devices { 22namespace Tegra {
23class MemoryManager;
24} // namespace Tegra
25
26namespace Service::Nvidia {
27class Module;
28}
17 29
18constexpr u32 DEFAULT_BIG_PAGE_SIZE = 1 << 16; 30namespace Service::Nvidia::NvCore {
19constexpr u32 DEFAULT_SMALL_PAGE_SIZE = 1 << 12; 31class Container;
32class NvMap;
33} // namespace Service::Nvidia::NvCore
20 34
21class nvmap; 35namespace Service::Nvidia::Devices {
22 36
23enum class AddressSpaceFlags : u32 { 37enum class MappingFlags : u32 {
24 None = 0x0, 38 None = 0,
25 FixedOffset = 0x1, 39 Fixed = 1 << 0,
26 Remap = 0x100, 40 Sparse = 1 << 1,
41 Remap = 1 << 8,
27}; 42};
28DECLARE_ENUM_FLAG_OPERATORS(AddressSpaceFlags); 43DECLARE_ENUM_FLAG_OPERATORS(MappingFlags);
29 44
30class nvhost_as_gpu final : public nvdevice { 45class nvhost_as_gpu final : public nvdevice {
31public: 46public:
32 explicit nvhost_as_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_); 47 explicit nvhost_as_gpu(Core::System& system_, Module& module, NvCore::Container& core);
33 ~nvhost_as_gpu() override; 48 ~nvhost_as_gpu() override;
34 49
35 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 50 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -42,46 +57,17 @@ public:
42 void OnOpen(DeviceFD fd) override; 57 void OnOpen(DeviceFD fd) override;
43 void OnClose(DeviceFD fd) override; 58 void OnClose(DeviceFD fd) override;
44 59
45private: 60 Kernel::KEvent* QueryEvent(u32 event_id) override;
46 class BufferMap final { 61
47 public: 62 struct VaRegion {
48 constexpr BufferMap() = default; 63 u64 offset;
49 64 u32 page_size;
50 constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_) 65 u32 _pad0_;
51 : start_addr{start_addr_}, end_addr{start_addr_ + size_} {} 66 u64 pages;
52
53 constexpr BufferMap(GPUVAddr start_addr_, std::size_t size_, VAddr cpu_addr_,
54 bool is_allocated_)
55 : start_addr{start_addr_}, end_addr{start_addr_ + size_}, cpu_addr{cpu_addr_},
56 is_allocated{is_allocated_} {}
57
58 constexpr VAddr StartAddr() const {
59 return start_addr;
60 }
61
62 constexpr VAddr EndAddr() const {
63 return end_addr;
64 }
65
66 constexpr std::size_t Size() const {
67 return end_addr - start_addr;
68 }
69
70 constexpr VAddr CpuAddr() const {
71 return cpu_addr;
72 }
73
74 constexpr bool IsAllocated() const {
75 return is_allocated;
76 }
77
78 private:
79 GPUVAddr start_addr{};
80 GPUVAddr end_addr{};
81 VAddr cpu_addr{};
82 bool is_allocated{};
83 }; 67 };
68 static_assert(sizeof(VaRegion) == 0x18);
84 69
70private:
85 struct IoctlAllocAsEx { 71 struct IoctlAllocAsEx {
86 u32_le flags{}; // usually passes 1 72 u32_le flags{}; // usually passes 1
87 s32_le as_fd{}; // ignored; passes 0 73 s32_le as_fd{}; // ignored; passes 0
@@ -96,7 +82,7 @@ private:
96 struct IoctlAllocSpace { 82 struct IoctlAllocSpace {
97 u32_le pages{}; 83 u32_le pages{};
98 u32_le page_size{}; 84 u32_le page_size{};
99 AddressSpaceFlags flags{}; 85 MappingFlags flags{};
100 INSERT_PADDING_WORDS(1); 86 INSERT_PADDING_WORDS(1);
101 union { 87 union {
102 u64_le offset; 88 u64_le offset;
@@ -113,19 +99,19 @@ private:
113 static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size"); 99 static_assert(sizeof(IoctlFreeSpace) == 16, "IoctlFreeSpace is incorrect size");
114 100
115 struct IoctlRemapEntry { 101 struct IoctlRemapEntry {
116 u16_le flags{}; 102 u16 flags;
117 u16_le kind{}; 103 u16 kind;
118 u32_le nvmap_handle{}; 104 NvCore::NvMap::Handle::Id handle;
119 u32_le map_offset{}; 105 u32 handle_offset_big_pages;
120 u32_le offset{}; 106 u32 as_offset_big_pages;
121 u32_le pages{}; 107 u32 big_pages;
122 }; 108 };
123 static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size"); 109 static_assert(sizeof(IoctlRemapEntry) == 20, "IoctlRemapEntry is incorrect size");
124 110
125 struct IoctlMapBufferEx { 111 struct IoctlMapBufferEx {
126 AddressSpaceFlags flags{}; // bit0: fixed_offset, bit2: cacheable 112 MappingFlags flags{}; // bit0: fixed_offset, bit2: cacheable
127 u32_le kind{}; // -1 is default 113 u32_le kind{}; // -1 is default
128 u32_le nvmap_handle{}; 114 NvCore::NvMap::Handle::Id handle;
129 u32_le page_size{}; // 0 means don't care 115 u32_le page_size{}; // 0 means don't care
130 s64_le buffer_offset{}; 116 s64_le buffer_offset{};
131 u64_le mapping_size{}; 117 u64_le mapping_size{};
@@ -143,27 +129,15 @@ private:
143 }; 129 };
144 static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size"); 130 static_assert(sizeof(IoctlBindChannel) == 4, "IoctlBindChannel is incorrect size");
145 131
146 struct IoctlVaRegion {
147 u64_le offset{};
148 u32_le page_size{};
149 INSERT_PADDING_WORDS(1);
150 u64_le pages{};
151 };
152 static_assert(sizeof(IoctlVaRegion) == 24, "IoctlVaRegion is incorrect size");
153
154 struct IoctlGetVaRegions { 132 struct IoctlGetVaRegions {
155 u64_le buf_addr{}; // (contained output user ptr on linux, ignored) 133 u64_le buf_addr{}; // (contained output user ptr on linux, ignored)
156 u32_le buf_size{}; // forced to 2*sizeof(struct va_region) 134 u32_le buf_size{}; // forced to 2*sizeof(struct va_region)
157 u32_le reserved{}; 135 u32_le reserved{};
158 IoctlVaRegion small{}; 136 std::array<VaRegion, 2> regions{};
159 IoctlVaRegion big{};
160 }; 137 };
161 static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(IoctlVaRegion) * 2, 138 static_assert(sizeof(IoctlGetVaRegions) == 16 + sizeof(VaRegion) * 2,
162 "IoctlGetVaRegions is incorrect size"); 139 "IoctlGetVaRegions is incorrect size");
163 140
164 s32 channel{};
165 u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
166
167 NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output); 141 NvResult AllocAsEx(const std::vector<u8>& input, std::vector<u8>& output);
168 NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output); 142 NvResult AllocateSpace(const std::vector<u8>& input, std::vector<u8>& output);
169 NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output); 143 NvResult Remap(const std::vector<u8>& input, std::vector<u8>& output);
@@ -172,18 +146,75 @@ private:
172 NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output); 146 NvResult FreeSpace(const std::vector<u8>& input, std::vector<u8>& output);
173 NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output); 147 NvResult BindChannel(const std::vector<u8>& input, std::vector<u8>& output);
174 148
149 void GetVARegionsImpl(IoctlGetVaRegions& params);
175 NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output); 150 NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output);
176 NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output, 151 NvResult GetVARegions(const std::vector<u8>& input, std::vector<u8>& output,
177 std::vector<u8>& inline_output); 152 std::vector<u8>& inline_output);
178 153
179 std::optional<BufferMap> FindBufferMap(GPUVAddr gpu_addr) const; 154 void FreeMappingLocked(u64 offset);
180 void AddBufferMap(GPUVAddr gpu_addr, std::size_t size, VAddr cpu_addr, bool is_allocated); 155
181 std::optional<std::size_t> RemoveBufferMap(GPUVAddr gpu_addr); 156 Module& module;
157
158 NvCore::Container& container;
159 NvCore::NvMap& nvmap;
182 160
183 std::shared_ptr<nvmap> nvmap_dev; 161 struct Mapping {
162 VAddr ptr;
163 u64 offset;
164 u64 size;
165 bool fixed;
166 bool big_page; // Only valid if fixed == false
167 bool sparse_alloc;
168
169 Mapping(VAddr ptr_, u64 offset_, u64 size_, bool fixed_, bool big_page_, bool sparse_alloc_)
170 : ptr(ptr_), offset(offset_), size(size_), fixed(fixed_), big_page(big_page_),
171 sparse_alloc(sparse_alloc_) {}
172 };
173
174 struct Allocation {
175 u64 size;
176 std::list<std::shared_ptr<Mapping>> mappings;
177 u32 page_size;
178 bool sparse;
179 bool big_pages;
180 };
184 181
185 // This is expected to be ordered, therefore we must use a map, not unordered_map 182 std::map<u64, std::shared_ptr<Mapping>>
186 std::map<GPUVAddr, BufferMap> buffer_mappings; 183 mapping_map; //!< Maps the base address of each mapped buffer to its total size and
184 //!< mapping type; this is needed as what was originally a single buffer may
185 //!< have been split into multiple GPU-side buffers with the remap flag.
186 std::map<u64, Allocation> allocation_map; //!< Holds allocations created by AllocSpace from
187 //!< which fixed buffers can be mapped into
188 std::mutex mutex; //!< Locks all AS operations
189
190 struct VM {
191 static constexpr u32 YUZU_PAGESIZE{0x1000};
192 static constexpr u32 PAGE_SIZE_BITS{std::countr_zero(YUZU_PAGESIZE)};
193
194 static constexpr u32 SUPPORTED_BIG_PAGE_SIZES{0x30000};
195 static constexpr u32 DEFAULT_BIG_PAGE_SIZE{0x20000};
196 u32 big_page_size{DEFAULT_BIG_PAGE_SIZE};
197 u32 big_page_size_bits{std::countr_zero(DEFAULT_BIG_PAGE_SIZE)};
198
199 static constexpr u32 VA_START_SHIFT{10};
200 static constexpr u64 DEFAULT_VA_SPLIT{1ULL << 34};
201 static constexpr u64 DEFAULT_VA_RANGE{1ULL << 37};
202 u64 va_range_start{DEFAULT_BIG_PAGE_SIZE << VA_START_SHIFT};
203 u64 va_range_split{DEFAULT_VA_SPLIT};
204 u64 va_range_end{DEFAULT_VA_RANGE};
205
206 using Allocator = Common::FlatAllocator<u32, 0, 32>;
207
208 std::unique_ptr<Allocator> big_page_allocator;
209 std::shared_ptr<Allocator>
210 small_page_allocator; //!< Shared as this is also used by nvhost::GpuChannel
211
212 bool initialised{};
213 } vm;
214 std::shared_ptr<Tegra::MemoryManager> gmmu;
215
216 // s32 channel{};
217 // u32 big_page_size{VM::DEFAULT_BIG_PAGE_SIZE};
187}; 218};
188 219
189} // namespace Service::Nvidia::Devices 220} // namespace Service::Nvidia::Devices
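The VM defaults above fix the address-space layout that AllocAsEx installs when the guest
passes no overrides: small pages below va_range_split, big pages above it. A minimal worked
example of that arithmetic, standalone and using only the constants from the struct (not part
of the change itself):

    #include <bit>
    #include <cstdint>

    int main() {
        constexpr std::uint32_t big_page_size = 0x20000; // DEFAULT_BIG_PAGE_SIZE
        constexpr auto big_page_size_bits =
            static_cast<std::uint32_t>(std::countr_zero(big_page_size)); // 17

        // Defaults from the VM struct.
        constexpr std::uint64_t va_range_start =
            static_cast<std::uint64_t>(big_page_size) << 10; // VA_START_SHIFT -> 0x800'0000
        constexpr std::uint64_t va_range_split = 1ULL << 34; // DEFAULT_VA_SPLIT
        constexpr std::uint64_t va_range_end = 1ULL << 37;   // DEFAULT_VA_RANGE

        // AllocAsEx hands these to the two FlatAllocators, in units of pages:
        constexpr auto small_start = static_cast<std::uint32_t>(va_range_start >> 12);
        constexpr auto small_end = static_cast<std::uint32_t>(va_range_split >> 12);
        constexpr auto big_start =
            static_cast<std::uint32_t>(va_range_split >> big_page_size_bits);
        constexpr auto big_count =
            static_cast<std::uint32_t>((va_range_end - va_range_split) >> big_page_size_bits);

        static_assert(small_start == 0x8000 && small_end == 0x400000);
        static_assert(big_start == 0x20000 && big_count == 0xE0000);
        return 0;
    }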
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
index 527531f29..5bee4a3d3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.cpp
@@ -1,24 +1,39 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
3 4
5#include <bit>
4#include <cstdlib> 6#include <cstdlib>
5#include <cstring> 7#include <cstring>
6 8
9#include <fmt/format.h>
7#include "common/assert.h" 10#include "common/assert.h"
8#include "common/logging/log.h" 11#include "common/logging/log.h"
12#include "common/scope_exit.h"
9#include "core/core.h" 13#include "core/core.h"
10#include "core/hle/kernel/k_event.h" 14#include "core/hle/kernel/k_event.h"
11#include "core/hle/kernel/k_writable_event.h" 15#include "core/hle/kernel/k_writable_event.h"
16#include "core/hle/service/nvdrv/core/container.h"
17#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
12#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h" 18#include "core/hle/service/nvdrv/devices/nvhost_ctrl.h"
13#include "video_core/gpu.h" 19#include "video_core/gpu.h"
20#include "video_core/host1x/host1x.h"
14 21
15namespace Service::Nvidia::Devices { 22namespace Service::Nvidia::Devices {
16 23
17nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, 24nvhost_ctrl::nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
18 SyncpointManager& syncpoint_manager_) 25 NvCore::Container& core_)
19 : nvdevice{system_}, events_interface{events_interface_}, syncpoint_manager{ 26 : nvdevice{system_}, events_interface{events_interface_}, core{core_},
20 syncpoint_manager_} {} 27 syncpoint_manager{core_.GetSyncpointManager()} {}
21nvhost_ctrl::~nvhost_ctrl() = default; 28
29nvhost_ctrl::~nvhost_ctrl() {
30 for (auto& event : events) {
31 if (!event.registered) {
32 continue;
33 }
34 events_interface.FreeEvent(event.kevent);
35 }
36}
22 37
23NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 38NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
24 std::vector<u8>& output) { 39 std::vector<u8>& output) {
@@ -30,13 +45,15 @@ NvResult nvhost_ctrl::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
30 case 0x1c: 45 case 0x1c:
31 return IocCtrlClearEventWait(input, output); 46 return IocCtrlClearEventWait(input, output);
32 case 0x1d: 47 case 0x1d:
33 return IocCtrlEventWait(input, output, false);
34 case 0x1e:
35 return IocCtrlEventWait(input, output, true); 48 return IocCtrlEventWait(input, output, true);
49 case 0x1e:
50 return IocCtrlEventWait(input, output, false);
36 case 0x1f: 51 case 0x1f:
37 return IocCtrlEventRegister(input, output); 52 return IocCtrlEventRegister(input, output);
38 case 0x20: 53 case 0x20:
39 return IocCtrlEventUnregister(input, output); 54 return IocCtrlEventUnregister(input, output);
55 case 0x21:
56 return IocCtrlEventUnregisterBatch(input, output);
40 } 57 }
41 break; 58 break;
42 default: 59 default:
@@ -60,6 +77,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>&
60} 77}
61 78
62void nvhost_ctrl::OnOpen(DeviceFD fd) {} 79void nvhost_ctrl::OnOpen(DeviceFD fd) {}
80
63void nvhost_ctrl::OnClose(DeviceFD fd) {} 81void nvhost_ctrl::OnClose(DeviceFD fd) {}
64 82
65NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) { 83NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output) {
@@ -71,116 +89,167 @@ NvResult nvhost_ctrl::NvOsGetConfigU32(const std::vector<u8>& input, std::vector
71} 89}
72 90
73NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, 91NvResult nvhost_ctrl::IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
74 bool is_async) { 92 bool is_allocation) {
75 IocCtrlEventWaitParams params{}; 93 IocCtrlEventWaitParams params{};
76 std::memcpy(&params, input.data(), sizeof(params)); 94 std::memcpy(&params, input.data(), sizeof(params));
77 LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_async={}", 95 LOG_DEBUG(Service_NVDRV, "syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
78 params.syncpt_id, params.threshold, params.timeout, is_async); 96 params.fence.id, params.fence.value, params.timeout, is_allocation);
79 97
80 if (params.syncpt_id >= MaxSyncPoints) { 98 bool must_unmark_fail = !is_allocation;
81 return NvResult::BadParameter; 99 const u32 event_id = params.value.raw;
82 } 100 SCOPE_EXIT({
101 std::memcpy(output.data(), &params, sizeof(params));
102 if (must_unmark_fail) {
103 events[event_id].fails = 0;
104 }
105 });
83 106
84 u32 event_id = params.value & 0x00FF; 107 const u32 fence_id = static_cast<u32>(params.fence.id);
85 108
86 if (event_id >= MaxNvEvents) { 109 if (fence_id >= MaxSyncPoints) {
87 std::memcpy(output.data(), &params, sizeof(params));
88 return NvResult::BadParameter; 110 return NvResult::BadParameter;
89 } 111 }
90 112
91 if (syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { 113 if (params.fence.value == 0) {
92 params.value = syncpoint_manager.GetSyncpointMin(params.syncpt_id); 114 if (!syncpoint_manager.IsSyncpointAllocated(params.fence.id)) {
93 std::memcpy(output.data(), &params, sizeof(params)); 115 LOG_WARNING(Service_NVDRV,
94 events_interface.failed[event_id] = false; 116 "Unallocated syncpt_id={}, threshold={}, timeout={}, is_allocation={}",
117 params.fence.id, params.fence.value, params.timeout, is_allocation);
118 } else {
119 params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
120 }
95 return NvResult::Success; 121 return NvResult::Success;
96 } 122 }
97 123
98 if (const auto new_value = syncpoint_manager.RefreshSyncpoint(params.syncpt_id); 124 if (syncpoint_manager.IsFenceSignalled(params.fence)) {
99 syncpoint_manager.IsSyncpointExpired(params.syncpt_id, params.threshold)) { 125 params.value.raw = syncpoint_manager.ReadSyncpointMinValue(fence_id);
100 params.value = new_value;
101 std::memcpy(output.data(), &params, sizeof(params));
102 events_interface.failed[event_id] = false;
103 return NvResult::Success; 126 return NvResult::Success;
104 } 127 }
105 128
106 auto& event = events_interface.events[event_id]; 129 if (const auto new_value = syncpoint_manager.UpdateMin(fence_id);
107 auto& gpu = system.GPU(); 130 syncpoint_manager.IsFenceSignalled(params.fence)) {
108 131 params.value.raw = new_value;
109 // This is mostly to take into account unimplemented features. As synced
110 // gpu is always synced.
111 if (!gpu.IsAsync()) {
112 event.event->GetWritableEvent().Signal();
113 return NvResult::Success;
114 }
115 const u32 current_syncpoint_value = event.fence.value;
116 const s32 diff = current_syncpoint_value - params.threshold;
117 if (diff >= 0) {
118 event.event->GetWritableEvent().Signal();
119 params.value = current_syncpoint_value;
120 std::memcpy(output.data(), &params, sizeof(params));
121 events_interface.failed[event_id] = false;
122 return NvResult::Success; 132 return NvResult::Success;
123 } 133 }
124 const u32 target_value = current_syncpoint_value - diff;
125 134
126 if (!is_async) { 135 auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
127 params.value = 0; 136 const u32 target_value = params.fence.value;
137
138 auto lock = NvEventsLock();
139
140 u32 slot = [&]() {
141 if (is_allocation) {
142 params.value.raw = 0;
143 return FindFreeNvEvent(fence_id);
144 } else {
145 return params.value.raw;
146 }
147 }();
148
149 must_unmark_fail = false;
150
151 const auto check_failing = [&]() {
152 if (events[slot].fails > 2) {
153 {
154 auto lk = system.StallProcesses();
155 host1x_syncpoint_manager.WaitHost(fence_id, target_value);
156 system.UnstallProcesses();
157 }
158 params.value.raw = target_value;
159 return true;
160 }
161 return false;
162 };
163
164 if (slot >= MaxNvEvents) {
165 return NvResult::BadParameter;
128 } 166 }
129 167
130 if (params.timeout == 0) { 168 if (params.timeout == 0) {
131 std::memcpy(output.data(), &params, sizeof(params)); 169 if (check_failing()) {
170 events[slot].fails = 0;
171 return NvResult::Success;
172 }
132 return NvResult::Timeout; 173 return NvResult::Timeout;
133 } 174 }
134 175
135 EventState status = events_interface.status[event_id]; 176 auto& event = events[slot];
136 const bool bad_parameter = status == EventState::Busy; 177
137 if (bad_parameter) { 178 if (!event.registered) {
138 std::memcpy(output.data(), &params, sizeof(params));
139 return NvResult::BadParameter; 179 return NvResult::BadParameter;
140 } 180 }
141 events_interface.SetEventStatus(event_id, EventState::Waiting); 181
142 events_interface.assigned_syncpt[event_id] = params.syncpt_id; 182 if (event.IsBeingUsed()) {
143 events_interface.assigned_value[event_id] = target_value; 183 return NvResult::BadParameter;
144 if (is_async) { 184 }
145 params.value = params.syncpt_id << 4; 185
146 } else { 186 if (check_failing()) {
147 params.value = ((params.syncpt_id & 0xfff) << 16) | 0x10000000; 187 event.fails = 0;
148 }
149 params.value |= event_id;
150 event.event->GetWritableEvent().Clear();
151 if (events_interface.failed[event_id]) {
152 {
153 auto lk = system.StallProcesses();
154 gpu.WaitFence(params.syncpt_id, target_value);
155 system.UnstallProcesses();
156 }
157 std::memcpy(output.data(), &params, sizeof(params));
158 events_interface.failed[event_id] = false;
159 return NvResult::Success; 188 return NvResult::Success;
160 } 189 }
161 gpu.RegisterSyncptInterrupt(params.syncpt_id, target_value); 190
162 std::memcpy(output.data(), &params, sizeof(params)); 191 params.value.raw = 0;
192
193 event.status.store(EventState::Waiting, std::memory_order_release);
194 event.assigned_syncpt = fence_id;
195 event.assigned_value = target_value;
196 if (is_allocation) {
197 params.value.syncpoint_id_for_allocation.Assign(static_cast<u16>(fence_id));
198 params.value.event_allocated.Assign(1);
199 } else {
200 params.value.syncpoint_id.Assign(fence_id);
201 }
202 params.value.raw |= slot;
203
204 event.wait_handle =
205 host1x_syncpoint_manager.RegisterHostAction(fence_id, target_value, [this, slot]() {
206 auto& event_ = events[slot];
207 if (event_.status.exchange(EventState::Signalling, std::memory_order_acq_rel) ==
208 EventState::Waiting) {
209 event_.kevent->GetWritableEvent().Signal();
210 }
211 event_.status.store(EventState::Signalled, std::memory_order_release);
212 });
163 return NvResult::Timeout; 213 return NvResult::Timeout;
164} 214}
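The check_failing path above escalates a repeatedly cancelled wait (more than two fails) into a synchronous host-side wait while guest processes are stalled. A minimal standalone sketch of that escalation, with hypothetical stand-ins for the stall/unstall and host-wait calls (not the real interfaces):

#include <cstdint>
#include <cstdio>

struct Event { std::uint32_t fails{}; };

// Hypothetical stand-ins for system.StallProcesses()/UnstallProcesses() and
// host1x_syncpoint_manager.WaitHost().
void StallProcesses() {}
void UnstallProcesses() {}
void WaitHost(std::uint32_t syncpt, std::uint32_t threshold) {}

// Mirrors the check_failing lambda: after more than two failed waits, block on
// the host syncpoint and report the target value as reached.
bool CheckFailing(Event& event, std::uint32_t syncpt, std::uint32_t target,
                  std::uint32_t& value_out) {
    if (event.fails > 2) {
        StallProcesses();
        WaitHost(syncpt, target);
        UnstallProcesses();
        value_out = target;
        return true;
    }
    return false;
}

int main() {
    Event event{.fails = 3}; // C++20 designated initializer
    std::uint32_t value{};
    if (CheckFailing(event, 7, 100, value)) {
        event.fails = 0; // a successful wait resets the fail counter
    }
    std::printf("value=%u\n", value);
}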
165 215
216NvResult nvhost_ctrl::FreeEvent(u32 slot) {
217 if (slot >= MaxNvEvents) {
218 return NvResult::BadParameter;
219 }
220
221 auto& event = events[slot];
222
223 if (!event.registered) {
224 return NvResult::Success;
225 }
226
227 if (event.IsBeingUsed()) {
228 return NvResult::Busy;
229 }
230
231 FreeNvEvent(slot);
232 return NvResult::Success;
233}
234
166NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) { 235NvResult nvhost_ctrl::IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output) {
167 IocCtrlEventRegisterParams params{}; 236 IocCtrlEventRegisterParams params{};
168 std::memcpy(&params, input.data(), sizeof(params)); 237 std::memcpy(&params, input.data(), sizeof(params));
169 const u32 event_id = params.user_event_id & 0x00FF; 238 const u32 event_id = params.user_event_id;
170 LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); 239 LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
171 if (event_id >= MaxNvEvents) { 240 if (event_id >= MaxNvEvents) {
172 return NvResult::BadParameter; 241 return NvResult::BadParameter;
173 } 242 }
174 if (events_interface.registered[event_id]) { 243
175 const auto event_state = events_interface.status[event_id]; 244 auto lock = NvEventsLock();
176 if (event_state != EventState::Free) { 245
177 LOG_WARNING(Service_NVDRV, "Event already registered! Unregistering previous event"); 246 if (events[event_id].registered) {
178 events_interface.UnregisterEvent(event_id); 247 const auto result = FreeEvent(event_id);
179 } else { 248 if (result != NvResult::Success) {
180 return NvResult::BadParameter; 249 return result;
181 } 250 }
182 } 251 }
183 events_interface.RegisterEvent(event_id); 252 CreateNvEvent(event_id);
184 return NvResult::Success; 253 return NvResult::Success;
185} 254}
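For orientation, the guest-facing lifecycle implied by these handlers is register, wait, then unregister once the event is idle. A schematic sketch of that ordering only; DoIoctl and the simplified structs below are hypothetical stand-ins for the real ioctl transport and the parameter layouts defined in nvhost_ctrl.h further down:

#include <cstdint>

// Hypothetical, simplified parameter structs.
struct RegisterParams { std::uint32_t user_event_id; };
struct WaitParams { std::uint32_t fence_id, fence_value, timeout, value; };
struct UnregisterParams { std::uint32_t user_event_id; };

template <typename Params>
void DoIoctl(Params&) {} // hypothetical transport stand-in

int main() {
    RegisterParams reg{0};
    DoIoctl(reg); // service side: CreateNvEvent(0)

    WaitParams wait{7, 100, 0xFFFF'FFFF, 0};
    DoIoctl(wait); // a Timeout result arms the event; value packs slot + syncpoint

    UnregisterParams unreg{0};
    DoIoctl(unreg); // service side: FreeEvent(0), valid only once the event is idle
}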
186 255
@@ -190,34 +259,142 @@ NvResult nvhost_ctrl::IocCtrlEventUnregister(const std::vector<u8>& input,
190 std::memcpy(&params, input.data(), sizeof(params)); 259 std::memcpy(&params, input.data(), sizeof(params));
191 const u32 event_id = params.user_event_id & 0x00FF; 260 const u32 event_id = params.user_event_id & 0x00FF;
192 LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id); 261 LOG_DEBUG(Service_NVDRV, " called, user_event_id: {:X}", event_id);
193 if (event_id >= MaxNvEvents) { 262
194 return NvResult::BadParameter; 263 auto lock = NvEventsLock();
195 } 264 return FreeEvent(event_id);
196 if (!events_interface.registered[event_id]) { 265}
197 return NvResult::BadParameter; 266
267NvResult nvhost_ctrl::IocCtrlEventUnregisterBatch(const std::vector<u8>& input,
268 std::vector<u8>& output) {
269 IocCtrlEventUnregisterBatchParams params{};
270 std::memcpy(&params, input.data(), sizeof(params));
271 u64 event_mask = params.user_events;
272 LOG_DEBUG(Service_NVDRV, " called, event_mask: {:X}", event_mask);
273
274 auto lock = NvEventsLock();
275 while (event_mask != 0) {
276 const u64 event_id = std::countr_zero(event_mask);
277 event_mask &= ~(1ULL << event_id);
278 const auto result = FreeEvent(static_cast<u32>(event_id));
279 if (result != NvResult::Success) {
280 return result;
281 }
198 } 282 }
199 events_interface.UnregisterEvent(event_id);
200 return NvResult::Success; 283 return NvResult::Success;
201} 284}
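The batch handler above consumes the 64-bit mask one set bit at a time with std::countr_zero (C++20 <bit>). A self-contained sketch of the same traversal; the printf stands in for the FreeEvent call:

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
    std::uint64_t event_mask = 0b1010'0110; // slots 1, 2, 5 and 7 requested
    while (event_mask != 0) {
        const int event_id = std::countr_zero(event_mask); // index of lowest set bit
        event_mask &= ~(1ULL << event_id);                 // clear it, as above
        std::printf("free slot %d\n", event_id);           // FreeEvent(event_id)
    }
}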
202 285
203NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) { 286NvResult nvhost_ctrl::IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output) {
204 IocCtrlEventSignalParams params{}; 287 IocCtrlEventClearParams params{};
205 std::memcpy(&params, input.data(), sizeof(params)); 288 std::memcpy(&params, input.data(), sizeof(params));
206 289
207 u32 event_id = params.event_id & 0x00FF; 290 u32 event_id = params.event_id.slot;
208 LOG_WARNING(Service_NVDRV, "cleared event wait on, event_id: {:X}", event_id); 291 LOG_DEBUG(Service_NVDRV, "called, event_id: {:X}", event_id);
209 292
210 if (event_id >= MaxNvEvents) { 293 if (event_id >= MaxNvEvents) {
211 return NvResult::BadParameter; 294 return NvResult::BadParameter;
212 } 295 }
213 if (events_interface.status[event_id] == EventState::Waiting) {
214 events_interface.LiberateEvent(event_id);
215 }
216 events_interface.failed[event_id] = true;
217 296
218 syncpoint_manager.RefreshSyncpoint(events_interface.events[event_id].fence.id); 297 auto lock = NvEventsLock();
298
299 auto& event = events[event_id];
300 if (event.status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
301 EventState::Waiting) {
302 auto& host1x_syncpoint_manager = system.Host1x().GetSyncpointManager();
303 host1x_syncpoint_manager.DeregisterHostAction(event.assigned_syncpt, event.wait_handle);
304 syncpoint_manager.UpdateMin(event.assigned_syncpt);
305 event.wait_handle = {};
306 }
307 event.fails++;
308 event.status.store(EventState::Cancelled, std::memory_order_release);
309 event.kevent->GetWritableEvent().Clear();
219 310
220 return NvResult::Success; 311 return NvResult::Success;
221} 312}
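The cancel path relies on a single atomic exchange to arbitrate with the signalling callback: whichever side observes Waiting first owns the teardown, so the host action is deregistered at most once. A distilled sketch of that hand-off (stand-in state enum and handler, not the driver's types):

#include <atomic>
#include <cstdio>

enum class EventState { Available, Waiting, Cancelling, Cancelled, Signalling, Signalled };

std::atomic<EventState> status{EventState::Waiting};

void Cancel() {
    // Only the thread that swaps out Waiting performs the deregistration.
    if (status.exchange(EventState::Cancelling, std::memory_order_acq_rel) ==
        EventState::Waiting) {
        std::puts("deregister host action"); // DeregisterHostAction() in the driver
    }
    status.store(EventState::Cancelled, std::memory_order_release);
}

int main() {
    Cancel();
}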
222 313
314Kernel::KEvent* nvhost_ctrl::QueryEvent(u32 event_id) {
315 const auto desired_event = SyncpointEventValue{.raw = event_id};
316
317 const bool allocated = desired_event.event_allocated.Value() != 0;
318 const u32 slot{allocated ? desired_event.partial_slot.Value()
319 : static_cast<u32>(desired_event.slot)};
320 if (slot >= MaxNvEvents) {
321 ASSERT(false);
322 return nullptr;
323 }
324
325 const u32 syncpoint_id{allocated ? desired_event.syncpoint_id_for_allocation.Value()
326 : desired_event.syncpoint_id.Value()};
327
328 auto lock = NvEventsLock();
329
330 auto& event = events[slot];
331 if (event.registered && event.assigned_syncpt == syncpoint_id) {
332 ASSERT(event.kevent);
333 return event.kevent;
334 }
335 // Is this possible in hardware?
 336 ASSERT_MSG(false, "Requested unregistered event: slot={}, syncpoint_id={}", slot, syncpoint_id);
337 return nullptr;
338}
339
340std::unique_lock<std::mutex> nvhost_ctrl::NvEventsLock() {
341 return std::unique_lock<std::mutex>(events_mutex);
342}
343
344void nvhost_ctrl::CreateNvEvent(u32 event_id) {
345 auto& event = events[event_id];
346 ASSERT(!event.kevent);
347 ASSERT(!event.registered);
348 ASSERT(!event.IsBeingUsed());
349 event.kevent = events_interface.CreateEvent(fmt::format("NVCTRL::NvEvent_{}", event_id));
350 event.status = EventState::Available;
351 event.registered = true;
352 const u64 mask = 1ULL << event_id;
353 event.fails = 0;
354 events_mask |= mask;
355 event.assigned_syncpt = 0;
356}
357
358void nvhost_ctrl::FreeNvEvent(u32 event_id) {
359 auto& event = events[event_id];
360 ASSERT(event.kevent);
361 ASSERT(event.registered);
362 ASSERT(!event.IsBeingUsed());
363 events_interface.FreeEvent(event.kevent);
364 event.kevent = nullptr;
365 event.status = EventState::Available;
366 event.registered = false;
367 const u64 mask = ~(1ULL << event_id);
368 events_mask &= mask;
369}
370
371u32 nvhost_ctrl::FindFreeNvEvent(u32 syncpoint_id) {
372 u32 slot{MaxNvEvents};
373 u32 free_slot{MaxNvEvents};
374 for (u32 i = 0; i < MaxNvEvents; i++) {
375 auto& event = events[i];
376 if (event.registered) {
377 if (!event.IsBeingUsed()) {
378 slot = i;
379 if (event.assigned_syncpt == syncpoint_id) {
380 return slot;
381 }
382 }
383 } else if (free_slot == MaxNvEvents) {
384 free_slot = i;
385 }
386 }
387 if (free_slot < MaxNvEvents) {
388 CreateNvEvent(free_slot);
389 return free_slot;
390 }
391
392 if (slot < MaxNvEvents) {
393 return slot;
394 }
395
396 LOG_CRITICAL(Service_NVDRV, "Failed to allocate an event");
397 return 0;
398}
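The search order above is: an idle slot already bound to the requested syncpoint, otherwise the first unregistered slot, otherwise any idle registered slot, and finally slot 0 as a logged last resort. A standalone model of that policy (stand-in Slot type, not the driver's InternalEvent):

#include <array>
#include <cstdint>
#include <cstdio>

struct Slot { bool registered{}; bool busy{}; std::uint32_t syncpt{}; };
constexpr std::uint32_t MaxEvents = 64;

std::uint32_t FindFree(const std::array<Slot, MaxEvents>& slots, std::uint32_t syncpt) {
    std::uint32_t idle = MaxEvents;
    std::uint32_t fresh = MaxEvents;
    for (std::uint32_t i = 0; i < MaxEvents; ++i) {
        if (slots[i].registered) {
            if (!slots[i].busy) {
                idle = i;
                if (slots[i].syncpt == syncpt) {
                    return i; // best case: idle and already bound
                }
            }
        } else if (fresh == MaxEvents) {
            fresh = i;
        }
    }
    if (fresh < MaxEvents) {
        return fresh; // the driver calls CreateNvEvent(fresh) here
    }
    return idle < MaxEvents ? idle : 0; // 0 is the logged fallback
}

int main() {
    std::array<Slot, MaxEvents> slots{};
    slots[2] = {true, false, 7};
    std::printf("%u\n", FindFree(slots, 7)); // prints 2
}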
399
223} // namespace Service::Nvidia::Devices 400} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
index 4fbb89b15..0b56d7070 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl.h
@@ -1,20 +1,28 @@
1// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project 1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later 2// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
3// SPDX-License-Identifier: GPL-3.0-or-later
3 4
4#pragma once 5#pragma once
5 6
6#include <array> 7#include <array>
7#include <vector> 8#include <vector>
9#include "common/bit_field.h"
8#include "common/common_types.h" 10#include "common/common_types.h"
9#include "core/hle/service/nvdrv/devices/nvdevice.h" 11#include "core/hle/service/nvdrv/devices/nvdevice.h"
10#include "core/hle/service/nvdrv/nvdrv.h" 12#include "core/hle/service/nvdrv/nvdrv.h"
13#include "video_core/host1x/syncpoint_manager.h"
14
15namespace Service::Nvidia::NvCore {
16class Container;
17class SyncpointManager;
18} // namespace Service::Nvidia::NvCore
11 19
12namespace Service::Nvidia::Devices { 20namespace Service::Nvidia::Devices {
13 21
14class nvhost_ctrl final : public nvdevice { 22class nvhost_ctrl final : public nvdevice {
15public: 23public:
16 explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_, 24 explicit nvhost_ctrl(Core::System& system_, EventInterface& events_interface_,
17 SyncpointManager& syncpoint_manager_); 25 NvCore::Container& core);
18 ~nvhost_ctrl() override; 26 ~nvhost_ctrl() override;
19 27
20 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 28 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,7 +35,70 @@ public:
27 void OnOpen(DeviceFD fd) override; 35 void OnOpen(DeviceFD fd) override;
28 void OnClose(DeviceFD fd) override; 36 void OnClose(DeviceFD fd) override;
29 37
38 Kernel::KEvent* QueryEvent(u32 event_id) override;
39
40 union SyncpointEventValue {
41 u32 raw;
42
43 union {
44 BitField<0, 4, u32> partial_slot;
45 BitField<4, 28, u32> syncpoint_id;
46 };
47
48 struct {
49 u16 slot;
50 union {
51 BitField<0, 12, u16> syncpoint_id_for_allocation;
52 BitField<12, 1, u16> event_allocated;
53 };
54 };
55 };
56 static_assert(sizeof(SyncpointEventValue) == sizeof(u32));
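For reference, a plain-shift rendering of the two encodings this union expresses; the constants mirror the BitField layout above and are illustrative only:

#include <cstdint>
#include <cstdio>

// Non-allocated wait result: syncpoint id in bits 4..31, slot in bits 0..3.
constexpr std::uint32_t EncodeWait(std::uint32_t syncpoint_id, std::uint32_t slot) {
    return (syncpoint_id << 4) | (slot & 0xF);
}

// Allocated wait result: slot in bits 0..15, syncpoint id in bits 16..27,
// event_allocated flag at bit 28.
constexpr std::uint32_t EncodeAllocated(std::uint32_t syncpoint_id, std::uint32_t slot) {
    return (1u << 28) | ((syncpoint_id & 0xFFF) << 16) | (slot & 0xFFFF);
}

int main() {
    std::printf("wait=%08X allocated=%08X\n", EncodeWait(19, 3), EncodeAllocated(19, 3));
}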
57
30private: 58private:
59 struct InternalEvent {
 60 // Per-slot state tracked for each NV event
 61
 62 // The kernel event associated with this NV event
 63 Kernel::KEvent* kevent{};
 64 // The status of the current NVEvent
 65 std::atomic<EventState> status{};
 66
 67 // Number of times a wait on this NVEvent has failed or been cancelled
 68 u32 fails{};
 69 // When an NVEvent is waiting on a GPU interrupt, this is the syncpoint
 70 // associated with it.
 71 u32 assigned_syncpt{};
 72 // The syncpoint threshold value that the NVEvent
 73 // is waiting for.
 74 u32 assigned_value{};
74 u32 assigned_value{};
75
76 // Tells if an NVEvent is registered or not
77 bool registered{};
78
79 // Used for waiting on a syncpoint & canceling it.
80 Tegra::Host1x::SyncpointManager::ActionHandle wait_handle{};
81
82 bool IsBeingUsed() const {
83 const auto current_status = status.load(std::memory_order_acquire);
84 return current_status == EventState::Waiting ||
85 current_status == EventState::Cancelling ||
86 current_status == EventState::Signalling;
87 }
88 };
89
90 std::unique_lock<std::mutex> NvEventsLock();
91
92 void CreateNvEvent(u32 event_id);
93
94 void FreeNvEvent(u32 event_id);
95
96 u32 FindFreeNvEvent(u32 syncpoint_id);
97
98 std::array<InternalEvent, MaxNvEvents> events{};
99 std::mutex events_mutex;
 100 u64 events_mask{}; // Mask representing registered events
101
31 struct IocSyncptReadParams { 102 struct IocSyncptReadParams {
32 u32_le id{}; 103 u32_le id{};
33 u32_le value{}; 104 u32_le value{};
@@ -83,27 +154,18 @@ private:
83 }; 154 };
84 static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size"); 155 static_assert(sizeof(IocGetConfigParams) == 387, "IocGetConfigParams is incorrect size");
85 156
86 struct IocCtrlEventSignalParams { 157 struct IocCtrlEventClearParams {
87 u32_le event_id{}; 158 SyncpointEventValue event_id{};
88 }; 159 };
89 static_assert(sizeof(IocCtrlEventSignalParams) == 4, 160 static_assert(sizeof(IocCtrlEventClearParams) == 4,
90 "IocCtrlEventSignalParams is incorrect size"); 161 "IocCtrlEventClearParams is incorrect size");
91 162
92 struct IocCtrlEventWaitParams { 163 struct IocCtrlEventWaitParams {
93 u32_le syncpt_id{}; 164 NvFence fence{};
94 u32_le threshold{};
95 s32_le timeout{};
96 u32_le value{};
97 };
98 static_assert(sizeof(IocCtrlEventWaitParams) == 16, "IocCtrlEventWaitParams is incorrect size");
99
100 struct IocCtrlEventWaitAsyncParams {
101 u32_le syncpt_id{};
102 u32_le threshold{};
103 u32_le timeout{}; 165 u32_le timeout{};
104 u32_le value{}; 166 SyncpointEventValue value{};
105 }; 167 };
106 static_assert(sizeof(IocCtrlEventWaitAsyncParams) == 16, 168 static_assert(sizeof(IocCtrlEventWaitParams) == 16,
107 "IocCtrlEventWaitAsyncParams is incorrect size"); 169 "IocCtrlEventWaitAsyncParams is incorrect size");
108 170
109 struct IocCtrlEventRegisterParams { 171 struct IocCtrlEventRegisterParams {
@@ -118,19 +180,25 @@ private:
118 static_assert(sizeof(IocCtrlEventUnregisterParams) == 4, 180 static_assert(sizeof(IocCtrlEventUnregisterParams) == 4,
119 "IocCtrlEventUnregisterParams is incorrect size"); 181 "IocCtrlEventUnregisterParams is incorrect size");
120 182
121 struct IocCtrlEventKill { 183 struct IocCtrlEventUnregisterBatchParams {
122 u64_le user_events{}; 184 u64_le user_events{};
123 }; 185 };
124 static_assert(sizeof(IocCtrlEventKill) == 8, "IocCtrlEventKill is incorrect size"); 186 static_assert(sizeof(IocCtrlEventUnregisterBatchParams) == 8,
187 "IocCtrlEventKill is incorrect size");
125 188
126 NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output); 189 NvResult NvOsGetConfigU32(const std::vector<u8>& input, std::vector<u8>& output);
127 NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output, bool is_async); 190 NvResult IocCtrlEventWait(const std::vector<u8>& input, std::vector<u8>& output,
191 bool is_allocation);
128 NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output); 192 NvResult IocCtrlEventRegister(const std::vector<u8>& input, std::vector<u8>& output);
129 NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output); 193 NvResult IocCtrlEventUnregister(const std::vector<u8>& input, std::vector<u8>& output);
194 NvResult IocCtrlEventUnregisterBatch(const std::vector<u8>& input, std::vector<u8>& output);
130 NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output); 195 NvResult IocCtrlClearEventWait(const std::vector<u8>& input, std::vector<u8>& output);
131 196
197 NvResult FreeEvent(u32 slot);
198
132 EventInterface& events_interface; 199 EventInterface& events_interface;
133 SyncpointManager& syncpoint_manager; 200 NvCore::Container& core;
201 NvCore::SyncpointManager& syncpoint_manager;
134}; 202};
135 203
136} // namespace Service::Nvidia::Devices 204} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
index 2b3b7efea..ced57dfe6 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.cpp
@@ -7,11 +7,19 @@
7#include "core/core.h" 7#include "core/core.h"
8#include "core/core_timing.h" 8#include "core/core_timing.h"
9#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h" 9#include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
10#include "core/hle/service/nvdrv/nvdrv.h"
10 11
11namespace Service::Nvidia::Devices { 12namespace Service::Nvidia::Devices {
12 13
13nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_) : nvdevice{system_} {} 14nvhost_ctrl_gpu::nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_)
14nvhost_ctrl_gpu::~nvhost_ctrl_gpu() = default; 15 : nvdevice{system_}, events_interface{events_interface_} {
16 error_notifier_event = events_interface.CreateEvent("CtrlGpuErrorNotifier");
 17 unknown_event = events_interface.CreateEvent("CtrlGpuUnknownEvent");
18}
19nvhost_ctrl_gpu::~nvhost_ctrl_gpu() {
20 events_interface.FreeEvent(error_notifier_event);
21 events_interface.FreeEvent(unknown_event);
22}
15 23
16NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 24NvResult nvhost_ctrl_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
17 std::vector<u8>& output) { 25 std::vector<u8>& output) {
@@ -286,4 +294,17 @@ NvResult nvhost_ctrl_gpu::GetGpuTime(const std::vector<u8>& input, std::vector<u
286 return NvResult::Success; 294 return NvResult::Success;
287} 295}
288 296
297Kernel::KEvent* nvhost_ctrl_gpu::QueryEvent(u32 event_id) {
298 switch (event_id) {
299 case 1:
300 return error_notifier_event;
301 case 2:
302 return unknown_event;
303 default: {
304 LOG_CRITICAL(Service_NVDRV, "Unknown Ctrl GPU Event {}", event_id);
305 }
306 }
307 return nullptr;
308}
309
289} // namespace Service::Nvidia::Devices 310} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
index 97e9a90cb..1e8f254e2 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h
@@ -10,11 +10,15 @@
10#include "common/swap.h" 10#include "common/swap.h"
11#include "core/hle/service/nvdrv/devices/nvdevice.h" 11#include "core/hle/service/nvdrv/devices/nvdevice.h"
12 12
13namespace Service::Nvidia {
14class EventInterface;
15}
16
13namespace Service::Nvidia::Devices { 17namespace Service::Nvidia::Devices {
14 18
15class nvhost_ctrl_gpu final : public nvdevice { 19class nvhost_ctrl_gpu final : public nvdevice {
16public: 20public:
17 explicit nvhost_ctrl_gpu(Core::System& system_); 21 explicit nvhost_ctrl_gpu(Core::System& system_, EventInterface& events_interface_);
18 ~nvhost_ctrl_gpu() override; 22 ~nvhost_ctrl_gpu() override;
19 23
20 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 24 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -27,6 +31,8 @@ public:
27 void OnOpen(DeviceFD fd) override; 31 void OnOpen(DeviceFD fd) override;
28 void OnClose(DeviceFD fd) override; 32 void OnClose(DeviceFD fd) override;
29 33
34 Kernel::KEvent* QueryEvent(u32 event_id) override;
35
30private: 36private:
31 struct IoctlGpuCharacteristics { 37 struct IoctlGpuCharacteristics {
32 u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200) 38 u32_le arch; // 0x120 (NVGPU_GPU_ARCH_GM200)
@@ -160,6 +166,12 @@ private:
160 NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output); 166 NvResult ZBCQueryTable(const std::vector<u8>& input, std::vector<u8>& output);
161 NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output); 167 NvResult FlushL2(const std::vector<u8>& input, std::vector<u8>& output);
162 NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output); 168 NvResult GetGpuTime(const std::vector<u8>& input, std::vector<u8>& output);
169
170 EventInterface& events_interface;
171
172 // Events
173 Kernel::KEvent* error_notifier_event;
174 Kernel::KEvent* unknown_event;
163}; 175};
164 176
165} // namespace Service::Nvidia::Devices 177} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
index b98e63011..45a759fa8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.cpp
@@ -5,29 +5,46 @@
5#include "common/assert.h" 5#include "common/assert.h"
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/core.h" 7#include "core/core.h"
8#include "core/hle/service/nvdrv/core/container.h"
9#include "core/hle/service/nvdrv/core/nvmap.h"
10#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
8#include "core/hle/service/nvdrv/devices/nvhost_gpu.h" 11#include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
9#include "core/hle/service/nvdrv/syncpoint_manager.h" 12#include "core/hle/service/nvdrv/nvdrv.h"
10#include "core/memory.h" 13#include "core/memory.h"
14#include "video_core/control/channel_state.h"
15#include "video_core/engines/puller.h"
11#include "video_core/gpu.h" 16#include "video_core/gpu.h"
17#include "video_core/host1x/host1x.h"
12 18
13namespace Service::Nvidia::Devices { 19namespace Service::Nvidia::Devices {
14namespace { 20namespace {
15Tegra::CommandHeader BuildFenceAction(Tegra::GPU::FenceOperation op, u32 syncpoint_id) { 21Tegra::CommandHeader BuildFenceAction(Tegra::Engines::Puller::FenceOperation op, u32 syncpoint_id) {
16 Tegra::GPU::FenceAction result{}; 22 Tegra::Engines::Puller::FenceAction result{};
17 result.op.Assign(op); 23 result.op.Assign(op);
18 result.syncpoint_id.Assign(syncpoint_id); 24 result.syncpoint_id.Assign(syncpoint_id);
19 return {result.raw}; 25 return {result.raw};
20} 26}
21} // namespace 27} // namespace
22 28
23nvhost_gpu::nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 29nvhost_gpu::nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
24 SyncpointManager& syncpoint_manager_) 30 NvCore::Container& core_)
25 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} { 31 : nvdevice{system_}, events_interface{events_interface_}, core{core_},
26 channel_fence.id = syncpoint_manager_.AllocateSyncpoint(); 32 syncpoint_manager{core_.GetSyncpointManager()}, nvmap{core.GetNvMapFile()},
27 channel_fence.value = system_.GPU().GetSyncpointValue(channel_fence.id); 33 channel_state{system.GPU().AllocateChannel()} {
34 channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
35 sm_exception_breakpoint_int_report_event =
36 events_interface.CreateEvent("GpuChannelSMExceptionBreakpointInt");
37 sm_exception_breakpoint_pause_report_event =
38 events_interface.CreateEvent("GpuChannelSMExceptionBreakpointPause");
39 error_notifier_event = events_interface.CreateEvent("GpuChannelErrorNotifier");
28} 40}
29 41
30nvhost_gpu::~nvhost_gpu() = default; 42nvhost_gpu::~nvhost_gpu() {
43 events_interface.FreeEvent(sm_exception_breakpoint_int_report_event);
44 events_interface.FreeEvent(sm_exception_breakpoint_pause_report_event);
45 events_interface.FreeEvent(error_notifier_event);
46 syncpoint_manager.FreeSyncpoint(channel_syncpoint);
47}
31 48
32NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 49NvResult nvhost_gpu::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
33 std::vector<u8>& output) { 50 std::vector<u8>& output) {
@@ -167,9 +184,14 @@ NvResult nvhost_gpu::AllocGPFIFOEx2(const std::vector<u8>& input, std::vector<u8
167 params.num_entries, params.flags, params.unk0, params.unk1, params.unk2, 184 params.num_entries, params.flags, params.unk0, params.unk1, params.unk2,
168 params.unk3); 185 params.unk3);
169 186
170 channel_fence.value = system.GPU().GetSyncpointValue(channel_fence.id); 187 if (channel_state->initialized) {
188 LOG_CRITICAL(Service_NVDRV, "Already allocated!");
189 return NvResult::AlreadyAllocated;
190 }
191
192 system.GPU().InitChannel(*channel_state);
171 193
172 params.fence_out = channel_fence; 194 params.fence_out = syncpoint_manager.GetSyncpointFence(channel_syncpoint);
173 195
174 std::memcpy(output.data(), &params, output.size()); 196 std::memcpy(output.data(), &params, output.size());
175 return NvResult::Success; 197 return NvResult::Success;
@@ -188,39 +210,37 @@ NvResult nvhost_gpu::AllocateObjectContext(const std::vector<u8>& input, std::ve
188 210
189static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) { 211static std::vector<Tegra::CommandHeader> BuildWaitCommandList(NvFence fence) {
190 return { 212 return {
191 Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, 213 Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
192 Tegra::SubmissionMode::Increasing), 214 Tegra::SubmissionMode::Increasing),
193 {fence.value}, 215 {fence.value},
194 Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, 216 Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
195 Tegra::SubmissionMode::Increasing), 217 Tegra::SubmissionMode::Increasing),
196 BuildFenceAction(Tegra::GPU::FenceOperation::Acquire, fence.id), 218 BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Acquire, fence.id),
197 }; 219 };
198} 220}
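BuildWaitCommandList pairs a SyncpointPayload write (the threshold) with a SyncpointOperation carrying an Acquire action. A schematic model of the resulting four-word stream; the Entry type below is a stand-in, not the real Tegra command encoding (which comes from Tegra::BuildCommandHeader and Puller::FenceAction):

#include <cstdint>
#include <vector>

struct Entry { const char* what; std::uint32_t arg; };

std::vector<Entry> BuildWait(std::uint32_t syncpoint_id, std::uint32_t threshold) {
    return {
        {"header: SyncpointPayload, 1 word, Increasing", 0},
        {"payload: threshold the syncpoint must reach", threshold},
        {"header: SyncpointOperation, 1 word, Increasing", 0},
        {"FenceAction{op=Acquire, syncpoint_id}", syncpoint_id},
    };
}

int main() {
    return BuildWait(7, 100).size() == 4 ? 0 : 1; // exits 0
}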
199 221
200static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence, 222static std::vector<Tegra::CommandHeader> BuildIncrementCommandList(NvFence fence) {
201 u32 add_increment) {
202 std::vector<Tegra::CommandHeader> result{ 223 std::vector<Tegra::CommandHeader> result{
203 Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceValue, 1, 224 Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointPayload, 1,
204 Tegra::SubmissionMode::Increasing), 225 Tegra::SubmissionMode::Increasing),
205 {}}; 226 {}};
206 227
207 for (u32 count = 0; count < add_increment; ++count) { 228 for (u32 count = 0; count < 2; ++count) {
208 result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::FenceAction, 1, 229 result.emplace_back(Tegra::BuildCommandHeader(Tegra::BufferMethods::SyncpointOperation, 1,
209 Tegra::SubmissionMode::Increasing)); 230 Tegra::SubmissionMode::Increasing));
210 result.emplace_back(BuildFenceAction(Tegra::GPU::FenceOperation::Increment, fence.id)); 231 result.emplace_back(
232 BuildFenceAction(Tegra::Engines::Puller::FenceOperation::Increment, fence.id));
211 } 233 }
212 234
213 return result; 235 return result;
214} 236}
215 237
216static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence, 238static std::vector<Tegra::CommandHeader> BuildIncrementWithWfiCommandList(NvFence fence) {
217 u32 add_increment) {
218 std::vector<Tegra::CommandHeader> result{ 239 std::vector<Tegra::CommandHeader> result{
219 Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForInterrupt, 1, 240 Tegra::BuildCommandHeader(Tegra::BufferMethods::WaitForIdle, 1,
220 Tegra::SubmissionMode::Increasing), 241 Tegra::SubmissionMode::Increasing),
221 {}}; 242 {}};
222 const std::vector<Tegra::CommandHeader> increment{ 243 const std::vector<Tegra::CommandHeader> increment{BuildIncrementCommandList(fence)};
223 BuildIncrementCommandList(fence, add_increment)};
224 244
225 result.insert(result.end(), increment.begin(), increment.end()); 245 result.insert(result.end(), increment.begin(), increment.end());
226 246
@@ -234,33 +254,41 @@ NvResult nvhost_gpu::SubmitGPFIFOImpl(IoctlSubmitGpfifo& params, std::vector<u8>
234 254
235 auto& gpu = system.GPU(); 255 auto& gpu = system.GPU();
236 256
237 params.fence_out.id = channel_fence.id; 257 std::scoped_lock lock(channel_mutex);
238 258
239 if (params.flags.add_wait.Value() && 259 const auto bind_id = channel_state->bind_id;
240 !syncpoint_manager.IsSyncpointExpired(params.fence_out.id, params.fence_out.value)) {
241 gpu.PushGPUEntries(Tegra::CommandList{BuildWaitCommandList(params.fence_out)});
242 }
243 260
244 if (params.flags.add_increment.Value() || params.flags.increment.Value()) { 261 auto& flags = params.flags;
245 const u32 increment_value = params.flags.increment.Value() ? params.fence_out.value : 0; 262
246 params.fence_out.value = syncpoint_manager.IncreaseSyncpoint( 263 if (flags.fence_wait.Value()) {
247 params.fence_out.id, params.AddIncrementValue() + increment_value); 264 if (flags.increment_value.Value()) {
248 } else { 265 return NvResult::BadParameter;
249 params.fence_out.value = syncpoint_manager.GetSyncpointMax(params.fence_out.id); 266 }
267
268 if (!syncpoint_manager.IsFenceSignalled(params.fence)) {
269 gpu.PushGPUEntries(bind_id, Tegra::CommandList{BuildWaitCommandList(params.fence)});
270 }
250 } 271 }
251 272
252 gpu.PushGPUEntries(std::move(entries)); 273 params.fence.id = channel_syncpoint;
274
275 u32 increment{(flags.fence_increment.Value() != 0 ? 2 : 0) +
276 (flags.increment_value.Value() != 0 ? params.fence.value : 0)};
277 params.fence.value = syncpoint_manager.IncrementSyncpointMaxExt(channel_syncpoint, increment);
278 gpu.PushGPUEntries(bind_id, std::move(entries));
253 279
254 if (params.flags.add_increment.Value()) { 280 if (flags.fence_increment.Value()) {
255 if (params.flags.suppress_wfi) { 281 if (flags.suppress_wfi.Value()) {
256 gpu.PushGPUEntries(Tegra::CommandList{ 282 gpu.PushGPUEntries(bind_id,
257 BuildIncrementCommandList(params.fence_out, params.AddIncrementValue())}); 283 Tegra::CommandList{BuildIncrementCommandList(params.fence)});
258 } else { 284 } else {
259 gpu.PushGPUEntries(Tegra::CommandList{ 285 gpu.PushGPUEntries(bind_id,
260 BuildIncrementWithWfiCommandList(params.fence_out, params.AddIncrementValue())}); 286 Tegra::CommandList{BuildIncrementWithWfiCommandList(params.fence)});
261 } 287 }
262 } 288 }
263 289
290 flags.raw = 0;
291
264 std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo)); 292 std::memcpy(output.data(), &params, sizeof(IoctlSubmitGpfifo));
265 return NvResult::Success; 293 return NvResult::Success;
266} 294}
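The syncpoint-max bump above is the sum of two independent contributions: two increments when fence_increment is set (one per FenceAction emitted by BuildIncrementCommandList), plus the caller-supplied fence value when increment_value is set; combining fence_wait with increment_value is rejected as BadParameter. The arithmetic in isolation:

#include <cstdint>
#include <cstdio>

std::uint32_t SubmitIncrement(bool fence_increment, bool increment_value,
                              std::uint32_t fence_value) {
    return (fence_increment ? 2u : 0u) + (increment_value ? fence_value : 0u);
}

int main() {
    std::printf("%u\n", SubmitIncrement(true, false, 0));  // 2: increment appended
    std::printf("%u\n", SubmitIncrement(false, true, 40)); // 40: value-only bump
}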
@@ -328,4 +356,19 @@ NvResult nvhost_gpu::ChannelSetTimeslice(const std::vector<u8>& input, std::vect
328 return NvResult::Success; 356 return NvResult::Success;
329} 357}
330 358
359Kernel::KEvent* nvhost_gpu::QueryEvent(u32 event_id) {
360 switch (event_id) {
361 case 1:
362 return sm_exception_breakpoint_int_report_event;
363 case 2:
364 return sm_exception_breakpoint_pause_report_event;
365 case 3:
366 return error_notifier_event;
367 default: {
 368 LOG_CRITICAL(Service_NVDRV, "Unknown GPU Channel Event {}", event_id);
369 }
370 }
371 return nullptr;
372}
373
331} // namespace Service::Nvidia::Devices 374} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
index 8a9f7775a..1e4ecd55b 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_gpu.h
@@ -13,17 +13,31 @@
13#include "core/hle/service/nvdrv/nvdata.h" 13#include "core/hle/service/nvdrv/nvdata.h"
14#include "video_core/dma_pusher.h" 14#include "video_core/dma_pusher.h"
15 15
16namespace Tegra {
17namespace Control {
18struct ChannelState;
19}
20} // namespace Tegra
21
16namespace Service::Nvidia { 22namespace Service::Nvidia {
23
24namespace NvCore {
25class Container;
26class NvMap;
17class SyncpointManager; 27class SyncpointManager;
18} 28} // namespace NvCore
29
30class EventInterface;
31} // namespace Service::Nvidia
19 32
20namespace Service::Nvidia::Devices { 33namespace Service::Nvidia::Devices {
21 34
35class nvhost_as_gpu;
22class nvmap; 36class nvmap;
23class nvhost_gpu final : public nvdevice { 37class nvhost_gpu final : public nvdevice {
24public: 38public:
25 explicit nvhost_gpu(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 39 explicit nvhost_gpu(Core::System& system_, EventInterface& events_interface_,
26 SyncpointManager& syncpoint_manager_); 40 NvCore::Container& core);
27 ~nvhost_gpu() override; 41 ~nvhost_gpu() override;
28 42
29 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 43 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -36,7 +50,10 @@ public:
36 void OnOpen(DeviceFD fd) override; 50 void OnOpen(DeviceFD fd) override;
37 void OnClose(DeviceFD fd) override; 51 void OnClose(DeviceFD fd) override;
38 52
53 Kernel::KEvent* QueryEvent(u32 event_id) override;
54
39private: 55private:
56 friend class nvhost_as_gpu;
40 enum class CtxObjects : u32_le { 57 enum class CtxObjects : u32_le {
41 Ctx2D = 0x902D, 58 Ctx2D = 0x902D,
42 Ctx3D = 0xB197, 59 Ctx3D = 0xB197,
@@ -146,17 +163,13 @@ private:
146 u32_le num_entries{}; // number of fence objects being submitted 163 u32_le num_entries{}; // number of fence objects being submitted
147 union { 164 union {
148 u32_le raw; 165 u32_le raw;
149 BitField<0, 1, u32_le> add_wait; // append a wait sync_point to the list 166 BitField<0, 1, u32_le> fence_wait; // append a wait sync_point to the list
150 BitField<1, 1, u32_le> add_increment; // append an increment to the list 167 BitField<1, 1, u32_le> fence_increment; // append an increment to the list
151 BitField<2, 1, u32_le> new_hw_format; // mostly ignored 168 BitField<2, 1, u32_le> new_hw_format; // mostly ignored
152 BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt 169 BitField<4, 1, u32_le> suppress_wfi; // suppress wait for interrupt
153 BitField<8, 1, u32_le> increment; // increment the returned fence 170 BitField<8, 1, u32_le> increment_value; // increment the returned fence
154 } flags; 171 } flags;
155 NvFence fence_out{}; // returned new fence object for others to wait on 172 NvFence fence{}; // returned new fence object for others to wait on
156
157 u32 AddIncrementValue() const {
158 return flags.add_increment.Value() << 1;
159 }
160 }; 173 };
161 static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence), 174 static_assert(sizeof(IoctlSubmitGpfifo) == 16 + sizeof(NvFence),
162 "IoctlSubmitGpfifo is incorrect size"); 175 "IoctlSubmitGpfifo is incorrect size");
@@ -191,9 +204,18 @@ private:
191 NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output); 204 NvResult ChannelSetTimeout(const std::vector<u8>& input, std::vector<u8>& output);
192 NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output); 205 NvResult ChannelSetTimeslice(const std::vector<u8>& input, std::vector<u8>& output);
193 206
194 std::shared_ptr<nvmap> nvmap_dev; 207 EventInterface& events_interface;
195 SyncpointManager& syncpoint_manager; 208 NvCore::Container& core;
196 NvFence channel_fence; 209 NvCore::SyncpointManager& syncpoint_manager;
210 NvCore::NvMap& nvmap;
211 std::shared_ptr<Tegra::Control::ChannelState> channel_state;
212 u32 channel_syncpoint;
213 std::mutex channel_mutex;
214
215 // Events
216 Kernel::KEvent* sm_exception_breakpoint_int_report_event;
217 Kernel::KEvent* sm_exception_breakpoint_pause_report_event;
218 Kernel::KEvent* error_notifier_event;
197}; 219};
198 220
199} // namespace Service::Nvidia::Devices 221} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
index a7385fce8..1703f9cc3 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.cpp
@@ -5,14 +5,14 @@
5#include "common/assert.h" 5#include "common/assert.h"
6#include "common/logging/log.h" 6#include "common/logging/log.h"
7#include "core/core.h" 7#include "core/core.h"
8#include "core/hle/service/nvdrv/core/container.h"
8#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h" 9#include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
9#include "video_core/renderer_base.h" 10#include "video_core/renderer_base.h"
10 11
11namespace Service::Nvidia::Devices { 12namespace Service::Nvidia::Devices {
12 13
13nvhost_nvdec::nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 14nvhost_nvdec::nvhost_nvdec(Core::System& system_, NvCore::Container& core_)
14 SyncpointManager& syncpoint_manager_) 15 : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::NvDec} {}
15 : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {}
16nvhost_nvdec::~nvhost_nvdec() = default; 16nvhost_nvdec::~nvhost_nvdec() = default;
17 17
18NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 18NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -21,8 +21,9 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>&
21 case 0x0: 21 case 0x0:
22 switch (command.cmd) { 22 switch (command.cmd) {
23 case 0x1: { 23 case 0x1: {
24 if (!fd_to_id.contains(fd)) { 24 auto& host1x_file = core.Host1xDeviceFile();
25 fd_to_id[fd] = next_id++; 25 if (!host1x_file.fd_to_id.contains(fd)) {
26 host1x_file.fd_to_id[fd] = host1x_file.nvdec_next_id++;
26 } 27 }
27 return Submit(fd, input, output); 28 return Submit(fd, input, output);
28 } 29 }
@@ -73,8 +74,9 @@ void nvhost_nvdec::OnOpen(DeviceFD fd) {
73 74
74void nvhost_nvdec::OnClose(DeviceFD fd) { 75void nvhost_nvdec::OnClose(DeviceFD fd) {
75 LOG_INFO(Service_NVDRV, "NVDEC video stream ended"); 76 LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
76 const auto iter = fd_to_id.find(fd); 77 auto& host1x_file = core.Host1xDeviceFile();
77 if (iter != fd_to_id.end()) { 78 const auto iter = host1x_file.fd_to_id.find(fd);
79 if (iter != host1x_file.fd_to_id.end()) {
78 system.GPU().ClearCdmaInstance(iter->second); 80 system.GPU().ClearCdmaInstance(iter->second);
79 } 81 }
80 system.AudioCore().SetNVDECActive(false); 82 system.AudioCore().SetNVDECActive(false);
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
index 29b3e6a36..c1b4e53e8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec.h
@@ -10,8 +10,7 @@ namespace Service::Nvidia::Devices {
10 10
11class nvhost_nvdec final : public nvhost_nvdec_common { 11class nvhost_nvdec final : public nvhost_nvdec_common {
12public: 12public:
13 explicit nvhost_nvdec(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 13 explicit nvhost_nvdec(Core::System& system_, NvCore::Container& core);
14 SyncpointManager& syncpoint_manager_);
15 ~nvhost_nvdec() override; 14 ~nvhost_nvdec() override;
16 15
17 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 16 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -23,9 +22,6 @@ public:
23 22
24 void OnOpen(DeviceFD fd) override; 23 void OnOpen(DeviceFD fd) override;
25 void OnClose(DeviceFD fd) override; 24 void OnClose(DeviceFD fd) override;
26
27private:
28 u32 next_id{};
29}; 25};
30 26
31} // namespace Service::Nvidia::Devices 27} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
index 8b2cd9bf1..99eede702 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.cpp
@@ -8,10 +8,12 @@
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "common/logging/log.h" 9#include "common/logging/log.h"
10#include "core/core.h" 10#include "core/core.h"
11#include "core/hle/service/nvdrv/core/container.h"
12#include "core/hle/service/nvdrv/core/nvmap.h"
13#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
11#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h" 14#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
12#include "core/hle/service/nvdrv/devices/nvmap.h"
13#include "core/hle/service/nvdrv/syncpoint_manager.h"
14#include "core/memory.h" 15#include "core/memory.h"
16#include "video_core/host1x/host1x.h"
15#include "video_core/memory_manager.h" 17#include "video_core/memory_manager.h"
16#include "video_core/renderer_base.h" 18#include "video_core/renderer_base.h"
17 19
@@ -44,10 +46,22 @@ std::size_t WriteVectors(std::vector<u8>& dst, const std::vector<T>& src, std::s
44} 46}
45} // Anonymous namespace 47} // Anonymous namespace
46 48
47nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 49nvhost_nvdec_common::nvhost_nvdec_common(Core::System& system_, NvCore::Container& core_,
48 SyncpointManager& syncpoint_manager_) 50 NvCore::ChannelType channel_type_)
49 : nvdevice{system_}, nvmap_dev{std::move(nvmap_dev_)}, syncpoint_manager{syncpoint_manager_} {} 51 : nvdevice{system_}, core{core_}, syncpoint_manager{core.GetSyncpointManager()},
50nvhost_nvdec_common::~nvhost_nvdec_common() = default; 52 nvmap{core.GetNvMapFile()}, channel_type{channel_type_} {
53 auto& syncpts_accumulated = core.Host1xDeviceFile().syncpts_accumulated;
54 if (syncpts_accumulated.empty()) {
55 channel_syncpoint = syncpoint_manager.AllocateSyncpoint(false);
56 } else {
57 channel_syncpoint = syncpts_accumulated.front();
58 syncpts_accumulated.pop_front();
59 }
60}
61
62nvhost_nvdec_common::~nvhost_nvdec_common() {
63 core.Host1xDeviceFile().syncpts_accumulated.push_back(channel_syncpoint);
64}
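Construction here takes a channel syncpoint from the pool accumulated in the Host1x device file (or allocates a fresh one) and destruction returns it, so NVDEC/VIC channels recycle syncpoints across stream open/close cycles. A standalone model of that pattern (stand-in allocator, not NvCore::SyncpointManager):

#include <cstdint>
#include <deque>

std::deque<std::uint32_t> syncpts_accumulated;
std::uint32_t next_fresh = 1;

std::uint32_t AcquireChannelSyncpoint() {
    if (syncpts_accumulated.empty()) {
        return next_fresh++; // AllocateSyncpoint(false) in the driver
    }
    const std::uint32_t id = syncpts_accumulated.front();
    syncpts_accumulated.pop_front();
    return id;
}

void ReleaseChannelSyncpoint(std::uint32_t id) {
    syncpts_accumulated.push_back(id);
}

int main() {
    const auto a = AcquireChannelSyncpoint(); // fresh: 1
    ReleaseChannelSyncpoint(a);
    const auto b = AcquireChannelSyncpoint(); // recycled: 1 again
    return a == b ? 0 : 1;
}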
51 65
52NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) { 66NvResult nvhost_nvdec_common::SetNVMAPfd(const std::vector<u8>& input) {
53 IoctlSetNvmapFD params{}; 67 IoctlSetNvmapFD params{};
@@ -84,16 +98,16 @@ NvResult nvhost_nvdec_common::Submit(DeviceFD fd, const std::vector<u8>& input,
84 for (std::size_t i = 0; i < syncpt_increments.size(); i++) { 98 for (std::size_t i = 0; i < syncpt_increments.size(); i++) {
85 const SyncptIncr& syncpt_incr = syncpt_increments[i]; 99 const SyncptIncr& syncpt_incr = syncpt_increments[i];
86 fence_thresholds[i] = 100 fence_thresholds[i] =
87 syncpoint_manager.IncreaseSyncpoint(syncpt_incr.id, syncpt_incr.increments); 101 syncpoint_manager.IncrementSyncpointMaxExt(syncpt_incr.id, syncpt_incr.increments);
88 } 102 }
89 } 103 }
90 for (const auto& cmd_buffer : command_buffers) { 104 for (const auto& cmd_buffer : command_buffers) {
91 const auto object = nvmap_dev->GetObject(cmd_buffer.memory_id); 105 const auto object = nvmap.GetHandle(cmd_buffer.memory_id);
92 ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;); 106 ASSERT_OR_EXECUTE(object, return NvResult::InvalidState;);
93 Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count); 107 Tegra::ChCommandHeaderList cmdlist(cmd_buffer.word_count);
94 system.Memory().ReadBlock(object->addr + cmd_buffer.offset, cmdlist.data(), 108 system.Memory().ReadBlock(object->address + cmd_buffer.offset, cmdlist.data(),
95 cmdlist.size() * sizeof(u32)); 109 cmdlist.size() * sizeof(u32));
96 gpu.PushCommandBuffer(fd_to_id[fd], cmdlist); 110 gpu.PushCommandBuffer(core.Host1xDeviceFile().fd_to_id[fd], cmdlist);
97 } 111 }
98 std::memcpy(output.data(), &params, sizeof(IoctlSubmit)); 112 std::memcpy(output.data(), &params, sizeof(IoctlSubmit));
99 // Some games expect command_buffers to be written back 113 // Some games expect command_buffers to be written back
@@ -112,10 +126,8 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
112 std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint)); 126 std::memcpy(&params, input.data(), sizeof(IoctlGetSyncpoint));
113 LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param); 127 LOG_DEBUG(Service_NVDRV, "called GetSyncpoint, id={}", params.param);
114 128
115 if (device_syncpoints[params.param] == 0 && system.GPU().UseNvdec()) { 129 // const u32 id{NvCore::SyncpointManager::channel_syncpoints[static_cast<u32>(channel_type)]};
116 device_syncpoints[params.param] = syncpoint_manager.AllocateSyncpoint(); 130 params.value = channel_syncpoint;
117 }
118 params.value = device_syncpoints[params.param];
119 std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint)); 131 std::memcpy(output.data(), &params, sizeof(IoctlGetSyncpoint));
120 132
121 return NvResult::Success; 133 return NvResult::Success;
@@ -123,6 +135,7 @@ NvResult nvhost_nvdec_common::GetSyncpoint(const std::vector<u8>& input, std::ve
123 135
124NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) { 136NvResult nvhost_nvdec_common::GetWaitbase(const std::vector<u8>& input, std::vector<u8>& output) {
125 IoctlGetWaitbase params{}; 137 IoctlGetWaitbase params{};
 138 LOG_DEBUG(Service_NVDRV, "called");
126 std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase)); 139 std::memcpy(&params, input.data(), sizeof(IoctlGetWaitbase));
127 params.value = 0; // Seems to be hard coded at 0 140 params.value = 0; // Seems to be hard coded at 0
128 std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase)); 141 std::memcpy(output.data(), &params, sizeof(IoctlGetWaitbase));
@@ -136,28 +149,8 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
136 149
137 SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer)); 150 SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
138 151
139 auto& gpu = system.GPU();
140
141 for (auto& cmd_buffer : cmd_buffer_handles) { 152 for (auto& cmd_buffer : cmd_buffer_handles) {
142 auto object{nvmap_dev->GetObject(cmd_buffer.map_handle)}; 153 cmd_buffer.map_address = nvmap.PinHandle(cmd_buffer.map_handle);
143 if (!object) {
144 LOG_ERROR(Service_NVDRV, "invalid cmd_buffer nvmap_handle={:X}", cmd_buffer.map_handle);
145 std::memcpy(output.data(), &params, output.size());
146 return NvResult::InvalidState;
147 }
148 if (object->dma_map_addr == 0) {
149 // NVDEC and VIC memory is in the 32-bit address space
150 // MapAllocate32 will attempt to map a lower 32-bit value in the shared gpu memory space
151 const GPUVAddr low_addr = gpu.MemoryManager().MapAllocate32(object->addr, object->size);
152 object->dma_map_addr = static_cast<u32>(low_addr);
153 // Ensure that the dma_map_addr is indeed in the lower 32-bit address space.
154 ASSERT(object->dma_map_addr == low_addr);
155 }
156 if (!object->dma_map_addr) {
157 LOG_ERROR(Service_NVDRV, "failed to map size={}", object->size);
158 } else {
159 cmd_buffer.map_address = object->dma_map_addr;
160 }
161 } 154 }
162 std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer)); 155 std::memcpy(output.data(), &params, sizeof(IoctlMapBuffer));
163 std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(), 156 std::memcpy(output.data() + sizeof(IoctlMapBuffer), cmd_buffer_handles.data(),
@@ -167,11 +160,16 @@ NvResult nvhost_nvdec_common::MapBuffer(const std::vector<u8>& input, std::vecto
167} 160}
168 161
169NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) { 162NvResult nvhost_nvdec_common::UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output) {
170 // This is intntionally stubbed. 163 IoctlMapBuffer params{};
171 // Skip unmapping buffers here, as to not break the continuity of the VP9 reference frame 164 std::memcpy(&params, input.data(), sizeof(IoctlMapBuffer));
172 // addresses, and risk invalidating data before the async GPU thread is done with it 165 std::vector<MapBufferEntry> cmd_buffer_handles(params.num_entries);
166
167 SliceVectors(input, cmd_buffer_handles, params.num_entries, sizeof(IoctlMapBuffer));
168 for (auto& cmd_buffer : cmd_buffer_handles) {
169 nvmap.UnpinHandle(cmd_buffer.map_handle);
170 }
171
173 std::memset(output.data(), 0, output.size()); 172 std::memset(output.data(), 0, output.size());
174 LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
175 return NvResult::Success; 173 return NvResult::Success;
176} 174}
177 175
@@ -182,4 +180,9 @@ NvResult nvhost_nvdec_common::SetSubmitTimeout(const std::vector<u8>& input,
182 return NvResult::Success; 180 return NvResult::Success;
183} 181}
184 182
183Kernel::KEvent* nvhost_nvdec_common::QueryEvent(u32 event_id) {
 184 LOG_CRITICAL(Service_NVDRV, "Unknown HOST1X Event {}", event_id);
185 return nullptr;
186}
187
185} // namespace Service::Nvidia::Devices 188} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
index 12d39946d..fe76100c8 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_nvdec_common.h
@@ -3,21 +3,26 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <deque>
6#include <vector> 7#include <vector>
7#include "common/common_types.h" 8#include "common/common_types.h"
8#include "common/swap.h" 9#include "common/swap.h"
10#include "core/hle/service/nvdrv/core/syncpoint_manager.h"
9#include "core/hle/service/nvdrv/devices/nvdevice.h" 11#include "core/hle/service/nvdrv/devices/nvdevice.h"
10 12
11namespace Service::Nvidia { 13namespace Service::Nvidia {
12class SyncpointManager; 14
15namespace NvCore {
16class Container;
17class NvMap;
18} // namespace NvCore
13 19
14namespace Devices { 20namespace Devices {
15class nvmap;
16 21
17class nvhost_nvdec_common : public nvdevice { 22class nvhost_nvdec_common : public nvdevice {
18public: 23public:
19 explicit nvhost_nvdec_common(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 24 explicit nvhost_nvdec_common(Core::System& system_, NvCore::Container& core,
20 SyncpointManager& syncpoint_manager_); 25 NvCore::ChannelType channel_type);
21 ~nvhost_nvdec_common() override; 26 ~nvhost_nvdec_common() override;
22 27
23protected: 28protected:
@@ -110,11 +115,15 @@ protected:
110 NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output); 115 NvResult UnmapBuffer(const std::vector<u8>& input, std::vector<u8>& output);
111 NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output); 116 NvResult SetSubmitTimeout(const std::vector<u8>& input, std::vector<u8>& output);
112 117
113 std::unordered_map<DeviceFD, u32> fd_to_id{}; 118 Kernel::KEvent* QueryEvent(u32 event_id) override;
119
120 u32 channel_syncpoint;
114 s32_le nvmap_fd{}; 121 s32_le nvmap_fd{};
115 u32_le submit_timeout{}; 122 u32_le submit_timeout{};
116 std::shared_ptr<nvmap> nvmap_dev; 123 NvCore::Container& core;
117 SyncpointManager& syncpoint_manager; 124 NvCore::SyncpointManager& syncpoint_manager;
125 NvCore::NvMap& nvmap;
126 NvCore::ChannelType channel_type;
118 std::array<u32, MaxSyncPoints> device_syncpoints{}; 127 std::array<u32, MaxSyncPoints> device_syncpoints{};
119}; 128};
120}; // namespace Devices 129}; // namespace Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
index f58e8bada..73f97136e 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.cpp
@@ -4,13 +4,14 @@
4#include "common/assert.h" 4#include "common/assert.h"
5#include "common/logging/log.h" 5#include "common/logging/log.h"
6#include "core/core.h" 6#include "core/core.h"
7#include "core/hle/service/nvdrv/core/container.h"
7#include "core/hle/service/nvdrv/devices/nvhost_vic.h" 8#include "core/hle/service/nvdrv/devices/nvhost_vic.h"
8#include "video_core/renderer_base.h" 9#include "video_core/renderer_base.h"
9 10
10namespace Service::Nvidia::Devices { 11namespace Service::Nvidia::Devices {
11nvhost_vic::nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 12
12 SyncpointManager& syncpoint_manager_) 13nvhost_vic::nvhost_vic(Core::System& system_, NvCore::Container& core_)
13 : nvhost_nvdec_common{system_, std::move(nvmap_dev_), syncpoint_manager_} {} 14 : nvhost_nvdec_common{system_, core_, NvCore::ChannelType::VIC} {}
14 15
15nvhost_vic::~nvhost_vic() = default; 16nvhost_vic::~nvhost_vic() = default;
16 17
@@ -19,11 +20,13 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& i
19 switch (command.group) { 20 switch (command.group) {
20 case 0x0: 21 case 0x0:
21 switch (command.cmd) { 22 switch (command.cmd) {
22 case 0x1: 23 case 0x1: {
23 if (!fd_to_id.contains(fd)) { 24 auto& host1x_file = core.Host1xDeviceFile();
24 fd_to_id[fd] = next_id++; 25 if (!host1x_file.fd_to_id.contains(fd)) {
26 host1x_file.fd_to_id[fd] = host1x_file.vic_next_id++;
25 } 27 }
26 return Submit(fd, input, output); 28 return Submit(fd, input, output);
29 }
27 case 0x2: 30 case 0x2:
28 return GetSyncpoint(input, output); 31 return GetSyncpoint(input, output);
29 case 0x3: 32 case 0x3:
@@ -67,8 +70,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& i
67void nvhost_vic::OnOpen(DeviceFD fd) {} 70void nvhost_vic::OnOpen(DeviceFD fd) {}
68 71
69void nvhost_vic::OnClose(DeviceFD fd) { 72void nvhost_vic::OnClose(DeviceFD fd) {
70 const auto iter = fd_to_id.find(fd); 73 auto& host1x_file = core.Host1xDeviceFile();
71 if (iter != fd_to_id.end()) { 74 const auto iter = host1x_file.fd_to_id.find(fd);
75 if (iter != host1x_file.fd_to_id.end()) {
72 system.GPU().ClearCdmaInstance(iter->second); 76 system.GPU().ClearCdmaInstance(iter->second);
73 } 77 }
74} 78}
diff --git a/src/core/hle/service/nvdrv/devices/nvhost_vic.h b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
index b41b195ae..f164caafb 100644
--- a/src/core/hle/service/nvdrv/devices/nvhost_vic.h
+++ b/src/core/hle/service/nvdrv/devices/nvhost_vic.h
@@ -9,8 +9,7 @@ namespace Service::Nvidia::Devices {
9 9
10class nvhost_vic final : public nvhost_nvdec_common { 10class nvhost_vic final : public nvhost_nvdec_common {
11public: 11public:
12 explicit nvhost_vic(Core::System& system_, std::shared_ptr<nvmap> nvmap_dev_, 12 explicit nvhost_vic(Core::System& system_, NvCore::Container& core);
13 SyncpointManager& syncpoint_manager_);
14 ~nvhost_vic(); 13 ~nvhost_vic();
15 14
16 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input, 15 NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -22,8 +21,5 @@ public:
22 21
23 void OnOpen(DeviceFD fd) override; 22 void OnOpen(DeviceFD fd) override;
24 void OnClose(DeviceFD fd) override; 23 void OnClose(DeviceFD fd) override;
25
26private:
27 u32 next_id{};
28}; 24};
29} // namespace Service::Nvidia::Devices 25} // namespace Service::Nvidia::Devices
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.cpp b/src/core/hle/service/nvdrv/devices/nvmap.cpp
index d8518149d..ddf273b5e 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.cpp
+++ b/src/core/hle/service/nvdrv/devices/nvmap.cpp
@@ -2,19 +2,26 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <algorithm>
+#include <bit>
 #include <cstring>
 
+#include "common/alignment.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
+#include "core/core.h"
+#include "core/hle/kernel/k_page_table.h"
+#include "core/hle/kernel/k_process.h"
+#include "core/hle/service/nvdrv/core/container.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
+#include "core/memory.h"
+
+using Core::Memory::YUZU_PAGESIZE;
 
 namespace Service::Nvidia::Devices {
 
-nvmap::nvmap(Core::System& system_) : nvdevice{system_} {
-    // Handle 0 appears to be used when remapping, so we create a placeholder empty nvmap object to
-    // represent this.
-    CreateObject(0);
-}
+nvmap::nvmap(Core::System& system_, NvCore::Container& container_)
+    : nvdevice{system_}, container{container_}, file{container.GetNvMapFile()} {}
 
 nvmap::~nvmap() = default;
 
@@ -62,39 +69,21 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
 void nvmap::OnOpen(DeviceFD fd) {}
 void nvmap::OnClose(DeviceFD fd) {}
 
-VAddr nvmap::GetObjectAddress(u32 handle) const {
-    auto object = GetObject(handle);
-    ASSERT(object);
-    ASSERT(object->status == Object::Status::Allocated);
-    return object->addr;
-}
-
-u32 nvmap::CreateObject(u32 size) {
-    // Create a new nvmap object and obtain a handle to it.
-    auto object = std::make_shared<Object>();
-    object->id = next_id++;
-    object->size = size;
-    object->status = Object::Status::Created;
-    object->refcount = 1;
-
-    const u32 handle = next_handle++;
-
-    handles.insert_or_assign(handle, std::move(object));
-
-    return handle;
-}
-
 NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output) {
     IocCreateParams params;
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "size=0x{:08X}", params.size);
+    LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);
 
-    if (!params.size) {
-        LOG_ERROR(Service_NVDRV, "Size is 0");
-        return NvResult::BadValue;
+    std::shared_ptr<NvCore::NvMap::Handle> handle_description{};
+    auto result =
+        file.CreateHandle(Common::AlignUp(params.size, YUZU_PAGESIZE), handle_description);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Failed to create Object");
+        return result;
     }
-
-    params.handle = CreateObject(params.size);
+    handle_description->orig_size = params.size; // Orig size is the unaligned size
+    params.handle = handle_description->id;
+    LOG_DEBUG(Service_NVDRV, "handle: {}, size: 0x{:X}", handle_description->id, params.size);
 
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
@@ -103,63 +92,68 @@ NvResult nvmap::IocCreate(const std::vector<u8>& input, std::vector<u8>& output)
 NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output) {
     IocAllocParams params;
     std::memcpy(&params, input.data(), sizeof(params));
-    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.addr);
+    LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);
 
     if (!params.handle) {
-        LOG_ERROR(Service_NVDRV, "Handle is 0");
+        LOG_CRITICAL(Service_NVDRV, "Handle is 0");
         return NvResult::BadValue;
     }
 
     if ((params.align - 1) & params.align) {
-        LOG_ERROR(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
+        LOG_CRITICAL(Service_NVDRV, "Incorrect alignment used, alignment={:08X}", params.align);
         return NvResult::BadValue;
     }
 
-    const u32 min_alignment = 0x1000;
-    if (params.align < min_alignment) {
-        params.align = min_alignment;
+    // Force page size alignment at a minimum
+    if (params.align < YUZU_PAGESIZE) {
+        params.align = YUZU_PAGESIZE;
     }
 
-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
         return NvResult::BadValue;
     }
 
-    if (object->status == Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
+    if (handle_description->allocated) {
+        LOG_CRITICAL(Service_NVDRV, "Object is already allocated, handle={:08X}", params.handle);
         return NvResult::InsufficientMemory;
     }
 
-    object->flags = params.flags;
-    object->align = params.align;
-    object->kind = params.kind;
-    object->addr = params.addr;
-    object->status = Object::Status::Allocated;
-
+    const auto result =
+        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
+        return result;
+    }
+    ASSERT(system.CurrentProcess()
+               ->PageTable()
+               .LockForDeviceAddressSpace(handle_description->address, handle_description->size)
+               .IsSuccess());
     std::memcpy(output.data(), &params, sizeof(params));
-    return NvResult::Success;
+    return result;
 }
 
 NvResult nvmap::IocGetId(const std::vector<u8>& input, std::vector<u8>& output) {
     IocGetIdParams params;
     std::memcpy(&params, input.data(), sizeof(params));
 
-    LOG_WARNING(Service_NVDRV, "called");
+    LOG_DEBUG(Service_NVDRV, "called");
 
+    // See the comment in FromId for extra info on this function
     if (!params.handle) {
-        LOG_ERROR(Service_NVDRV, "Handle is zero");
+        LOG_CRITICAL(Service_NVDRV, "Error!");
         return NvResult::BadValue;
     }
 
-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
-        return NvResult::BadValue;
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Error!");
+        return NvResult::AccessDenied; // This will always return EPERM irrespective of if the
+                                       // handle exists or not
     }
 
-    params.id = object->id;
-
+    params.id = handle_description->id;
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
@@ -168,26 +162,29 @@ NvResult nvmap::IocFromId(const std::vector<u8>& input, std::vector<u8>& output)
     IocFromIdParams params;
     std::memcpy(&params, input.data(), sizeof(params));
 
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called");
+    LOG_DEBUG(Service_NVDRV, "called, id:{}", params.id);
 
-    auto itr = std::find_if(handles.begin(), handles.end(),
-                            [&](const auto& entry) { return entry.second->id == params.id; });
-    if (itr == handles.end()) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    // Handles and IDs are always the same value in nvmap however IDs can be used globally given the
+    // right permissions.
+    // Since we don't plan on ever supporting multiprocess we can skip implementing handle refs and
+    // so this function just does simple validation and passes through the handle id.
+    if (!params.id) {
+        LOG_CRITICAL(Service_NVDRV, "Zero Id is invalid!");
         return NvResult::BadValue;
     }
 
-    auto& object = itr->second;
-    if (object->status != Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.id)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Unregistered handle!");
         return NvResult::BadValue;
     }
 
-    itr->second->refcount++;
-
-    // Return the existing handle instead of creating a new one.
-    params.handle = itr->first;
-
+    auto result = handle_description->Duplicate(false);
+    if (result != NvResult::Success) {
+        LOG_CRITICAL(Service_NVDRV, "Could not duplicate handle!");
+        return result;
+    }
+    params.handle = handle_description->id;
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
@@ -198,35 +195,43 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
     IocParamParams params;
     std::memcpy(&params, input.data(), sizeof(params));
 
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called type={}", params.param);
+    LOG_DEBUG(Service_NVDRV, "called type={}", params.param);
 
-    auto object = GetObject(params.handle);
-    if (!object) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
+    if (!params.handle) {
+        LOG_CRITICAL(Service_NVDRV, "Invalid handle!");
        return NvResult::BadValue;
     }
 
-    if (object->status != Object::Status::Allocated) {
-        LOG_ERROR(Service_NVDRV, "Object is not allocated, handle={:08X}", params.handle);
+    auto handle_description{file.GetHandle(params.handle)};
+    if (!handle_description) {
+        LOG_CRITICAL(Service_NVDRV, "Not registered handle!");
         return NvResult::BadValue;
     }
 
-    switch (static_cast<ParamTypes>(params.param)) {
-    case ParamTypes::Size:
-        params.result = object->size;
+    switch (params.param) {
+    case HandleParameterType::Size:
+        params.result = static_cast<u32_le>(handle_description->orig_size);
+        break;
+    case HandleParameterType::Alignment:
+        params.result = static_cast<u32_le>(handle_description->align);
         break;
-    case ParamTypes::Alignment:
-        params.result = object->align;
+    case HandleParameterType::Base:
+        params.result = static_cast<u32_le>(-22); // posix EINVAL
        break;
-    case ParamTypes::Heap:
-        // TODO(Subv): Seems to be a hardcoded value?
-        params.result = 0x40000000;
+    case HandleParameterType::Heap:
+        if (handle_description->allocated)
+            params.result = 0x40000000;
+        else
+            params.result = 0;
         break;
-    case ParamTypes::Kind:
-        params.result = object->kind;
+    case HandleParameterType::Kind:
+        params.result = handle_description->kind;
+        break;
+    case HandleParameterType::IsSharedMemMapped:
+        params.result = handle_description->is_shared_mem_mapped;
         break;
     default:
-        UNIMPLEMENTED();
+        return NvResult::BadValue;
     }
 
     std::memcpy(output.data(), &params, sizeof(params));
@@ -234,46 +239,29 @@ NvResult nvmap::IocParam(const std::vector<u8>& input, std::vector<u8>& output)
 }
 
 NvResult nvmap::IocFree(const std::vector<u8>& input, std::vector<u8>& output) {
-    // TODO(Subv): These flags are unconfirmed.
-    enum FreeFlags {
-        Freed = 0,
-        NotFreedYet = 1,
-    };
-
     IocFreeParams params;
     std::memcpy(&params, input.data(), sizeof(params));
 
-    LOG_DEBUG(Service_NVDRV, "(STUBBED) called");
+    LOG_DEBUG(Service_NVDRV, "called");
 
-    auto itr = handles.find(params.handle);
-    if (itr == handles.end()) {
-        LOG_ERROR(Service_NVDRV, "Object does not exist, handle={:08X}", params.handle);
-        return NvResult::BadValue;
-    }
-    if (!itr->second->refcount) {
-        LOG_ERROR(
-            Service_NVDRV,
-            "There is no references to this object. The object is already freed. handle={:08X}",
-            params.handle);
-        return NvResult::BadValue;
+    if (!params.handle) {
+        LOG_CRITICAL(Service_NVDRV, "Handle null freed?");
+        return NvResult::Success;
     }
 
-    itr->second->refcount--;
-
-    params.size = itr->second->size;
-
-    if (itr->second->refcount == 0) {
-        params.flags = Freed;
-        // The address of the nvmap is written to the output if we're finally freeing it, otherwise
-        // 0 is written.
-        params.address = itr->second->addr;
+    if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        ASSERT(system.CurrentProcess()
+                   ->PageTable()
+                   .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
+                   .IsSuccess());
+        params.address = freeInfo->address;
+        params.size = static_cast<u32>(freeInfo->size);
+        params.flags.raw = 0;
+        params.flags.map_uncached.Assign(freeInfo->was_uncached);
     } else {
-        params.flags = NotFreedYet;
-        params.address = 0;
+        // This is possible when there's internel dups or other duplicates.
     }
 
-    handles.erase(params.handle);
-
     std::memcpy(output.data(), &params, sizeof(params));
     return NvResult::Success;
 }
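Two idioms in the hunks above are easy to miss: (params.align - 1) & params.align is non-zero exactly when the alignment is not a power of two, and Common::AlignUp pads the create size to the page size before the handle is built. A standalone sketch of both, assuming the usual 0x1000-byte page:

    #include <bit>
    #include <cstdint>

    constexpr std::uint32_t kPageSize = 0x1000; // stands in for YUZU_PAGESIZE

    // ((align - 1) & align) is non-zero exactly when align has more than one bit set,
    // so the ioctl rejects any alignment that is not zero or a power of two.
    constexpr bool IsValidAlignment(std::uint32_t align) {
        return ((align - 1) & align) == 0;
    }

    // Common::AlignUp reduced to the page-size case used by IocCreate.
    constexpr std::uint64_t AlignUpToPage(std::uint64_t size) {
        return (size + kPageSize - 1) & ~std::uint64_t{kPageSize - 1};
    }

    static_assert(IsValidAlignment(0x1000) && !IsValidAlignment(0x3000));
    static_assert(std::has_single_bit(kPageSize));
    static_assert(AlignUpToPage(1) == 0x1000 && AlignUpToPage(0x1000) == 0x1000);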
diff --git a/src/core/hle/service/nvdrv/devices/nvmap.h b/src/core/hle/service/nvdrv/devices/nvmap.h
index d5360d6e5..e9bfd0358 100644
--- a/src/core/hle/service/nvdrv/devices/nvmap.h
+++ b/src/core/hle/service/nvdrv/devices/nvmap.h
@@ -9,15 +9,23 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 #include "common/swap.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 
+namespace Service::Nvidia::NvCore {
+class Container;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::Nvidia::Devices {
 
 class nvmap final : public nvdevice {
 public:
-    explicit nvmap(Core::System& system_);
+    explicit nvmap(Core::System& system_, NvCore::Container& container);
     ~nvmap() override;
 
+    nvmap(const nvmap&) = delete;
+    nvmap& operator=(const nvmap&) = delete;
+
     NvResult Ioctl1(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
                     std::vector<u8>& output) override;
     NvResult Ioctl2(DeviceFD fd, Ioctl command, const std::vector<u8>& input,
@@ -28,31 +36,15 @@ public:
     void OnOpen(DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 
-    /// Returns the allocated address of an nvmap object given its handle.
-    VAddr GetObjectAddress(u32 handle) const;
-
-    /// Represents an nvmap object.
-    struct Object {
-        enum class Status { Created, Allocated };
-        u32 id;
-        u32 size;
-        u32 flags;
-        u32 align;
-        u8 kind;
-        VAddr addr;
-        Status status;
-        u32 refcount;
-        u32 dma_map_addr;
+    enum class HandleParameterType : u32_le {
+        Size = 1,
+        Alignment = 2,
+        Base = 3,
+        Heap = 4,
+        Kind = 5,
+        IsSharedMemMapped = 6
     };
 
-    std::shared_ptr<Object> GetObject(u32 handle) const {
-        auto itr = handles.find(handle);
-        if (itr != handles.end()) {
-            return itr->second;
-        }
-        return {};
-    }
-
 private:
     /// Id to use for the next handle that is created.
     u32 next_handle = 0;
@@ -60,9 +52,6 @@ private:
     /// Id to use for the next object that is created.
     u32 next_id = 0;
 
-    /// Mapping of currently allocated handles to the objects they represent.
-    std::unordered_map<u32, std::shared_ptr<Object>> handles;
-
     struct IocCreateParams {
         // Input
         u32_le size{};
@@ -83,11 +72,11 @@ private:
         // Input
         u32_le handle{};
         u32_le heap_mask{};
-        u32_le flags{};
+        NvCore::NvMap::Handle::Flags flags{};
         u32_le align{};
         u8 kind{};
         INSERT_PADDING_BYTES(7);
-        u64_le addr{};
+        u64_le address{};
     };
     static_assert(sizeof(IocAllocParams) == 32, "IocAllocParams has wrong size");
 
@@ -96,14 +85,14 @@ private:
         INSERT_PADDING_BYTES(4);
         u64_le address{};
         u32_le size{};
-        u32_le flags{};
+        NvCore::NvMap::Handle::Flags flags{};
     };
     static_assert(sizeof(IocFreeParams) == 24, "IocFreeParams has wrong size");
 
     struct IocParamParams {
         // Input
         u32_le handle{};
-        u32_le param{};
+        HandleParameterType param{};
         // Output
         u32_le result{};
     };
@@ -117,14 +106,15 @@ private:
     };
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");
 
-    u32 CreateObject(u32 size);
-
     NvResult IocCreate(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocAlloc(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocGetId(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocFromId(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocParam(const std::vector<u8>& input, std::vector<u8>& output);
     NvResult IocFree(const std::vector<u8>& input, std::vector<u8>& output);
+
+    NvCore::Container& container;
+    NvCore::NvMap& file;
 };
 
 } // namespace Service::Nvidia::Devices
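The ioctl payloads stay plain packed structs memcpy'd through byte vectors, so the new HandleParameterType can be exercised with nothing but the layout above. A hedged sketch of serializing a Size query (ParamQuery mirrors IocParamParams with u32_le collapsed to uint32_t; the helper is hypothetical):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    enum class HandleParameterType : std::uint32_t {
        Size = 1, Alignment = 2, Base = 3, Heap = 4, Kind = 5, IsSharedMemMapped = 6,
    };

    // Same field order as IocParamParams in the header above.
    struct ParamQuery {
        std::uint32_t handle{};      // Input
        HandleParameterType param{}; // Input
        std::uint32_t result{};      // Output
    };
    static_assert(sizeof(ParamQuery) == 12, "ParamQuery has wrong size");

    // Serialize a Size query the way the service expects its input buffer.
    std::vector<std::uint8_t> MakeSizeQuery(std::uint32_t handle) {
        const ParamQuery query{.handle = handle, .param = HandleParameterType::Size};
        std::vector<std::uint8_t> input(sizeof(query));
        std::memcpy(input.data(), &query, sizeof(query));
        return input;
    }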
diff --git a/src/core/hle/service/nvdrv/nvdata.h b/src/core/hle/service/nvdrv/nvdata.h
index 1d00394c8..0e2f47075 100644
--- a/src/core/hle/service/nvdrv/nvdata.h
+++ b/src/core/hle/service/nvdrv/nvdata.h
@@ -1,5 +1,6 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
@@ -78,11 +79,15 @@ enum class NvResult : u32 {
     ModuleNotPresent = 0xA000E,
 };
 
+// obtained from
+// https://github.com/skyline-emu/skyline/blob/nvdec-dev/app/src/main/cpp/skyline/services/nvdrv/devices/nvhost/ctrl.h#L47
 enum class EventState {
-    Free = 0,
-    Registered = 1,
-    Waiting = 2,
-    Busy = 3,
+    Available = 0,
+    Waiting = 1,
+    Cancelling = 2,
+    Signalling = 3,
+    Signalled = 4,
+    Cancelled = 5,
 };
 
 union Ioctl {
diff --git a/src/core/hle/service/nvdrv/nvdrv.cpp b/src/core/hle/service/nvdrv/nvdrv.cpp
index 756eb7453..5e7b7468f 100644
--- a/src/core/hle/service/nvdrv/nvdrv.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv.cpp
@@ -1,5 +1,6 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #include <utility>
 
@@ -8,6 +9,7 @@
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_writable_event.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/devices/nvdevice.h"
 #include "core/hle/service/nvdrv/devices/nvdisp_disp0.h"
 #include "core/hle/service/nvdrv/devices/nvhost_as_gpu.h"
@@ -15,17 +17,31 @@
 #include "core/hle/service/nvdrv/devices/nvhost_ctrl_gpu.h"
 #include "core/hle/service/nvdrv/devices/nvhost_gpu.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvdec.h"
+#include "core/hle/service/nvdrv/devices/nvhost_nvdec_common.h"
 #include "core/hle/service/nvdrv/devices/nvhost_nvjpg.h"
 #include "core/hle/service/nvdrv/devices/nvhost_vic.h"
 #include "core/hle/service/nvdrv/devices/nvmap.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvdrv/nvdrv_interface.h"
 #include "core/hle/service/nvdrv/nvmemp.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/nvflinger.h"
+#include "video_core/gpu.h"
 
 namespace Service::Nvidia {
 
+EventInterface::EventInterface(Module& module_) : module{module_}, guard{}, on_signal{} {}
+
+EventInterface::~EventInterface() = default;
+
+Kernel::KEvent* EventInterface::CreateEvent(std::string name) {
+    Kernel::KEvent* new_event = module.service_context.CreateEvent(std::move(name));
+    return new_event;
+}
+
+void EventInterface::FreeEvent(Kernel::KEvent* event) {
+    module.service_context.CloseEvent(event);
+}
+
 void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger& nvflinger,
                        Core::System& system) {
     auto module_ = std::make_shared<Module>(system);
@@ -38,34 +54,54 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
 }
 
 Module::Module(Core::System& system)
-    : syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        events_interface.events[i].event =
-            service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i));
-        events_interface.status[i] = EventState::Free;
-        events_interface.registered[i] = false;
-    }
-    auto nvmap_dev = std::make_shared<Devices::nvmap>(system);
-    devices["/dev/nvhost-as-gpu"] = std::make_shared<Devices::nvhost_as_gpu>(system, nvmap_dev);
-    devices["/dev/nvhost-gpu"] =
-        std::make_shared<Devices::nvhost_gpu>(system, nvmap_dev, syncpoint_manager);
-    devices["/dev/nvhost-ctrl-gpu"] = std::make_shared<Devices::nvhost_ctrl_gpu>(system);
-    devices["/dev/nvmap"] = nvmap_dev;
-    devices["/dev/nvdisp_disp0"] = std::make_shared<Devices::nvdisp_disp0>(system, nvmap_dev);
-    devices["/dev/nvhost-ctrl"] =
-        std::make_shared<Devices::nvhost_ctrl>(system, events_interface, syncpoint_manager);
-    devices["/dev/nvhost-nvdec"] =
-        std::make_shared<Devices::nvhost_nvdec>(system, nvmap_dev, syncpoint_manager);
-    devices["/dev/nvhost-nvjpg"] = std::make_shared<Devices::nvhost_nvjpg>(system);
-    devices["/dev/nvhost-vic"] =
-        std::make_shared<Devices::nvhost_vic>(system, nvmap_dev, syncpoint_manager);
+    : service_context{system, "nvdrv"}, events_interface{*this}, container{system.Host1x()} {
+    builders["/dev/nvhost-as-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_as_gpu>(system, *this, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_gpu>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl-gpu"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl_gpu>(system, events_interface);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvmap"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvmap>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvdisp_disp0"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvdisp_disp0>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-ctrl"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_ctrl>(system, events_interface, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvdec"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_nvdec>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-nvjpg"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device = std::make_shared<Devices::nvhost_nvjpg>(system);
+        return open_files.emplace(fd, device).first;
+    };
+    builders["/dev/nvhost-vic"] = [this, &system](DeviceFD fd) {
+        std::shared_ptr<Devices::nvdevice> device =
+            std::make_shared<Devices::nvhost_vic>(system, container);
+        return open_files.emplace(fd, device).first;
+    };
 }
 
-Module::~Module() {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        service_context.CloseEvent(events_interface.events[i].event);
-    }
-}
+Module::~Module() {}
 
 NvResult Module::VerifyFD(DeviceFD fd) const {
     if (fd < 0) {
@@ -82,18 +118,18 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
 }
 
 DeviceFD Module::Open(const std::string& device_name) {
-    if (devices.find(device_name) == devices.end()) {
+    auto it = builders.find(device_name);
+    if (it == builders.end()) {
         LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);
         return INVALID_NVDRV_FD;
     }
 
-    auto device = devices[device_name];
     const DeviceFD fd = next_fd++;
+    auto& builder = it->second;
+    auto device = builder(fd)->second;
 
     device->OnOpen(fd);
 
-    open_files[fd] = std::move(device);
-
     return fd;
 }
 
@@ -168,22 +204,24 @@ NvResult Module::Close(DeviceFD fd) {
     return NvResult::Success;
 }
 
-void Module::SignalSyncpt(const u32 syncpoint_id, const u32 value) {
-    for (u32 i = 0; i < MaxNvEvents; i++) {
-        if (events_interface.assigned_syncpt[i] == syncpoint_id &&
-            events_interface.assigned_value[i] == value) {
-            events_interface.LiberateEvent(i);
-            events_interface.events[i].event->GetWritableEvent().Signal();
-        }
+NvResult Module::QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event) {
+    if (fd < 0) {
+        LOG_ERROR(Service_NVDRV, "Invalid DeviceFD={}!", fd);
+        return NvResult::InvalidState;
     }
-}
 
-Kernel::KReadableEvent& Module::GetEvent(const u32 event_id) {
-    return events_interface.events[event_id].event->GetReadableEvent();
-}
+    const auto itr = open_files.find(fd);
 
-Kernel::KWritableEvent& Module::GetEventWriteable(const u32 event_id) {
-    return events_interface.events[event_id].event->GetWritableEvent();
+    if (itr == open_files.end()) {
+        LOG_ERROR(Service_NVDRV, "Could not find DeviceFD={}!", fd);
+        return NvResult::NotImplemented;
+    }
+
+    event = itr->second->QueryEvent(event_id);
+    if (!event) {
+        return NvResult::BadParameter;
+    }
+    return NvResult::Success;
 }
 
 } // namespace Service::Nvidia
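Device construction is now lazy and per-fd: Open looks up a builder lambda, and every call constructs a fresh device instance keyed by the new fd instead of handing out one shared instance per node name. A generic sketch of the registry pattern (names are stand-ins, not yuzu's types):

    #include <functional>
    #include <memory>
    #include <string>
    #include <unordered_map>

    struct Device {
        virtual ~Device() = default;
    };
    using DeviceFD = int;
    using OpenFiles = std::unordered_map<DeviceFD, std::shared_ptr<Device>>;

    class Registry {
    public:
        template <typename T>
        void Register(const std::string& name) {
            // Each entry captures how to build one device instance for a given fd.
            builders[name] = [this](DeviceFD fd) {
                return open_files.emplace(fd, std::make_shared<T>()).first;
            };
        }

        DeviceFD Open(const std::string& name) {
            const auto it = builders.find(name);
            if (it == builders.end()) {
                return -1; // unknown device node
            }
            const DeviceFD fd = next_fd++;
            it->second(fd); // construct and insert under this fd
            return fd;
        }

    private:
        DeviceFD next_fd = 1;
        OpenFiles open_files;
        std::unordered_map<std::string, std::function<OpenFiles::iterator(DeviceFD)>> builders;
    };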
diff --git a/src/core/hle/service/nvdrv/nvdrv.h b/src/core/hle/service/nvdrv/nvdrv.h
index c929e5106..146d046a9 100644
--- a/src/core/hle/service/nvdrv/nvdrv.h
+++ b/src/core/hle/service/nvdrv/nvdrv.h
@@ -1,16 +1,20 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
+#include <functional>
+#include <list>
 #include <memory>
+#include <string>
 #include <unordered_map>
 #include <vector>
 
 #include "common/common_types.h"
 #include "core/hle/service/kernel_helpers.h"
+#include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/nvdata.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
 #include "core/hle/service/nvflinger/ui/fence.h"
 #include "core/hle/service/service.h"
 
@@ -28,81 +32,31 @@ class NVFlinger;
 
 namespace Service::Nvidia {
 
+namespace NvCore {
+class Container;
 class SyncpointManager;
+} // namespace NvCore
 
 namespace Devices {
 class nvdevice;
-}
+class nvhost_ctrl;
+} // namespace Devices
 
-/// Represents an Nvidia event
-struct NvEvent {
-    Kernel::KEvent* event{};
-    NvFence fence{};
-};
+class Module;
 
-struct EventInterface {
-    // Mask representing currently busy events
-    u64 events_mask{};
-    // Each kernel event associated to an NV event
-    std::array<NvEvent, MaxNvEvents> events;
-    // The status of the current NVEvent
-    std::array<EventState, MaxNvEvents> status{};
-    // Tells if an NVEvent is registered or not
-    std::array<bool, MaxNvEvents> registered{};
-    // Tells the NVEvent that it has failed.
-    std::array<bool, MaxNvEvents> failed{};
-    // When an NVEvent is waiting on GPU interrupt, this is the sync_point
-    // associated with it.
-    std::array<u32, MaxNvEvents> assigned_syncpt{};
-    // This is the value of the GPU interrupt for which the NVEvent is waiting
-    // for.
-    std::array<u32, MaxNvEvents> assigned_value{};
-    // Constant to denote an unasigned syncpoint.
-    static constexpr u32 unassigned_syncpt = 0xFFFFFFFF;
-    std::optional<u32> GetFreeEvent() const {
-        u64 mask = events_mask;
-        for (u32 i = 0; i < MaxNvEvents; i++) {
-            const bool is_free = (mask & 0x1) == 0;
-            if (is_free) {
-                if (status[i] == EventState::Registered || status[i] == EventState::Free) {
-                    return {i};
-                }
-            }
-            mask = mask >> 1;
-        }
-        return std::nullopt;
-    }
-    void SetEventStatus(const u32 event_id, EventState new_status) {
-        EventState old_status = status[event_id];
-        if (old_status == new_status) {
-            return;
-        }
-        status[event_id] = new_status;
-        if (new_status == EventState::Registered) {
-            registered[event_id] = true;
-        }
-        if (new_status == EventState::Waiting || new_status == EventState::Busy) {
-            events_mask |= (1ULL << event_id);
-        }
-    }
-    void RegisterEvent(const u32 event_id) {
-        registered[event_id] = true;
-        if (status[event_id] == EventState::Free) {
-            status[event_id] = EventState::Registered;
-        }
-    }
-    void UnregisterEvent(const u32 event_id) {
-        registered[event_id] = false;
-        if (status[event_id] == EventState::Registered) {
-            status[event_id] = EventState::Free;
-        }
-    }
-    void LiberateEvent(const u32 event_id) {
-        status[event_id] = registered[event_id] ? EventState::Registered : EventState::Free;
-        events_mask &= ~(1ULL << event_id);
-        assigned_syncpt[event_id] = unassigned_syncpt;
-        assigned_value[event_id] = 0;
-    }
+class EventInterface {
+public:
+    explicit EventInterface(Module& module_);
+    ~EventInterface();
+
+    Kernel::KEvent* CreateEvent(std::string name);
+
+    void FreeEvent(Kernel::KEvent* event);
+
+private:
+    Module& module;
+    std::mutex guard;
+    std::list<Devices::nvhost_ctrl*> on_signal;
 };
 
 class Module final {
@@ -112,9 +66,9 @@ public:
 
     /// Returns a pointer to one of the available devices, identified by its name.
     template <typename T>
-    std::shared_ptr<T> GetDevice(const std::string& name) {
-        auto itr = devices.find(name);
-        if (itr == devices.end())
+    std::shared_ptr<T> GetDevice(DeviceFD fd) {
+        auto itr = open_files.find(fd);
+        if (itr == open_files.end())
             return nullptr;
         return std::static_pointer_cast<T>(itr->second);
     }
@@ -137,28 +91,27 @@ public:
     /// Closes a device file descriptor and returns operation success.
     NvResult Close(DeviceFD fd);
 
-    void SignalSyncpt(const u32 syncpoint_id, const u32 value);
-
-    Kernel::KReadableEvent& GetEvent(u32 event_id);
-
-    Kernel::KWritableEvent& GetEventWriteable(u32 event_id);
+    NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);
 
 private:
-    /// Manages syncpoints on the host
-    SyncpointManager syncpoint_manager;
+    friend class EventInterface;
+    friend class Service::NVFlinger::NVFlinger;
 
     /// Id to use for the next open file descriptor.
     DeviceFD next_fd = 1;
 
+    using FilesContainerType = std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>>;
     /// Mapping of file descriptors to the devices they reference.
-    std::unordered_map<DeviceFD, std::shared_ptr<Devices::nvdevice>> open_files;
+    FilesContainerType open_files;
 
-    /// Mapping of device node names to their implementation.
-    std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
+    KernelHelpers::ServiceContext service_context;
 
     EventInterface events_interface;
 
-    KernelHelpers::ServiceContext service_context;
+    /// Manages syncpoints on the host
+    NvCore::Container container;
+
+    std::unordered_map<std::string, std::function<FilesContainerType::iterator(DeviceFD)>> builders;
 };
 
 /// Registers all NVDRV services with the specified service manager.
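GetDevice now keys on the fd table rather than the name table, so callers must hold the fd returned by Open; NVFlinger caches disp_fd for exactly this (see nvflinger.cpp later in this commit). A minimal sketch of the lookup-and-downcast, with stub types standing in for yuzu's device classes:

    #include <memory>
    #include <unordered_map>

    struct nvdevice_stub {
        virtual ~nvdevice_stub() = default;
    };
    struct nvdisp_stub final : nvdevice_stub {};

    template <typename T>
    std::shared_ptr<T> GetDevice(
        const std::unordered_map<int, std::shared_ptr<nvdevice_stub>>& open_files, int fd) {
        const auto itr = open_files.find(fd);
        if (itr == open_files.end()) {
            return nullptr;
        }
        // static_pointer_cast mirrors the header: the caller asserts the concrete type.
        return std::static_pointer_cast<T>(itr->second);
    }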
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.cpp b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
index b5a980384..edbdfee43 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.cpp
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.cpp
@@ -1,10 +1,12 @@
-// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #include <cinttypes>
 #include "common/logging/log.h"
 #include "core/core.h"
 #include "core/hle/ipc_helpers.h"
+#include "core/hle/kernel/k_event.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
@@ -12,10 +14,6 @@
 
 namespace Service::Nvidia {
 
-void NVDRV::SignalGPUInterruptSyncpt(const u32 syncpoint_id, const u32 value) {
-    nvdrv->SignalSyncpt(syncpoint_id, value);
-}
-
 void NVDRV::Open(Kernel::HLERequestContext& ctx) {
     LOG_DEBUG(Service_NVDRV, "called");
     IPC::ResponseBuilder rb{ctx, 4};
@@ -164,8 +162,7 @@ void NVDRV::Initialize(Kernel::HLERequestContext& ctx) {
 void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
     IPC::RequestParser rp{ctx};
     const auto fd = rp.Pop<DeviceFD>();
-    const auto event_id = rp.Pop<u32>() & 0x00FF;
-    LOG_WARNING(Service_NVDRV, "(STUBBED) called, fd={:X}, event_id={:X}", fd, event_id);
+    const auto event_id = rp.Pop<u32>();
 
     if (!is_initialized) {
         ServiceError(ctx, NvResult::NotInitialized);
@@ -173,24 +170,20 @@ void NVDRV::QueryEvent(Kernel::HLERequestContext& ctx) {
         return;
     }
 
-    const auto nv_result = nvdrv->VerifyFD(fd);
-    if (nv_result != NvResult::Success) {
-        LOG_ERROR(Service_NVDRV, "Invalid FD specified DeviceFD={}!", fd);
-        ServiceError(ctx, nv_result);
-        return;
-    }
+    Kernel::KEvent* event = nullptr;
+    NvResult result = nvdrv->QueryEvent(fd, event_id, event);
 
-    if (event_id < MaxNvEvents) {
+    if (result == NvResult::Success) {
         IPC::ResponseBuilder rb{ctx, 3, 1};
         rb.Push(ResultSuccess);
-        auto& event = nvdrv->GetEvent(event_id);
-        event.Clear();
-        rb.PushCopyObjects(event);
+        auto& readable_event = event->GetReadableEvent();
+        rb.PushCopyObjects(readable_event);
         rb.PushEnum(NvResult::Success);
     } else {
+        LOG_ERROR(Service_NVDRV, "Invalid event request!");
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(ResultSuccess);
-        rb.PushEnum(NvResult::BadParameter);
+        rb.PushEnum(result);
     }
 }
 
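The interface now funnels event lookup through Module::QueryEvent's out-parameter contract instead of indexing a fixed event table. A reduced sketch of that contract under stand-in types (Result and Event are illustrative, not yuzu's):

    #include <cstdint>
    #include <unordered_map>

    enum class Result { Success, InvalidState, NotImplemented, BadParameter };

    struct Event {}; // stand-in for Kernel::KEvent

    // Mirrors the validation order in the new Module::QueryEvent: bad fd, unknown fd,
    // then a per-device lookup that may still reject the event id.
    Result QueryEvent(const std::unordered_map<int, Event*>& open_files, int fd,
                      std::uint32_t event_id, Event*& out_event) {
        if (fd < 0) {
            return Result::InvalidState;
        }
        const auto itr = open_files.find(fd);
        if (itr == open_files.end()) {
            return Result::NotImplemented;
        }
        out_event = itr->second; // a real device would dispatch on event_id here
        static_cast<void>(event_id);
        return out_event != nullptr ? Result::Success : Result::BadParameter;
    }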
diff --git a/src/core/hle/service/nvdrv/nvdrv_interface.h b/src/core/hle/service/nvdrv/nvdrv_interface.h
index cbd37b52b..cd58a4f35 100644
--- a/src/core/hle/service/nvdrv/nvdrv_interface.h
+++ b/src/core/hle/service/nvdrv/nvdrv_interface.h
@@ -18,8 +18,6 @@ public:
     explicit NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char* name);
     ~NVDRV() override;
 
-    void SignalGPUInterruptSyncpt(u32 syncpoint_id, u32 value);
-
 private:
     void Open(Kernel::HLERequestContext& ctx);
     void Ioctl1(Kernel::HLERequestContext& ctx);
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.cpp b/src/core/hle/service/nvdrv/syncpoint_manager.cpp
deleted file mode 100644
index a6fa943e8..000000000
--- a/src/core/hle/service/nvdrv/syncpoint_manager.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include "common/assert.h"
-#include "core/hle/service/nvdrv/syncpoint_manager.h"
-#include "video_core/gpu.h"
-
-namespace Service::Nvidia {
-
-SyncpointManager::SyncpointManager(Tegra::GPU& gpu_) : gpu{gpu_} {}
-
-SyncpointManager::~SyncpointManager() = default;
-
-u32 SyncpointManager::RefreshSyncpoint(u32 syncpoint_id) {
-    syncpoints[syncpoint_id].min = gpu.GetSyncpointValue(syncpoint_id);
-    return GetSyncpointMin(syncpoint_id);
-}
-
-u32 SyncpointManager::AllocateSyncpoint() {
-    for (u32 syncpoint_id = 1; syncpoint_id < MaxSyncPoints; syncpoint_id++) {
-        if (!syncpoints[syncpoint_id].is_allocated) {
-            syncpoints[syncpoint_id].is_allocated = true;
-            return syncpoint_id;
-        }
-    }
-    ASSERT_MSG(false, "No more available syncpoints!");
-    return {};
-}
-
-u32 SyncpointManager::IncreaseSyncpoint(u32 syncpoint_id, u32 value) {
-    for (u32 index = 0; index < value; ++index) {
-        syncpoints[syncpoint_id].max.fetch_add(1, std::memory_order_relaxed);
-    }
-
-    return GetSyncpointMax(syncpoint_id);
-}
-
-} // namespace Service::Nvidia
diff --git a/src/core/hle/service/nvdrv/syncpoint_manager.h b/src/core/hle/service/nvdrv/syncpoint_manager.h
deleted file mode 100644
index 7f080f76e..000000000
--- a/src/core/hle/service/nvdrv/syncpoint_manager.h
+++ /dev/null
@@ -1,84 +0,0 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#pragma once
-
-#include <array>
-#include <atomic>
-
-#include "common/common_types.h"
-#include "core/hle/service/nvdrv/nvdata.h"
-
-namespace Tegra {
-class GPU;
-}
-
-namespace Service::Nvidia {
-
-class SyncpointManager final {
-public:
-    explicit SyncpointManager(Tegra::GPU& gpu_);
-    ~SyncpointManager();
-
-    /**
-     * Returns true if the specified syncpoint is expired for the given value.
-     * @param syncpoint_id Syncpoint ID to check.
-     * @param value Value to check against the specified syncpoint.
-     * @returns True if the specified syncpoint is expired for the given value, otherwise False.
-     */
-    bool IsSyncpointExpired(u32 syncpoint_id, u32 value) const {
-        return (GetSyncpointMax(syncpoint_id) - value) >= (GetSyncpointMin(syncpoint_id) - value);
-    }
-
-    /**
-     * Gets the lower bound for the specified syncpoint.
-     * @param syncpoint_id Syncpoint ID to get the lower bound for.
-     * @returns The lower bound for the specified syncpoint.
-     */
-    u32 GetSyncpointMin(u32 syncpoint_id) const {
-        return syncpoints.at(syncpoint_id).min.load(std::memory_order_relaxed);
-    }
-
-    /**
-     * Gets the uper bound for the specified syncpoint.
-     * @param syncpoint_id Syncpoint ID to get the upper bound for.
-     * @returns The upper bound for the specified syncpoint.
-     */
-    u32 GetSyncpointMax(u32 syncpoint_id) const {
-        return syncpoints.at(syncpoint_id).max.load(std::memory_order_relaxed);
-    }
-
-    /**
-     * Refreshes the minimum value for the specified syncpoint.
-     * @param syncpoint_id Syncpoint ID to be refreshed.
-     * @returns The new syncpoint minimum value.
-     */
-    u32 RefreshSyncpoint(u32 syncpoint_id);
-
-    /**
-     * Allocates a new syncoint.
-     * @returns The syncpoint ID for the newly allocated syncpoint.
-     */
-    u32 AllocateSyncpoint();
-
-    /**
-     * Increases the maximum value for the specified syncpoint.
-     * @param syncpoint_id Syncpoint ID to be increased.
-     * @param value Value to increase the specified syncpoint by.
-     * @returns The new syncpoint maximum value.
-     */
-    u32 IncreaseSyncpoint(u32 syncpoint_id, u32 value);
-
-private:
-    struct Syncpoint {
-        std::atomic<u32> min;
-        std::atomic<u32> max;
-        std::atomic<bool> is_allocated;
-    };
-
-    std::array<Syncpoint, MaxSyncPoints> syncpoints{};
-
-    Tegra::GPU& gpu;
-};
-
-} // namespace Service::Nvidia
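The deleted service-side SyncpointManager is superseded by the host1x implementation under video_core (note the new includes in nvflinger.cpp below). Its expiry test is worth recording before it goes: both subtractions are unsigned and therefore modulo 2^32, so the comparison stays valid after the counters wrap. A worked check of the same formula:

    #include <cstdint>

    // Same expression as the deleted IsSyncpointExpired, with explicit types.
    constexpr bool IsExpired(std::uint32_t max, std::uint32_t min, std::uint32_t value) {
        return (max - value) >= (min - value);
    }

    // Near the wrap boundary: both counters have wrapped past 0 while value has not.
    static_assert(IsExpired(/*max=*/5, /*min=*/5, /*value=*/0xFFFFFFF0u));
    // Not yet expired: min (last observed GPU value) is still behind the fence value.
    static_assert(!IsExpired(/*max=*/10, /*min=*/4, /*value=*/7));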
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
index 4b3d5efd6..1ce67c771 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.cpp
@@ -5,15 +5,18 @@
 // https://cs.android.com/android/platform/superproject/+/android-5.1.1_r38:frameworks/native/libs/gui/BufferQueueConsumer.cpp
 
 #include "common/logging/log.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvflinger/buffer_item.h"
 #include "core/hle/service/nvflinger/buffer_queue_consumer.h"
 #include "core/hle/service/nvflinger/buffer_queue_core.h"
 #include "core/hle/service/nvflinger/producer_listener.h"
+#include "core/hle/service/nvflinger/ui/graphic_buffer.h"
 
 namespace Service::android {
 
-BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_)
-    : core{std::move(core_)}, slots{core->slots} {}
+BufferQueueConsumer::BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
+                                         Service::Nvidia::NvCore::NvMap& nvmap_)
+    : core{std::move(core_)}, slots{core->slots}, nvmap(nvmap_) {}
 
 BufferQueueConsumer::~BufferQueueConsumer() = default;
 
@@ -133,6 +136,8 @@ Status BufferQueueConsumer::ReleaseBuffer(s32 slot, u64 frame_number, const Fenc
 
     slots[slot].buffer_state = BufferState::Free;
 
+    nvmap.FreeHandle(slots[slot].graphic_buffer->BufferId(), true);
+
     listener = core->connected_producer_listener;
 
     LOG_DEBUG(Service_NVFlinger, "releasing slot {}", slot);
diff --git a/src/core/hle/service/nvflinger/buffer_queue_consumer.h b/src/core/hle/service/nvflinger/buffer_queue_consumer.h
index b598c314f..4ec06ca13 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_consumer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_consumer.h
@@ -13,6 +13,10 @@
 #include "core/hle/service/nvflinger/buffer_queue_defs.h"
 #include "core/hle/service/nvflinger/status.h"
 
+namespace Service::Nvidia::NvCore {
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::android {
 
 class BufferItem;
@@ -21,7 +25,8 @@ class IConsumerListener;
 
 class BufferQueueConsumer final {
 public:
-    explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_);
+    explicit BufferQueueConsumer(std::shared_ptr<BufferQueueCore> core_,
+                                 Service::Nvidia::NvCore::NvMap& nvmap_);
     ~BufferQueueConsumer();
 
     Status AcquireBuffer(BufferItem* out_buffer, std::chrono::nanoseconds expected_present);
@@ -32,6 +37,7 @@ public:
 private:
     std::shared_ptr<BufferQueueCore> core;
     BufferQueueDefs::SlotsType& slots;
+    Service::Nvidia::NvCore::NvMap& nvmap;
 };
 
 } // namespace Service::android
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
index 337431488..d4ab23a10 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.cpp
@@ -14,7 +14,7 @@
 #include "core/hle/kernel/k_writable_event.h"
 #include "core/hle/kernel/kernel.h"
 #include "core/hle/service/kernel_helpers.h"
-#include "core/hle/service/nvdrv/nvdrv.h"
+#include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvflinger/buffer_queue_core.h"
 #include "core/hle/service/nvflinger/buffer_queue_producer.h"
 #include "core/hle/service/nvflinger/consumer_listener.h"
@@ -26,8 +26,10 @@
 namespace Service::android {
 
 BufferQueueProducer::BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_,
-                                         std::shared_ptr<BufferQueueCore> buffer_queue_core_)
-    : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots) {
+                                         std::shared_ptr<BufferQueueCore> buffer_queue_core_,
+                                         Service::Nvidia::NvCore::NvMap& nvmap_)
+    : service_context{service_context_}, core{std::move(buffer_queue_core_)}, slots(core->slots),
+      nvmap(nvmap_) {
     buffer_wait_event = service_context.CreateEvent("BufferQueue:WaitEvent");
 }
 
@@ -530,6 +532,8 @@ Status BufferQueueProducer::QueueBuffer(s32 slot, const QueueBufferInput& input,
     item.is_droppable = core->dequeue_buffer_cannot_block || async;
     item.swap_interval = swap_interval;
 
+    nvmap.DuplicateHandle(item.graphic_buffer->BufferId(), true);
+
     sticky_transform = sticky_transform_;
 
     if (core->queue.empty()) {
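QueueBuffer now takes a session reference on the queued buffer's nvmap handle, and BufferQueueConsumer::ReleaseBuffer (earlier in this commit) drops it again via FreeHandle, so each queued frame keeps its backing memory alive until the consumer releases it. A toy model of that pairing (ToyNvMap is illustrative, not yuzu's NvCore::NvMap):

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct ToyNvMap {
        std::unordered_map<std::uint32_t, int> session_refs;

        void DuplicateHandle(std::uint32_t id, bool /*session*/) {
            ++session_refs[id]; // producer side: QueueBuffer
        }
        void FreeHandle(std::uint32_t id, bool /*session*/) {
            assert(session_refs[id] > 0 && "release without a matching queue");
            --session_refs[id]; // consumer side: ReleaseBuffer
        }
    };

    // Every queued frame must eventually be released, leaving the count balanced.
    void RoundTrip(ToyNvMap& nvmap, std::uint32_t buffer_id) {
        nvmap.DuplicateHandle(buffer_id, true);
        nvmap.FreeHandle(buffer_id, true);
        assert(nvmap.session_refs[buffer_id] == 0);
    }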
diff --git a/src/core/hle/service/nvflinger/buffer_queue_producer.h b/src/core/hle/service/nvflinger/buffer_queue_producer.h
index 42d4722dc..0ba03a568 100644
--- a/src/core/hle/service/nvflinger/buffer_queue_producer.h
+++ b/src/core/hle/service/nvflinger/buffer_queue_producer.h
@@ -31,6 +31,10 @@ namespace Service::KernelHelpers {
 class ServiceContext;
 } // namespace Service::KernelHelpers
 
+namespace Service::Nvidia::NvCore {
+class NvMap;
+} // namespace Service::Nvidia::NvCore
+
 namespace Service::android {
 
 class BufferQueueCore;
@@ -39,7 +43,8 @@ class IProducerListener;
 class BufferQueueProducer final : public IBinder {
 public:
     explicit BufferQueueProducer(Service::KernelHelpers::ServiceContext& service_context_,
-                                 std::shared_ptr<BufferQueueCore> buffer_queue_core_);
+                                 std::shared_ptr<BufferQueueCore> buffer_queue_core_,
+                                 Service::Nvidia::NvCore::NvMap& nvmap_);
     ~BufferQueueProducer();
 
     void Transact(Kernel::HLERequestContext& ctx, android::TransactionId code, u32 flags) override;
@@ -78,6 +83,8 @@ private:
     s32 next_callback_ticket{};
     s32 current_callback_ticket{};
     std::condition_variable_any callback_condition;
+
+    Service::Nvidia::NvCore::NvMap& nvmap;
 };
 
 } // namespace Service::android
diff --git a/src/core/hle/service/nvflinger/nvflinger.cpp b/src/core/hle/service/nvflinger/nvflinger.cpp
index 9b382bf56..aa14d2cbc 100644
--- a/src/core/hle/service/nvflinger/nvflinger.cpp
+++ b/src/core/hle/service/nvflinger/nvflinger.cpp
@@ -22,7 +22,10 @@
22#include "core/hle/service/nvflinger/ui/graphic_buffer.h" 22#include "core/hle/service/nvflinger/ui/graphic_buffer.h"
23#include "core/hle/service/vi/display/vi_display.h" 23#include "core/hle/service/vi/display/vi_display.h"
24#include "core/hle/service/vi/layer/vi_layer.h" 24#include "core/hle/service/vi/layer/vi_layer.h"
25#include "core/hle/service/vi/vi_results.h"
25#include "video_core/gpu.h" 26#include "video_core/gpu.h"
27#include "video_core/host1x/host1x.h"
28#include "video_core/host1x/syncpoint_manager.h"
26 29
27namespace Service::NVFlinger { 30namespace Service::NVFlinger {
28 31
@@ -30,7 +33,7 @@ constexpr auto frame_ns = std::chrono::nanoseconds{1000000000 / 60};
30 33
31void NVFlinger::SplitVSync(std::stop_token stop_token) { 34void NVFlinger::SplitVSync(std::stop_token stop_token) {
32 system.RegisterHostThread(); 35 system.RegisterHostThread();
33 std::string name = "yuzu:VSyncThread"; 36 std::string name = "VSyncThread";
34 MicroProfileOnThreadCreate(name.c_str()); 37 MicroProfileOnThreadCreate(name.c_str());
35 38
36 // Cleanup 39 // Cleanup
@@ -104,10 +107,15 @@ NVFlinger::~NVFlinger() {
104 display.GetLayer(layer).Core().NotifyShutdown(); 107 display.GetLayer(layer).Core().NotifyShutdown();
105 } 108 }
106 } 109 }
110
111 if (nvdrv) {
112 nvdrv->Close(disp_fd);
113 }
107} 114}
108 115
109void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) { 116void NVFlinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
110 nvdrv = std::move(instance); 117 nvdrv = std::move(instance);
118 disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
111} 119}
112 120
113std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) { 121std::optional<u64> NVFlinger::OpenDisplay(std::string_view name) {
@@ -141,7 +149,7 @@ std::optional<u64> NVFlinger::CreateLayer(u64 display_id) {
141 149
142void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) { 150void NVFlinger::CreateLayerAtId(VI::Display& display, u64 layer_id) {
143 const auto buffer_id = next_buffer_queue_id++; 151 const auto buffer_id = next_buffer_queue_id++;
144 display.CreateLayer(layer_id, buffer_id); 152 display.CreateLayer(layer_id, buffer_id, nvdrv->container);
145} 153}
146 154
147void NVFlinger::CloseLayer(u64 layer_id) { 155void NVFlinger::CloseLayer(u64 layer_id) {
@@ -163,15 +171,15 @@ std::optional<u32> NVFlinger::FindBufferQueueId(u64 display_id, u64 layer_id) {
163 return layer->GetBinderId(); 171 return layer->GetBinderId();
164} 172}
165 173
166Kernel::KReadableEvent* NVFlinger::FindVsyncEvent(u64 display_id) { 174ResultVal<Kernel::KReadableEvent*> NVFlinger::FindVsyncEvent(u64 display_id) {
167 const auto lock_guard = Lock(); 175 const auto lock_guard = Lock();
168 auto* const display = FindDisplay(display_id); 176 auto* const display = FindDisplay(display_id);
169 177
170 if (display == nullptr) { 178 if (display == nullptr) {
171 return nullptr; 179 return VI::ResultNotFound;
172 } 180 }
173 181
174 return &display->GetVSyncEvent(); 182 return display->GetVSyncEvent();
175} 183}
176 184
177VI::Display* NVFlinger::FindDisplay(u64 display_id) { 185VI::Display* NVFlinger::FindDisplay(u64 display_id) {
@@ -261,30 +269,24 @@ void NVFlinger::Compose() {
261 return; // We are likely shutting down 269 return; // We are likely shutting down
262 } 270 }
263 271
264 auto& gpu = system.GPU();
265 const auto& multi_fence = buffer.fence;
266 guard->unlock();
267 for (u32 fence_id = 0; fence_id < multi_fence.num_fences; fence_id++) {
268 const auto& fence = multi_fence.fences[fence_id];
269 gpu.WaitFence(fence.id, fence.value);
270 }
271 guard->lock();
272
273 MicroProfileFlip();
274
275 // Now send the buffer to the GPU for drawing. 272 // Now send the buffer to the GPU for drawing.
276 // TODO(Subv): Support more than just disp0. The display device selection is probably based 273 // TODO(Subv): Support more than just disp0. The display device selection is probably based
277 // on which display we're drawing (Default, Internal, External, etc) 274 // on which display we're drawing (Default, Internal, External, etc)
278 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>("/dev/nvdisp_disp0"); 275 auto nvdisp = nvdrv->GetDevice<Nvidia::Devices::nvdisp_disp0>(disp_fd);
279 ASSERT(nvdisp); 276 ASSERT(nvdisp);
280 277
278 guard->unlock();
281 Common::Rectangle<int> crop_rect{ 279 Common::Rectangle<int> crop_rect{
282 static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()), 280 static_cast<int>(buffer.crop.Left()), static_cast<int>(buffer.crop.Top()),
283 static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())}; 281 static_cast<int>(buffer.crop.Right()), static_cast<int>(buffer.crop.Bottom())};
284 282
285 nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(), 283 nvdisp->flip(igbp_buffer.BufferId(), igbp_buffer.Offset(), igbp_buffer.ExternalFormat(),
286 igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(), 284 igbp_buffer.Width(), igbp_buffer.Height(), igbp_buffer.Stride(),
287 static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect); 285 static_cast<android::BufferTransformFlags>(buffer.transform), crop_rect,
286 buffer.fence.fences, buffer.fence.num_fences);
287
288 MicroProfileFlip();
289 guard->lock();
288 290
289 swap_interval = buffer.swap_interval; 291 swap_interval = buffer.swap_interval;
290 292
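Note on the Compose() hunk above: fence waiting is no longer done on the CPU; buffer.fence.fences is forwarded into nvdisp->flip(), and NVFlinger now owns a display fd opened in SetNVDrvInstance() and closed in the destructor. A minimal sketch of that fd lifecycle, assuming only the Module::Open/Close calls used in this diff (the DispHandle name is illustrative, not yuzu API):

    class DispHandle {
    public:
        explicit DispHandle(std::shared_ptr<Nvidia::Module> nvdrv_)
            : nvdrv{std::move(nvdrv_)}, disp_fd{nvdrv->Open("/dev/nvdisp_disp0")} {}
        ~DispHandle() {
            if (nvdrv) {
                nvdrv->Close(disp_fd); // mirrors ~NVFlinger() above
            }
        }
        s32 GetFd() const { return disp_fd; }
    private:
        std::shared_ptr<Nvidia::Module> nvdrv;
        s32 disp_fd{};
    };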
diff --git a/src/core/hle/service/nvflinger/nvflinger.h b/src/core/hle/service/nvflinger/nvflinger.h
index 044ac6ac8..b62615de2 100644
--- a/src/core/hle/service/nvflinger/nvflinger.h
+++ b/src/core/hle/service/nvflinger/nvflinger.h
@@ -11,6 +11,7 @@
11#include <vector> 11#include <vector>
12 12
13#include "common/common_types.h" 13#include "common/common_types.h"
14#include "core/hle/result.h"
14#include "core/hle/service/kernel_helpers.h" 15#include "core/hle/service/kernel_helpers.h"
15 16
16namespace Common { 17namespace Common {
@@ -71,8 +72,9 @@ public:
71 72
72 /// Gets the vsync event for the specified display. 73 /// Gets the vsync event for the specified display.
73 /// 74 ///
74 /// If an invalid display ID is provided, then nullptr is returned. 75 /// If an invalid display ID is provided, then VI::ResultNotFound is returned.
75 [[nodiscard]] Kernel::KReadableEvent* FindVsyncEvent(u64 display_id); 76 /// If the vsync event has already been retrieved, then VI::ResultPermissionDenied is returned.
77 [[nodiscard]] ResultVal<Kernel::KReadableEvent*> FindVsyncEvent(u64 display_id);
76 78
77 /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when 79 /// Performs a composition request to the emulated nvidia GPU and triggers the vsync events when
78 /// finished. 80 /// finished.
@@ -114,6 +116,7 @@ private:
114 void SplitVSync(std::stop_token stop_token); 116 void SplitVSync(std::stop_token stop_token);
115 117
116 std::shared_ptr<Nvidia::Module> nvdrv; 118 std::shared_ptr<Nvidia::Module> nvdrv;
119 s32 disp_fd;
117 120
118 std::list<VI::Display> displays; 121 std::list<VI::Display> displays;
119 122
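A minimal caller-side sketch of the new FindVsyncEvent contract, assuming yuzu's ResultVal API exactly as exercised by the vi.cpp hunk later in this diff:

    const auto vsync_event = nv_flinger.FindVsyncEvent(display_id);
    if (vsync_event.Failed()) {
        // VI::ResultNotFound for a bad display ID, or
        // VI::ResultPermissionDenied once the event was already handed out.
        return vsync_event.Code();
    }
    Kernel::KReadableEvent* event = *vsync_event; // safe after the Failed() check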
diff --git a/src/core/hle/service/sockets/bsd.cpp b/src/core/hle/service/sockets/bsd.cpp
index cc679cc81..9e94a462f 100644
--- a/src/core/hle/service/sockets/bsd.cpp
+++ b/src/core/hle/service/sockets/bsd.cpp
@@ -929,7 +929,7 @@ BSD::BSD(Core::System& system_, const char* name)
929 proxy_packet_received = room_member->BindOnProxyPacketReceived( 929 proxy_packet_received = room_member->BindOnProxyPacketReceived(
930 [this](const Network::ProxyPacket& packet) { OnProxyPacketReceived(packet); }); 930 [this](const Network::ProxyPacket& packet) { OnProxyPacketReceived(packet); });
931 } else { 931 } else {
932 LOG_ERROR(Service, "Network isn't initalized"); 932 LOG_ERROR(Service, "Network isn't initialized");
933 } 933 }
934} 934}
935 935
diff --git a/src/core/hle/service/vi/display/vi_display.cpp b/src/core/hle/service/vi/display/vi_display.cpp
index b34febb50..288aafaaf 100644
--- a/src/core/hle/service/vi/display/vi_display.cpp
+++ b/src/core/hle/service/vi/display/vi_display.cpp
@@ -12,6 +12,7 @@
12#include "core/hle/kernel/k_readable_event.h" 12#include "core/hle/kernel/k_readable_event.h"
13#include "core/hle/kernel/k_writable_event.h" 13#include "core/hle/kernel/k_writable_event.h"
14#include "core/hle/service/kernel_helpers.h" 14#include "core/hle/service/kernel_helpers.h"
15#include "core/hle/service/nvdrv/core/container.h"
15#include "core/hle/service/nvflinger/buffer_item_consumer.h" 16#include "core/hle/service/nvflinger/buffer_item_consumer.h"
16#include "core/hle/service/nvflinger/buffer_queue_consumer.h" 17#include "core/hle/service/nvflinger/buffer_queue_consumer.h"
17#include "core/hle/service/nvflinger/buffer_queue_core.h" 18#include "core/hle/service/nvflinger/buffer_queue_core.h"
@@ -19,6 +20,7 @@
19#include "core/hle/service/nvflinger/hos_binder_driver_server.h" 20#include "core/hle/service/nvflinger/hos_binder_driver_server.h"
20#include "core/hle/service/vi/display/vi_display.h" 21#include "core/hle/service/vi/display/vi_display.h"
21#include "core/hle/service/vi/layer/vi_layer.h" 22#include "core/hle/service/vi/layer/vi_layer.h"
23#include "core/hle/service/vi/vi_results.h"
22 24
23namespace Service::VI { 25namespace Service::VI {
24 26
@@ -28,11 +30,13 @@ struct BufferQueue {
28 std::unique_ptr<android::BufferQueueConsumer> consumer; 30 std::unique_ptr<android::BufferQueueConsumer> consumer;
29}; 31};
30 32
31static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context) { 33static BufferQueue CreateBufferQueue(KernelHelpers::ServiceContext& service_context,
34 Service::Nvidia::NvCore::NvMap& nvmap) {
32 auto buffer_queue_core = std::make_shared<android::BufferQueueCore>(); 35 auto buffer_queue_core = std::make_shared<android::BufferQueueCore>();
33 return {buffer_queue_core, 36 return {
34 std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core), 37 buffer_queue_core,
35 std::make_unique<android::BufferQueueConsumer>(buffer_queue_core)}; 38 std::make_unique<android::BufferQueueProducer>(service_context, buffer_queue_core, nvmap),
39 std::make_unique<android::BufferQueueConsumer>(buffer_queue_core, nvmap)};
36} 40}
37 41
38Display::Display(u64 id, std::string name_, 42Display::Display(u64 id, std::string name_,
@@ -55,18 +59,29 @@ const Layer& Display::GetLayer(std::size_t index) const {
55 return *layers.at(index); 59 return *layers.at(index);
56} 60}
57 61
58Kernel::KReadableEvent& Display::GetVSyncEvent() { 62ResultVal<Kernel::KReadableEvent*> Display::GetVSyncEvent() {
59 return vsync_event->GetReadableEvent(); 63 if (got_vsync_event) {
64 return ResultPermissionDenied;
65 }
66
67 got_vsync_event = true;
68
69 return GetVSyncEventUnchecked();
70}
71
72Kernel::KReadableEvent* Display::GetVSyncEventUnchecked() {
73 return &vsync_event->GetReadableEvent();
60} 74}
61 75
62void Display::SignalVSyncEvent() { 76void Display::SignalVSyncEvent() {
63 vsync_event->GetWritableEvent().Signal(); 77 vsync_event->GetWritableEvent().Signal();
64} 78}
65 79
66void Display::CreateLayer(u64 layer_id, u32 binder_id) { 80void Display::CreateLayer(u64 layer_id, u32 binder_id,
81 Service::Nvidia::NvCore::Container& nv_core) {
67 ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment"); 82 ASSERT_MSG(layers.empty(), "Only one layer is supported per display at the moment");
68 83
69 auto [core, producer, consumer] = CreateBufferQueue(service_context); 84 auto [core, producer, consumer] = CreateBufferQueue(service_context, nv_core.GetNvMapFile());
70 85
71 auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer)); 86 auto buffer_item_consumer = std::make_shared<android::BufferItemConsumer>(std::move(consumer));
72 buffer_item_consumer->Connect(false); 87 buffer_item_consumer->Connect(false);
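Sketch of why GetVSyncEventUnchecked() exists alongside the guarded accessor: internal users must keep working after a guest claims the event, so they bypass the one-shot got_vsync_event flag. PumpVSync is illustrative, not a real call site:

    void PumpVSync(Service::VI::Display& display) {
        display.SignalVSyncEvent();                     // always permitted
        auto* event = display.GetVSyncEventUnchecked(); // skips the one-shot guard
        (void)event;
    }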
diff --git a/src/core/hle/service/vi/display/vi_display.h b/src/core/hle/service/vi/display/vi_display.h
index 3838bb599..33d5f398c 100644
--- a/src/core/hle/service/vi/display/vi_display.h
+++ b/src/core/hle/service/vi/display/vi_display.h
@@ -9,6 +9,7 @@
9 9
10#include "common/common_funcs.h" 10#include "common/common_funcs.h"
11#include "common/common_types.h" 11#include "common/common_types.h"
12#include "core/hle/result.h"
12 13
13namespace Kernel { 14namespace Kernel {
14class KEvent; 15class KEvent;
@@ -26,6 +27,11 @@ namespace Service::NVFlinger {
26class HosBinderDriverServer; 27class HosBinderDriverServer;
27} 28}
28 29
30namespace Service::Nvidia::NvCore {
31class Container;
32class NvMap;
33} // namespace Service::Nvidia::NvCore
34
29namespace Service::VI { 35namespace Service::VI {
30 36
31class Layer; 37class Layer;
@@ -73,8 +79,16 @@ public:
73 return layers.size(); 79 return layers.size();
74 } 80 }
75 81
76 /// Gets the readable vsync event. 82 /**
77 Kernel::KReadableEvent& GetVSyncEvent(); 83 * Gets the internal vsync event.
84 *
85 * @returns The internal Vsync event if it has not yet been retrieved,
86 * VI::ResultPermissionDenied otherwise.
87 */
88 [[nodiscard]] ResultVal<Kernel::KReadableEvent*> GetVSyncEvent();
89
90 /// Gets the internal vsync event.
91 Kernel::KReadableEvent* GetVSyncEventUnchecked();
78 92
79 /// Signals the internal vsync event. 93 /// Signals the internal vsync event.
80 void SignalVSyncEvent(); 94 void SignalVSyncEvent();
@@ -84,7 +98,7 @@ public:
84 /// @param layer_id The ID to assign to the created layer. 98 /// @param layer_id The ID to assign to the created layer.
85 /// @param binder_id The ID assigned to the buffer queue. 99 /// @param binder_id The ID assigned to the buffer queue.
86 /// 100 ///
87 void CreateLayer(u64 layer_id, u32 binder_id); 101 void CreateLayer(u64 layer_id, u32 binder_id, Service::Nvidia::NvCore::Container& core);
88 102
89 /// Closes and removes a layer from this display with the given ID. 103 /// Closes and removes a layer from this display with the given ID.
90 /// 104 ///
@@ -118,6 +132,7 @@ private:
118 132
119 std::vector<std::unique_ptr<Layer>> layers; 133 std::vector<std::unique_ptr<Layer>> layers;
120 Kernel::KEvent* vsync_event{}; 134 Kernel::KEvent* vsync_event{};
135 bool got_vsync_event{false};
121}; 136};
122 137
123} // namespace Service::VI 138} // namespace Service::VI
diff --git a/src/core/hle/service/vi/vi.cpp b/src/core/hle/service/vi/vi.cpp
index 546879648..9c917cacf 100644
--- a/src/core/hle/service/vi/vi.cpp
+++ b/src/core/hle/service/vi/vi.cpp
@@ -29,16 +29,12 @@
29#include "core/hle/service/service.h" 29#include "core/hle/service/service.h"
30#include "core/hle/service/vi/vi.h" 30#include "core/hle/service/vi/vi.h"
31#include "core/hle/service/vi/vi_m.h" 31#include "core/hle/service/vi/vi_m.h"
32#include "core/hle/service/vi/vi_results.h"
32#include "core/hle/service/vi/vi_s.h" 33#include "core/hle/service/vi/vi_s.h"
33#include "core/hle/service/vi/vi_u.h" 34#include "core/hle/service/vi/vi_u.h"
34 35
35namespace Service::VI { 36namespace Service::VI {
36 37
37constexpr Result ERR_OPERATION_FAILED{ErrorModule::VI, 1};
38constexpr Result ERR_PERMISSION_DENIED{ErrorModule::VI, 5};
39constexpr Result ERR_UNSUPPORTED{ErrorModule::VI, 6};
40constexpr Result ERR_NOT_FOUND{ErrorModule::VI, 7};
41
42struct DisplayInfo { 38struct DisplayInfo {
43 /// The name of this particular display. 39 /// The name of this particular display.
44 char display_name[0x40]{"Default"}; 40 char display_name[0x40]{"Default"};
@@ -62,6 +58,7 @@ static_assert(sizeof(DisplayInfo) == 0x60, "DisplayInfo has wrong size");
62class NativeWindow final { 58class NativeWindow final {
63public: 59public:
64 constexpr explicit NativeWindow(u32 id_) : id{id_} {} 60 constexpr explicit NativeWindow(u32 id_) : id{id_} {}
61 constexpr explicit NativeWindow(const NativeWindow& other) = default;
65 62
66private: 63private:
67 const u32 magic = 2; 64 const u32 magic = 2;
@@ -348,7 +345,7 @@ private:
348 if (!layer_id) { 345 if (!layer_id) {
349 LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display); 346 LOG_ERROR(Service_VI, "Layer not found! display=0x{:016X}", display);
350 IPC::ResponseBuilder rb{ctx, 2}; 347 IPC::ResponseBuilder rb{ctx, 2};
351 rb.Push(ERR_NOT_FOUND); 348 rb.Push(ResultNotFound);
352 return; 349 return;
353 } 350 }
354 351
@@ -498,7 +495,7 @@ private:
498 if (!display_id) { 495 if (!display_id) {
499 LOG_ERROR(Service_VI, "Display not found! display_name={}", name); 496 LOG_ERROR(Service_VI, "Display not found! display_name={}", name);
500 IPC::ResponseBuilder rb{ctx, 2}; 497 IPC::ResponseBuilder rb{ctx, 2};
501 rb.Push(ERR_NOT_FOUND); 498 rb.Push(ResultNotFound);
502 return; 499 return;
503 } 500 }
504 501
@@ -554,14 +551,14 @@ private:
554 551
555 if (scaling_mode > NintendoScaleMode::PreserveAspectRatio) { 552 if (scaling_mode > NintendoScaleMode::PreserveAspectRatio) {
556 LOG_ERROR(Service_VI, "Invalid scaling mode provided."); 553 LOG_ERROR(Service_VI, "Invalid scaling mode provided.");
557 rb.Push(ERR_OPERATION_FAILED); 554 rb.Push(ResultOperationFailed);
558 return; 555 return;
559 } 556 }
560 557
561 if (scaling_mode != NintendoScaleMode::ScaleToWindow && 558 if (scaling_mode != NintendoScaleMode::ScaleToWindow &&
562 scaling_mode != NintendoScaleMode::PreserveAspectRatio) { 559 scaling_mode != NintendoScaleMode::PreserveAspectRatio) {
563 LOG_ERROR(Service_VI, "Unsupported scaling mode supplied."); 560 LOG_ERROR(Service_VI, "Unsupported scaling mode supplied.");
564 rb.Push(ERR_UNSUPPORTED); 561 rb.Push(ResultNotSupported);
565 return; 562 return;
566 } 563 }
567 564
@@ -594,7 +591,7 @@ private:
594 if (!display_id) { 591 if (!display_id) {
595 LOG_ERROR(Service_VI, "Layer not found! layer_id={}", layer_id); 592 LOG_ERROR(Service_VI, "Layer not found! layer_id={}", layer_id);
596 IPC::ResponseBuilder rb{ctx, 2}; 593 IPC::ResponseBuilder rb{ctx, 2};
597 rb.Push(ERR_NOT_FOUND); 594 rb.Push(ResultNotFound);
598 return; 595 return;
599 } 596 }
600 597
@@ -602,7 +599,7 @@ private:
602 if (!buffer_queue_id) { 599 if (!buffer_queue_id) {
603 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id); 600 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", *display_id);
604 IPC::ResponseBuilder rb{ctx, 2}; 601 IPC::ResponseBuilder rb{ctx, 2};
605 rb.Push(ERR_NOT_FOUND); 602 rb.Push(ResultNotFound);
606 return; 603 return;
607 } 604 }
608 605
@@ -640,7 +637,7 @@ private:
640 if (!layer_id) { 637 if (!layer_id) {
641 LOG_ERROR(Service_VI, "Layer not found! display_id={}", display_id); 638 LOG_ERROR(Service_VI, "Layer not found! display_id={}", display_id);
642 IPC::ResponseBuilder rb{ctx, 2}; 639 IPC::ResponseBuilder rb{ctx, 2};
643 rb.Push(ERR_NOT_FOUND); 640 rb.Push(ResultNotFound);
644 return; 641 return;
645 } 642 }
646 643
@@ -648,7 +645,7 @@ private:
648 if (!buffer_queue_id) { 645 if (!buffer_queue_id) {
649 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id); 646 LOG_ERROR(Service_VI, "Buffer queue id not found! display_id={}", display_id);
650 IPC::ResponseBuilder rb{ctx, 2}; 647 IPC::ResponseBuilder rb{ctx, 2};
651 rb.Push(ERR_NOT_FOUND); 648 rb.Push(ResultNotFound);
652 return; 649 return;
653 } 650 }
654 651
@@ -675,19 +672,23 @@ private:
675 IPC::RequestParser rp{ctx}; 672 IPC::RequestParser rp{ctx};
676 const u64 display_id = rp.Pop<u64>(); 673 const u64 display_id = rp.Pop<u64>();
677 674
678 LOG_WARNING(Service_VI, "(STUBBED) called. display_id=0x{:016X}", display_id); 675 LOG_DEBUG(Service_VI, "called. display_id={}", display_id);
679 676
680 const auto vsync_event = nv_flinger.FindVsyncEvent(display_id); 677 const auto vsync_event = nv_flinger.FindVsyncEvent(display_id);
681 if (!vsync_event) { 678 if (vsync_event.Failed()) {
682 LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id); 679 const auto result = vsync_event.Code();
680 if (result == ResultNotFound) {
681 LOG_ERROR(Service_VI, "Vsync event was not found for display_id={}", display_id);
682 }
683
683 IPC::ResponseBuilder rb{ctx, 2}; 684 IPC::ResponseBuilder rb{ctx, 2};
684 rb.Push(ERR_NOT_FOUND); 685 rb.Push(result);
685 return; 686 return;
686 } 687 }
687 688
688 IPC::ResponseBuilder rb{ctx, 2, 1}; 689 IPC::ResponseBuilder rb{ctx, 2, 1};
689 rb.Push(ResultSuccess); 690 rb.Push(ResultSuccess);
690 rb.PushCopyObjects(vsync_event); 691 rb.PushCopyObjects(*vsync_event);
691 } 692 }
692 693
693 void ConvertScalingMode(Kernel::HLERequestContext& ctx) { 694 void ConvertScalingMode(Kernel::HLERequestContext& ctx) {
@@ -764,7 +765,7 @@ private:
764 return ConvertedScaleMode::PreserveAspectRatio; 765 return ConvertedScaleMode::PreserveAspectRatio;
765 default: 766 default:
766 LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode); 767 LOG_ERROR(Service_VI, "Invalid scaling mode specified, mode={}", mode);
767 return ERR_OPERATION_FAILED; 768 return ResultOperationFailed;
768 } 769 }
769 } 770 }
770 771
@@ -794,7 +795,7 @@ void detail::GetDisplayServiceImpl(Kernel::HLERequestContext& ctx, Core::System&
794 if (!IsValidServiceAccess(permission, policy)) { 795 if (!IsValidServiceAccess(permission, policy)) {
795 LOG_ERROR(Service_VI, "Permission denied for policy {}", policy); 796 LOG_ERROR(Service_VI, "Permission denied for policy {}", policy);
796 IPC::ResponseBuilder rb{ctx, 2}; 797 IPC::ResponseBuilder rb{ctx, 2};
797 rb.Push(ERR_PERMISSION_DENIED); 798 rb.Push(ResultPermissionDenied);
798 return; 799 return;
799 } 800 }
800 801
diff --git a/src/core/hle/service/vi/vi_results.h b/src/core/hle/service/vi/vi_results.h
new file mode 100644
index 000000000..a46c247d2
--- /dev/null
+++ b/src/core/hle/service/vi/vi_results.h
@@ -0,0 +1,15 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#pragma once
5
6#include "core/hle/result.h"
7
8namespace Service::VI {
9
10constexpr Result ResultOperationFailed{ErrorModule::VI, 1};
11constexpr Result ResultPermissionDenied{ErrorModule::VI, 5};
12constexpr Result ResultNotSupported{ErrorModule::VI, 6};
13constexpr Result ResultNotFound{ErrorModule::VI, 7};
14
15} // namespace Service::VI
diff --git a/src/core/internal_network/network.cpp b/src/core/internal_network/network.cpp
index cdf38a2a4..447fbffaa 100644
--- a/src/core/internal_network/network.cpp
+++ b/src/core/internal_network/network.cpp
@@ -364,7 +364,7 @@ std::pair<s32, Errno> Poll(std::vector<PollFD>& pollfds, s32 timeout) {
364 std::vector<WSAPOLLFD> host_pollfds(pollfds.size()); 364 std::vector<WSAPOLLFD> host_pollfds(pollfds.size());
365 std::transform(pollfds.begin(), pollfds.end(), host_pollfds.begin(), [](PollFD fd) { 365 std::transform(pollfds.begin(), pollfds.end(), host_pollfds.begin(), [](PollFD fd) {
366 WSAPOLLFD result; 366 WSAPOLLFD result;
367 result.fd = fd.socket->fd; 367 result.fd = fd.socket->GetFD();
368 result.events = TranslatePollEvents(fd.events); 368 result.events = TranslatePollEvents(fd.events);
369 result.revents = 0; 369 result.revents = 0;
370 return result; 370 return result;
@@ -430,12 +430,12 @@ std::pair<SocketBase::AcceptResult, Errno> Socket::Accept() {
430 return {AcceptResult{}, GetAndLogLastError()}; 430 return {AcceptResult{}, GetAndLogLastError()};
431 } 431 }
432 432
433 AcceptResult result;
434 result.socket = std::make_unique<Socket>();
435 result.socket->fd = new_socket;
436
437 ASSERT(addrlen == sizeof(sockaddr_in)); 433 ASSERT(addrlen == sizeof(sockaddr_in));
438 result.sockaddr_in = TranslateToSockAddrIn(addr); 434
435 AcceptResult result{
436 .socket = std::make_unique<Socket>(new_socket),
437 .sockaddr_in = TranslateToSockAddrIn(addr),
438 };
439 439
440 return {std::move(result), Errno::SUCCESS}; 440 return {std::move(result), Errno::SUCCESS};
441} 441}
diff --git a/src/core/internal_network/network_interface.cpp b/src/core/internal_network/network_interface.cpp
index 0f0a66160..057fd3661 100644
--- a/src/core/internal_network/network_interface.cpp
+++ b/src/core/internal_network/network_interface.cpp
@@ -188,7 +188,7 @@ std::vector<NetworkInterface> GetAvailableNetworkInterfaces() {
188std::optional<NetworkInterface> GetSelectedNetworkInterface() { 188std::optional<NetworkInterface> GetSelectedNetworkInterface() {
189 const auto& selected_network_interface = Settings::values.network_interface.GetValue(); 189 const auto& selected_network_interface = Settings::values.network_interface.GetValue();
190 const auto network_interfaces = Network::GetAvailableNetworkInterfaces(); 190 const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
191 if (network_interfaces.size() == 0) { 191 if (network_interfaces.empty()) {
192 LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces"); 192 LOG_ERROR(Network, "GetAvailableNetworkInterfaces returned no interfaces");
193 return std::nullopt; 193 return std::nullopt;
194 } 194 }
@@ -206,4 +206,14 @@ std::optional<NetworkInterface> GetSelectedNetworkInterface() {
206 return *res; 206 return *res;
207} 207}
208 208
209void SelectFirstNetworkInterface() {
210 const auto network_interfaces = Network::GetAvailableNetworkInterfaces();
211
212 if (network_interfaces.empty()) {
213 return;
214 }
215
216 Settings::values.network_interface.SetValue(network_interfaces[0].name);
217}
218
209} // namespace Network 219} // namespace Network
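One plausible call site for the new helper (illustrative; not part of this diff): fall back to the first enumerated interface when the configured one cannot be resolved.

    if (!Network::GetSelectedNetworkInterface()) {
        Network::SelectFirstNetworkInterface(); // no-op when no interfaces exist
    }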
diff --git a/src/core/internal_network/network_interface.h b/src/core/internal_network/network_interface.h
index 9b98b6b42..175e61b1f 100644
--- a/src/core/internal_network/network_interface.h
+++ b/src/core/internal_network/network_interface.h
@@ -24,5 +24,6 @@ struct NetworkInterface {
24 24
25std::vector<NetworkInterface> GetAvailableNetworkInterfaces(); 25std::vector<NetworkInterface> GetAvailableNetworkInterfaces();
26std::optional<NetworkInterface> GetSelectedNetworkInterface(); 26std::optional<NetworkInterface> GetSelectedNetworkInterface();
27void SelectFirstNetworkInterface();
27 28
28} // namespace Network 29} // namespace Network
diff --git a/src/core/internal_network/socket_proxy.cpp b/src/core/internal_network/socket_proxy.cpp
index 0c746bd82..7d5d37bbc 100644
--- a/src/core/internal_network/socket_proxy.cpp
+++ b/src/core/internal_network/socket_proxy.cpp
@@ -6,6 +6,7 @@
6 6
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "common/zstd_compression.h"
9#include "core/internal_network/network.h" 10#include "core/internal_network/network.h"
10#include "core/internal_network/network_interface.h" 11#include "core/internal_network/network_interface.h"
11#include "core/internal_network/socket_proxy.h" 12#include "core/internal_network/socket_proxy.h"
@@ -32,8 +33,11 @@ void ProxySocket::HandleProxyPacket(const ProxyPacket& packet) {
32 return; 33 return;
33 } 34 }
34 35
36 auto decompressed = packet;
37 decompressed.data = Common::Compression::DecompressDataZSTD(packet.data);
38
35 std::lock_guard guard(packets_mutex); 39 std::lock_guard guard(packets_mutex);
36 received_packets.push(packet); 40 received_packets.push(decompressed);
37} 41}
38 42
39template <typename T> 43template <typename T>
@@ -185,6 +189,8 @@ std::pair<s32, Errno> ProxySocket::Send(const std::vector<u8>& message, int flag
185void ProxySocket::SendPacket(ProxyPacket& packet) { 189void ProxySocket::SendPacket(ProxyPacket& packet) {
186 if (auto room_member = room_network.GetRoomMember().lock()) { 190 if (auto room_member = room_network.GetRoomMember().lock()) {
187 if (room_member->IsConnected()) { 191 if (room_member->IsConnected()) {
192 packet.data = Common::Compression::CompressDataZSTDDefault(packet.data.data(),
193 packet.data.size());
188 room_member->SendProxyPacket(packet); 194 room_member->SendProxyPacket(packet);
189 } 195 }
190 } 196 }
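Round-trip sketch of the invariant the two hunks above establish (the sender compresses, so the receiver must decompress), using the same common/zstd_compression.h helpers with the call shapes shown in this diff:

    std::vector<u8> payload{0x01, 0x02, 0x03}; // stand-in for an LDN frame
    const auto compressed =
        Common::Compression::CompressDataZSTDDefault(payload.data(), payload.size());
    const auto restored = Common::Compression::DecompressDataZSTD(compressed);
    ASSERT(restored == payload);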
diff --git a/src/core/internal_network/sockets.h b/src/core/internal_network/sockets.h
index a70429b19..2e328c645 100644
--- a/src/core/internal_network/sockets.h
+++ b/src/core/internal_network/sockets.h
@@ -32,6 +32,10 @@ public:
32 std::unique_ptr<SocketBase> socket; 32 std::unique_ptr<SocketBase> socket;
33 SockAddrIn sockaddr_in; 33 SockAddrIn sockaddr_in;
34 }; 34 };
35
36 SocketBase() = default;
37 explicit SocketBase(SOCKET fd_) : fd{fd_} {}
38
35 virtual ~SocketBase() = default; 39 virtual ~SocketBase() = default;
36 40
37 virtual SocketBase& operator=(const SocketBase&) = delete; 41 virtual SocketBase& operator=(const SocketBase&) = delete;
@@ -89,12 +93,19 @@ public:
89 93
90 virtual void HandleProxyPacket(const ProxyPacket& packet) = 0; 94 virtual void HandleProxyPacket(const ProxyPacket& packet) = 0;
91 95
96 [[nodiscard]] SOCKET GetFD() const {
97 return fd;
98 }
99
100protected:
92 SOCKET fd = INVALID_SOCKET; 101 SOCKET fd = INVALID_SOCKET;
93}; 102};
94 103
95class Socket : public SocketBase { 104class Socket : public SocketBase {
96public: 105public:
97 Socket() = default; 106 Socket() = default;
107 explicit Socket(SOCKET fd_) : SocketBase{fd_} {}
108
98 ~Socket() override; 109 ~Socket() override;
99 110
100 Socket(const Socket&) = delete; 111 Socket(const Socket&) = delete;
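Sketch of the encapsulation change above: fd is now protected, so external code constructs sockets through the new SOCKET-taking constructor and reads the descriptor via GetFD(), as the network.cpp hunks in this diff already do:

    auto sock = std::make_unique<Socket>(new_socket); // was: sock->fd = new_socket;
    const SOCKET raw = sock->GetFD();                 // was: sock->fd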
diff --git a/src/core/loader/loader.cpp b/src/core/loader/loader.cpp
index 104d16efa..f24474ed8 100644
--- a/src/core/loader/loader.cpp
+++ b/src/core/loader/loader.cpp
@@ -244,6 +244,10 @@ static std::unique_ptr<AppLoader> GetFileLoader(Core::System& system, FileSys::V
244 244
245std::unique_ptr<AppLoader> GetLoader(Core::System& system, FileSys::VirtualFile file, 245std::unique_ptr<AppLoader> GetLoader(Core::System& system, FileSys::VirtualFile file,
246 u64 program_id, std::size_t program_index) { 246 u64 program_id, std::size_t program_index) {
247 if (!file) {
248 return nullptr;
249 }
250
247 FileType type = IdentifyFile(file); 251 FileType type = IdentifyFile(file);
248 const FileType filename_type = GuessFromFilename(file->GetName()); 252 const FileType filename_type = GuessFromFilename(file->GetName());
249 253
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 34ad7cadd..2ac792566 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -551,6 +551,11 @@ struct Memory::Impl {
551 []() {}); 551 []() {});
552 } 552 }
553 553
554 [[nodiscard]] u8* GetPointerSilent(const VAddr vaddr) const {
555 return GetPointerImpl(
556 vaddr, []() {}, []() {});
557 }
558
554 /** 559 /**
555 * Reads a particular data type out of memory at the given virtual address. 560 * Reads a particular data type out of memory at the given virtual address.
556 * 561 *
@@ -686,6 +691,10 @@ u8* Memory::GetPointer(VAddr vaddr) {
686 return impl->GetPointer(vaddr); 691 return impl->GetPointer(vaddr);
687} 692}
688 693
694u8* Memory::GetPointerSilent(VAddr vaddr) {
695 return impl->GetPointerSilent(vaddr);
696}
697
689const u8* Memory::GetPointer(VAddr vaddr) const { 698const u8* Memory::GetPointer(VAddr vaddr) const {
690 return impl->GetPointer(vaddr); 699 return impl->GetPointer(vaddr);
691} 700}
diff --git a/src/core/memory.h b/src/core/memory.h
index a11ff8766..81eac448b 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -114,6 +114,7 @@ public:
114 * If the address is not valid, nullptr will be returned. 114 * If the address is not valid, nullptr will be returned.
115 */ 115 */
116 u8* GetPointer(VAddr vaddr); 116 u8* GetPointer(VAddr vaddr);
117 u8* GetPointerSilent(VAddr vaddr);
117 118
118 template <typename T> 119 template <typename T>
119 T* GetPointer(VAddr vaddr) { 120 T* GetPointer(VAddr vaddr) {
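Usage sketch for the new accessor (illustrative, assuming a Core::Memory::Memory& in scope): GetPointerSilent() passes empty handlers to GetPointerImpl(), so probing possibly-unmapped guest memory avoids the unmapped-access error log that GetPointer() emits; both return nullptr on failure.

    if (u8* ptr = memory.GetPointerSilent(vaddr)) {
        std::memcpy(host_buffer.data(), ptr, host_buffer.size());
    } else {
        // unmapped; handle quietly, nothing was logged
    }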
diff --git a/src/dedicated_room/CMakeLists.txt b/src/dedicated_room/CMakeLists.txt
index 1efdbc1f7..2d9731f19 100644
--- a/src/dedicated_room/CMakeLists.txt
+++ b/src/dedicated_room/CMakeLists.txt
@@ -23,5 +23,5 @@ endif()
23target_link_libraries(yuzu-room PRIVATE ${PLATFORM_LIBRARIES} Threads::Threads) 23target_link_libraries(yuzu-room PRIVATE ${PLATFORM_LIBRARIES} Threads::Threads)
24 24
25if(UNIX AND NOT APPLE) 25if(UNIX AND NOT APPLE)
26 install(TARGETS yuzu-room RUNTIME DESTINATION "${CMAKE_INSTALL_PREFIX}/bin") 26 install(TARGETS yuzu-room)
27endif() 27endif()
diff --git a/src/dedicated_room/yuzu_room.cpp b/src/dedicated_room/yuzu_room.cpp
index 7b6deba41..359891883 100644
--- a/src/dedicated_room/yuzu_room.cpp
+++ b/src/dedicated_room/yuzu_room.cpp
@@ -76,7 +76,18 @@ static constexpr char BanListMagic[] = "YuzuRoom-BanList-1";
76static constexpr char token_delimiter{':'}; 76static constexpr char token_delimiter{':'};
77 77
78static void PadToken(std::string& token) { 78static void PadToken(std::string& token) {
79 while (token.size() % 4 != 0) { 79 std::size_t outlen = 0;
80
81 std::array<unsigned char, 512> output{};
82 std::array<unsigned char, 2048> roundtrip{};
83 for (size_t i = 0; i < 3; i++) {
84 mbedtls_base64_decode(output.data(), output.size(), &outlen,
85 reinterpret_cast<const unsigned char*>(token.c_str()),
86 token.length());
87 mbedtls_base64_encode(roundtrip.data(), roundtrip.size(), &outlen, output.data(), outlen);
88 if (memcmp(roundtrip.data(), token.data(), token.size()) == 0) {
89 break;
90 }
80 token.push_back('='); 91 token.push_back('=');
81 } 92 }
82} 93}
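Worked example of the round-trip check above (token value illustrative): a '=' is appended one character at a time until decoding followed by re-encoding reproduces the token, instead of padding blindly to a multiple of four.

    // start: "YWJjZA"   decode+re-encode does not reproduce it -> append '='
    // then:  "YWJjZA="  still no round-trip match              -> append '='
    // then:  "YWJjZA==" decodes to "abcd", re-encodes to "YWJjZA==" -> match, stop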
diff --git a/src/input_common/CMakeLists.txt b/src/input_common/CMakeLists.txt
index 4b91b88ce..2cf9eb97f 100644
--- a/src/input_common/CMakeLists.txt
+++ b/src/input_common/CMakeLists.txt
@@ -18,6 +18,8 @@ add_library(input_common STATIC
18 drivers/touch_screen.h 18 drivers/touch_screen.h
19 drivers/udp_client.cpp 19 drivers/udp_client.cpp
20 drivers/udp_client.h 20 drivers/udp_client.h
21 drivers/virtual_amiibo.cpp
22 drivers/virtual_amiibo.h
21 helpers/stick_from_buttons.cpp 23 helpers/stick_from_buttons.cpp
22 helpers/stick_from_buttons.h 24 helpers/stick_from_buttons.h
23 helpers/touch_from_buttons.cpp 25 helpers/touch_from_buttons.cpp
diff --git a/src/input_common/drivers/gc_adapter.cpp b/src/input_common/drivers/gc_adapter.cpp
index 27a0ffb0d..f4dd24e7d 100644
--- a/src/input_common/drivers/gc_adapter.cpp
+++ b/src/input_common/drivers/gc_adapter.cpp
@@ -90,7 +90,7 @@ GCAdapter::~GCAdapter() {
90 90
91void GCAdapter::AdapterInputThread(std::stop_token stop_token) { 91void GCAdapter::AdapterInputThread(std::stop_token stop_token) {
92 LOG_DEBUG(Input, "Input thread started"); 92 LOG_DEBUG(Input, "Input thread started");
93 Common::SetCurrentThreadName("yuzu:input:GCAdapter"); 93 Common::SetCurrentThreadName("GCAdapter");
94 s32 payload_size{}; 94 s32 payload_size{};
95 AdapterPayload adapter_payload{}; 95 AdapterPayload adapter_payload{};
96 96
@@ -214,7 +214,7 @@ void GCAdapter::UpdateStateAxes(std::size_t port, const AdapterPayload& adapter_
214} 214}
215 215
216void GCAdapter::AdapterScanThread(std::stop_token stop_token) { 216void GCAdapter::AdapterScanThread(std::stop_token stop_token) {
217 Common::SetCurrentThreadName("yuzu:input:ScanGCAdapter"); 217 Common::SetCurrentThreadName("ScanGCAdapter");
218 usb_adapter_handle = nullptr; 218 usb_adapter_handle = nullptr;
219 pads = {}; 219 pads = {};
220 while (!stop_token.stop_requested() && !Setup()) { 220 while (!stop_token.stop_requested() && !Setup()) {
diff --git a/src/input_common/drivers/mouse.cpp b/src/input_common/drivers/mouse.cpp
index 4909fa8d7..98c3157a8 100644
--- a/src/input_common/drivers/mouse.cpp
+++ b/src/input_common/drivers/mouse.cpp
@@ -37,7 +37,7 @@ Mouse::Mouse(std::string input_engine_) : InputEngine(std::move(input_engine_))
37} 37}
38 38
39void Mouse::UpdateThread(std::stop_token stop_token) { 39void Mouse::UpdateThread(std::stop_token stop_token) {
40 Common::SetCurrentThreadName("yuzu:input:Mouse"); 40 Common::SetCurrentThreadName("Mouse");
41 constexpr int update_time = 10; 41 constexpr int update_time = 10;
42 while (!stop_token.stop_requested()) { 42 while (!stop_token.stop_requested()) {
43 if (Settings::values.mouse_panning && !Settings::values.mouse_enabled) { 43 if (Settings::values.mouse_panning && !Settings::values.mouse_enabled) {
diff --git a/src/input_common/drivers/sdl_driver.cpp b/src/input_common/drivers/sdl_driver.cpp
index 5cc1ccbd9..b72e4b397 100644
--- a/src/input_common/drivers/sdl_driver.cpp
+++ b/src/input_common/drivers/sdl_driver.cpp
@@ -436,7 +436,7 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en
436 initialized = true; 436 initialized = true;
437 if (start_thread) { 437 if (start_thread) {
438 poll_thread = std::thread([this] { 438 poll_thread = std::thread([this] {
439 Common::SetCurrentThreadName("yuzu:input:SDL"); 439 Common::SetCurrentThreadName("SDL_MainLoop");
440 using namespace std::chrono_literals; 440 using namespace std::chrono_literals;
441 while (initialized) { 441 while (initialized) {
442 SDL_PumpEvents(); 442 SDL_PumpEvents();
@@ -444,7 +444,7 @@ SDLDriver::SDLDriver(std::string input_engine_) : InputEngine(std::move(input_en
444 } 444 }
445 }); 445 });
446 vibration_thread = std::thread([this] { 446 vibration_thread = std::thread([this] {
447 Common::SetCurrentThreadName("yuzu:input:SDL_Vibration"); 447 Common::SetCurrentThreadName("SDL_Vibration");
448 using namespace std::chrono_literals; 448 using namespace std::chrono_literals;
449 while (initialized) { 449 while (initialized) {
450 SendVibrations(); 450 SendVibrations();
diff --git a/src/input_common/drivers/virtual_amiibo.cpp b/src/input_common/drivers/virtual_amiibo.cpp
new file mode 100644
index 000000000..0cd5129da
--- /dev/null
+++ b/src/input_common/drivers/virtual_amiibo.cpp
@@ -0,0 +1,101 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include <cstring>
5#include <fmt/format.h>
6
7#include "common/fs/file.h"
8#include "common/fs/fs.h"
9#include "common/fs/path_util.h"
10#include "common/logging/log.h"
11#include "common/settings.h"
12#include "input_common/drivers/virtual_amiibo.h"
13
14namespace InputCommon {
15constexpr PadIdentifier identifier = {
16 .guid = Common::UUID{},
17 .port = 0,
18 .pad = 0,
19};
20
21VirtualAmiibo::VirtualAmiibo(std::string input_engine_) : InputEngine(std::move(input_engine_)) {}
22
23VirtualAmiibo::~VirtualAmiibo() = default;
24
25Common::Input::PollingError VirtualAmiibo::SetPollingMode(
26 [[maybe_unused]] const PadIdentifier& identifier_,
27 const Common::Input::PollingMode polling_mode_) {
28 polling_mode = polling_mode_;
29
30 if (polling_mode == Common::Input::PollingMode::NFC) {
31 if (state == State::Initialized) {
32 state = State::WaitingForAmiibo;
33 }
34 } else {
35 if (state == State::AmiiboIsOpen) {
36 CloseAmiibo();
37 }
38 }
39
40 return Common::Input::PollingError::None;
41}
42
43Common::Input::NfcState VirtualAmiibo::SupportsNfc(
44 [[maybe_unused]] const PadIdentifier& identifier_) const {
45 return Common::Input::NfcState::Success;
46}
47
48Common::Input::NfcState VirtualAmiibo::WriteNfcData(
49 [[maybe_unused]] const PadIdentifier& identifier_, const std::vector<u8>& data) {
50 const Common::FS::IOFile amiibo_file{file_path, Common::FS::FileAccessMode::ReadWrite,
51 Common::FS::FileType::BinaryFile};
52
53 if (!amiibo_file.IsOpen()) {
54 LOG_ERROR(Core, "Amiibo is already in use");
55 return Common::Input::NfcState::WriteFailed;
56 }
57
58 if (!amiibo_file.Write(data)) {
59 LOG_ERROR(Service_NFP, "Error writing to file");
60 return Common::Input::NfcState::WriteFailed;
61 }
62
63 return Common::Input::NfcState::Success;
64}
65
66VirtualAmiibo::State VirtualAmiibo::GetCurrentState() const {
67 return state;
68}
69
70VirtualAmiibo::Info VirtualAmiibo::LoadAmiibo(const std::string& filename) {
71 const Common::FS::IOFile amiibo_file{filename, Common::FS::FileAccessMode::Read,
72 Common::FS::FileType::BinaryFile};
73
74 if (state != State::WaitingForAmiibo) {
75 return Info::WrongDeviceState;
76 }
77
78 if (!amiibo_file.IsOpen()) {
79 return Info::UnableToLoad;
80 }
81
82 amiibo_data.resize(amiibo_size);
83
84 if (amiibo_file.Read(amiibo_data) < amiibo_size_without_password) {
85 return Info::NotAnAmiibo;
86 }
87
88 file_path = filename;
89 state = State::AmiiboIsOpen;
90 SetNfc(identifier, {Common::Input::NfcState::NewAmiibo, amiibo_data});
91 return Info::Success;
92}
93
94VirtualAmiibo::Info VirtualAmiibo::CloseAmiibo() {
95 state = polling_mode == Common::Input::PollingMode::NFC ? State::WaitingForAmiibo
96 : State::Initialized;
97 SetNfc(identifier, {Common::Input::NfcState::AmiiboRemoved, {}});
98 return Info::Success;
99}
100
101} // namespace InputCommon
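Usage sketch of the driver's state machine (pad identifier and file path are illustrative): enabling NFC polling moves Initialized to WaitingForAmiibo, LoadAmiibo() only succeeds in that state, and CloseAmiibo() returns to it while polling stays on.

    InputCommon::VirtualAmiibo amiibo{"virtual_amiibo"};
    const InputCommon::PadIdentifier pad{};
    amiibo.SetPollingMode(pad, Common::Input::PollingMode::NFC);
    if (amiibo.LoadAmiibo("/tmp/amiibo.bin") == InputCommon::VirtualAmiibo::Info::Success) {
        // state is AmiiboIsOpen; amiibo_data was published through SetNfc()
        amiibo.CloseAmiibo(); // back to WaitingForAmiibo
    }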
diff --git a/src/input_common/drivers/virtual_amiibo.h b/src/input_common/drivers/virtual_amiibo.h
new file mode 100644
index 000000000..9eac07544
--- /dev/null
+++ b/src/input_common/drivers/virtual_amiibo.h
@@ -0,0 +1,61 @@
1// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <array>
7#include <string>
8#include <vector>
9
10#include "common/common_types.h"
11#include "input_common/input_engine.h"
12
13namespace Common::FS {
14class IOFile;
15}
16
17namespace InputCommon {
18
19class VirtualAmiibo final : public InputEngine {
20public:
21 enum class State {
22 Initialized,
23 WaitingForAmiibo,
24 AmiiboIsOpen,
25 };
26
27 enum class Info {
28 Success,
29 UnableToLoad,
30 NotAnAmiibo,
31 WrongDeviceState,
32 Unknown,
33 };
34
35 explicit VirtualAmiibo(std::string input_engine_);
36 ~VirtualAmiibo() override;
37
38 // Sets the polling mode for a controller
39 Common::Input::PollingError SetPollingMode(
40 const PadIdentifier& identifier_, const Common::Input::PollingMode polling_mode_) override;
41
42 Common::Input::NfcState SupportsNfc(const PadIdentifier& identifier_) const override;
43
44 Common::Input::NfcState WriteNfcData(const PadIdentifier& identifier_,
45 const std::vector<u8>& data) override;
46
47 State GetCurrentState() const;
48
49 Info LoadAmiibo(const std::string& amiibo_file);
50 Info CloseAmiibo();
51
52private:
53 static constexpr std::size_t amiibo_size = 0x21C;
54 static constexpr std::size_t amiibo_size_without_password = amiibo_size - 0x8;
55
56 std::string file_path{};
57 State state{State::Initialized};
58 std::vector<u8> amiibo_data;
59 Common::Input::PollingMode polling_mode{Common::Input::PollingMode::Pasive};
60};
61} // namespace InputCommon
diff --git a/src/input_common/input_engine.cpp b/src/input_common/input_engine.cpp
index 6ede0e4b0..61cfd0911 100644
--- a/src/input_common/input_engine.cpp
+++ b/src/input_common/input_engine.cpp
@@ -102,6 +102,17 @@ void InputEngine::SetCamera(const PadIdentifier& identifier,
102 TriggerOnCameraChange(identifier, value); 102 TriggerOnCameraChange(identifier, value);
103} 103}
104 104
105void InputEngine::SetNfc(const PadIdentifier& identifier, const Common::Input::NfcStatus& value) {
106 {
107 std::scoped_lock lock{mutex};
108 ControllerData& controller = controller_list.at(identifier);
109 if (!configuring) {
110 controller.nfc = value;
111 }
112 }
113 TriggerOnNfcChange(identifier, value);
114}
115
105bool InputEngine::GetButton(const PadIdentifier& identifier, int button) const { 116bool InputEngine::GetButton(const PadIdentifier& identifier, int button) const {
106 std::scoped_lock lock{mutex}; 117 std::scoped_lock lock{mutex};
107 const auto controller_iter = controller_list.find(identifier); 118 const auto controller_iter = controller_list.find(identifier);
@@ -189,6 +200,18 @@ Common::Input::CameraStatus InputEngine::GetCamera(const PadIdentifier& identifi
189 return controller.camera; 200 return controller.camera;
190} 201}
191 202
203Common::Input::NfcStatus InputEngine::GetNfc(const PadIdentifier& identifier) const {
204 std::scoped_lock lock{mutex};
205 const auto controller_iter = controller_list.find(identifier);
206 if (controller_iter == controller_list.cend()) {
207 LOG_ERROR(Input, "Invalid identifier guid={}, pad={}, port={}", identifier.guid.RawString(),
208 identifier.pad, identifier.port);
209 return {};
210 }
211 const ControllerData& controller = controller_iter->second;
212 return controller.nfc;
213}
214
192void InputEngine::ResetButtonState() { 215void InputEngine::ResetButtonState() {
193 for (const auto& controller : controller_list) { 216 for (const auto& controller : controller_list) {
194 for (const auto& button : controller.second.buttons) { 217 for (const auto& button : controller.second.buttons) {
@@ -355,6 +378,20 @@ void InputEngine::TriggerOnCameraChange(const PadIdentifier& identifier,
355 } 378 }
356} 379}
357 380
381void InputEngine::TriggerOnNfcChange(const PadIdentifier& identifier,
382 [[maybe_unused]] const Common::Input::NfcStatus& value) {
383 std::scoped_lock lock{mutex_callback};
384 for (const auto& poller_pair : callback_list) {
385 const InputIdentifier& poller = poller_pair.second;
386 if (!IsInputIdentifierEqual(poller, identifier, EngineInputType::Nfc, 0)) {
387 continue;
388 }
389 if (poller.callback.on_change) {
390 poller.callback.on_change();
391 }
392 }
393}
394
358bool InputEngine::IsInputIdentifierEqual(const InputIdentifier& input_identifier, 395bool InputEngine::IsInputIdentifierEqual(const InputIdentifier& input_identifier,
359 const PadIdentifier& identifier, EngineInputType type, 396 const PadIdentifier& identifier, EngineInputType type,
360 int index) const { 397 int index) const {
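Sketch of how a consumer hooks the new NFC path (mirroring InputFromNfc in input_poller.cpp below): SetNfc() stores the status outside of configuration mode, and TriggerOnNfcChange() then invokes every callback registered with EngineInputType::Nfc at index 0. Field layout of InputIdentifier is assumed from its use in this diff:

    const InputIdentifier nfc_listener{
        .identifier = pad,            // PadIdentifier of the controller
        .type = EngineInputType::Nfc,
        .index = 0,                   // TriggerOnNfcChange matches index 0
        .callback = {[&engine, pad]() { engine.GetNfc(pad); /* re-read status */ }},
    };
    const int callback_key = engine.SetCallback(nfc_listener);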
diff --git a/src/input_common/input_engine.h b/src/input_common/input_engine.h
index f6b3c4610..cfbdb26bd 100644
--- a/src/input_common/input_engine.h
+++ b/src/input_common/input_engine.h
@@ -42,6 +42,7 @@ enum class EngineInputType {
42 Camera, 42 Camera,
43 HatButton, 43 HatButton,
44 Motion, 44 Motion,
45 Nfc,
45}; 46};
46 47
47namespace std { 48namespace std {
@@ -127,6 +128,18 @@ public:
127 return Common::Input::CameraError::NotSupported; 128 return Common::Input::CameraError::NotSupported;
128 } 129 }
129 130
131 // Returns whether the controller supports NFC
132 virtual Common::Input::NfcState SupportsNfc(
133 [[maybe_unused]] const PadIdentifier& identifier) const {
134 return Common::Input::NfcState::NotSupported;
135 }
136
137 // Writes data to an nfc tag
138 virtual Common::Input::NfcState WriteNfcData([[maybe_unused]] const PadIdentifier& identifier,
139 [[maybe_unused]] const std::vector<u8>& data) {
140 return Common::Input::NfcState::NotSupported;
141 }
142
130 // Returns the engine name 143 // Returns the engine name
131 [[nodiscard]] const std::string& GetEngineName() const; 144 [[nodiscard]] const std::string& GetEngineName() const;
132 145
@@ -183,6 +196,7 @@ public:
183 Common::Input::BatteryLevel GetBattery(const PadIdentifier& identifier) const; 196 Common::Input::BatteryLevel GetBattery(const PadIdentifier& identifier) const;
184 BasicMotion GetMotion(const PadIdentifier& identifier, int motion) const; 197 BasicMotion GetMotion(const PadIdentifier& identifier, int motion) const;
185 Common::Input::CameraStatus GetCamera(const PadIdentifier& identifier) const; 198 Common::Input::CameraStatus GetCamera(const PadIdentifier& identifier) const;
199 Common::Input::NfcStatus GetNfc(const PadIdentifier& identifier) const;
186 200
187 int SetCallback(InputIdentifier input_identifier); 201 int SetCallback(InputIdentifier input_identifier);
188 void SetMappingCallback(MappingCallback callback); 202 void SetMappingCallback(MappingCallback callback);
@@ -195,6 +209,7 @@ protected:
195 void SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value); 209 void SetBattery(const PadIdentifier& identifier, Common::Input::BatteryLevel value);
196 void SetMotion(const PadIdentifier& identifier, int motion, const BasicMotion& value); 210 void SetMotion(const PadIdentifier& identifier, int motion, const BasicMotion& value);
197 void SetCamera(const PadIdentifier& identifier, const Common::Input::CameraStatus& value); 211 void SetCamera(const PadIdentifier& identifier, const Common::Input::CameraStatus& value);
212 void SetNfc(const PadIdentifier& identifier, const Common::Input::NfcStatus& value);
198 213
199 virtual std::string GetHatButtonName([[maybe_unused]] u8 direction_value) const { 214 virtual std::string GetHatButtonName([[maybe_unused]] u8 direction_value) const {
200 return "Unknown"; 215 return "Unknown";
@@ -208,6 +223,7 @@ private:
208 std::unordered_map<int, BasicMotion> motions; 223 std::unordered_map<int, BasicMotion> motions;
209 Common::Input::BatteryLevel battery{}; 224 Common::Input::BatteryLevel battery{};
210 Common::Input::CameraStatus camera{}; 225 Common::Input::CameraStatus camera{};
226 Common::Input::NfcStatus nfc{};
211 }; 227 };
212 228
213 void TriggerOnButtonChange(const PadIdentifier& identifier, int button, bool value); 229 void TriggerOnButtonChange(const PadIdentifier& identifier, int button, bool value);
@@ -218,6 +234,7 @@ private:
218 const BasicMotion& value); 234 const BasicMotion& value);
219 void TriggerOnCameraChange(const PadIdentifier& identifier, 235 void TriggerOnCameraChange(const PadIdentifier& identifier,
220 const Common::Input::CameraStatus& value); 236 const Common::Input::CameraStatus& value);
237 void TriggerOnNfcChange(const PadIdentifier& identifier, const Common::Input::NfcStatus& value);
221 238
222 bool IsInputIdentifierEqual(const InputIdentifier& input_identifier, 239 bool IsInputIdentifierEqual(const InputIdentifier& input_identifier,
223 const PadIdentifier& identifier, EngineInputType type, 240 const PadIdentifier& identifier, EngineInputType type,
diff --git a/src/input_common/input_poller.cpp b/src/input_common/input_poller.cpp
index ffb9b945e..75705b67e 100644
--- a/src/input_common/input_poller.cpp
+++ b/src/input_common/input_poller.cpp
@@ -705,6 +705,47 @@ private:
705 InputEngine* input_engine; 705 InputEngine* input_engine;
706}; 706};
707 707
708class InputFromNfc final : public Common::Input::InputDevice {
709public:
710 explicit InputFromNfc(PadIdentifier identifier_, InputEngine* input_engine_)
711 : identifier(identifier_), input_engine(input_engine_) {
712 UpdateCallback engine_callback{[this]() { OnChange(); }};
713 const InputIdentifier input_identifier{
714 .identifier = identifier,
715 .type = EngineInputType::Nfc,
716 .index = 0,
717 .callback = engine_callback,
718 };
719 callback_key = input_engine->SetCallback(input_identifier);
720 }
721
722 ~InputFromNfc() override {
723 input_engine->DeleteCallback(callback_key);
724 }
725
726 Common::Input::NfcStatus GetStatus() const {
727 return input_engine->GetNfc(identifier);
728 }
729
730 void ForceUpdate() override {
731 OnChange();
732 }
733
734 void OnChange() {
735 const Common::Input::CallbackStatus status{
736 .type = Common::Input::InputType::Nfc,
737 .nfc_status = GetStatus(),
738 };
739
740 TriggerOnChange(status);
741 }
742
743private:
744 const PadIdentifier identifier;
745 int callback_key;
746 InputEngine* input_engine;
747};
748
708class OutputFromIdentifier final : public Common::Input::OutputDevice { 749class OutputFromIdentifier final : public Common::Input::OutputDevice {
709public: 750public:
710 explicit OutputFromIdentifier(PadIdentifier identifier_, InputEngine* input_engine_) 751 explicit OutputFromIdentifier(PadIdentifier identifier_, InputEngine* input_engine_)
@@ -727,6 +768,14 @@ public:
727 return input_engine->SetCameraFormat(identifier, camera_format); 768 return input_engine->SetCameraFormat(identifier, camera_format);
728 } 769 }
729 770
771 Common::Input::NfcState SupportsNfc() const override {
772 return input_engine->SupportsNfc(identifier);
773 }
774
775 Common::Input::NfcState WriteNfcData(const std::vector<u8>& data) override {
776 return input_engine->WriteNfcData(identifier, data);
777 }
778
730private: 779private:
731 const PadIdentifier identifier; 780 const PadIdentifier identifier;
732 InputEngine* input_engine; 781 InputEngine* input_engine;
@@ -978,6 +1027,18 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateCameraDevice(
978 return std::make_unique<InputFromCamera>(identifier, input_engine.get()); 1027 return std::make_unique<InputFromCamera>(identifier, input_engine.get());
979} 1028}
980 1029
1030std::unique_ptr<Common::Input::InputDevice> InputFactory::CreateNfcDevice(
1031 const Common::ParamPackage& params) {
1032 const PadIdentifier identifier = {
1033 .guid = Common::UUID{params.Get("guid", "")},
1034 .port = static_cast<std::size_t>(params.Get("port", 0)),
1035 .pad = static_cast<std::size_t>(params.Get("pad", 0)),
1036 };
1037
1038 input_engine->PreSetController(identifier);
1039 return std::make_unique<InputFromNfc>(identifier, input_engine.get());
1040}
1041
981InputFactory::InputFactory(std::shared_ptr<InputEngine> input_engine_) 1042InputFactory::InputFactory(std::shared_ptr<InputEngine> input_engine_)
982 : input_engine(std::move(input_engine_)) {} 1043 : input_engine(std::move(input_engine_)) {}
983 1044
@@ -989,6 +1050,9 @@ std::unique_ptr<Common::Input::InputDevice> InputFactory::Create(
989 if (params.Has("camera")) { 1050 if (params.Has("camera")) {
990 return CreateCameraDevice(params); 1051 return CreateCameraDevice(params);
991 } 1052 }
1053 if (params.Has("nfc")) {
1054 return CreateNfcDevice(params);
1055 }
992 if (params.Has("button") && params.Has("axis")) { 1056 if (params.Has("button") && params.Has("axis")) {
993 return CreateTriggerDevice(params); 1057 return CreateTriggerDevice(params);
994 } 1058 }
diff --git a/src/input_common/input_poller.h b/src/input_common/input_poller.h
index 4410a8415..d7db13ce4 100644
--- a/src/input_common/input_poller.h
+++ b/src/input_common/input_poller.h
@@ -222,6 +222,16 @@ private:
222 std::unique_ptr<Common::Input::InputDevice> CreateCameraDevice( 222 std::unique_ptr<Common::Input::InputDevice> CreateCameraDevice(
223 const Common::ParamPackage& params); 223 const Common::ParamPackage& params);
224 224
225 /**
226 * Creates an NFC device from the parameters given.
227 * @param params contains parameters for creating the device:
228 * - "guid": text string for identifying controllers
229 * - "port": port of the connected device
230 * - "pad": slot of the connected controller
231 * @returns a unique input device with the parameters specified
232 */
233 std::unique_ptr<Common::Input::InputDevice> CreateNfcDevice(const Common::ParamPackage& params);
234
225 std::shared_ptr<InputEngine> input_engine; 235 std::shared_ptr<InputEngine> input_engine;
226}; 236};
227} // namespace InputCommon 237} // namespace InputCommon
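Sketch of requesting an NFC device through the factory (parameter values illustrative): Create() dispatches on the "nfc" key added above, and CreateNfcDevice() reads guid/port/pad back out of the package.

    Common::ParamPackage params;
    params.Set("engine", "virtual_amiibo");
    params.Set("nfc", 1);
    params.Set("port", 0);
    params.Set("pad", 0);
    auto device = factory.Create(params); // routed to CreateNfcDevice()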
diff --git a/src/input_common/main.cpp b/src/input_common/main.cpp
index 75a57b9fc..b2064ef95 100644
--- a/src/input_common/main.cpp
+++ b/src/input_common/main.cpp
@@ -11,6 +11,7 @@
11#include "input_common/drivers/tas_input.h" 11#include "input_common/drivers/tas_input.h"
12#include "input_common/drivers/touch_screen.h" 12#include "input_common/drivers/touch_screen.h"
13#include "input_common/drivers/udp_client.h" 13#include "input_common/drivers/udp_client.h"
14#include "input_common/drivers/virtual_amiibo.h"
14#include "input_common/helpers/stick_from_buttons.h" 15#include "input_common/helpers/stick_from_buttons.h"
15#include "input_common/helpers/touch_from_buttons.h" 16#include "input_common/helpers/touch_from_buttons.h"
16#include "input_common/input_engine.h" 17#include "input_common/input_engine.h"
@@ -87,6 +88,15 @@ struct InputSubsystem::Impl {
87 Common::Input::RegisterFactory<Common::Input::OutputDevice>(camera->GetEngineName(), 88 Common::Input::RegisterFactory<Common::Input::OutputDevice>(camera->GetEngineName(),
88 camera_output_factory); 89 camera_output_factory);
89 90
91 virtual_amiibo = std::make_shared<VirtualAmiibo>("virtual_amiibo");
92 virtual_amiibo->SetMappingCallback(mapping_callback);
93 virtual_amiibo_input_factory = std::make_shared<InputFactory>(virtual_amiibo);
94 virtual_amiibo_output_factory = std::make_shared<OutputFactory>(virtual_amiibo);
95 Common::Input::RegisterFactory<Common::Input::InputDevice>(virtual_amiibo->GetEngineName(),
96 virtual_amiibo_input_factory);
97 Common::Input::RegisterFactory<Common::Input::OutputDevice>(virtual_amiibo->GetEngineName(),
98 virtual_amiibo_output_factory);
99
90#ifdef HAVE_SDL2 100#ifdef HAVE_SDL2
91 sdl = std::make_shared<SDLDriver>("sdl"); 101 sdl = std::make_shared<SDLDriver>("sdl");
92 sdl->SetMappingCallback(mapping_callback); 102 sdl->SetMappingCallback(mapping_callback);
@@ -327,6 +337,7 @@ struct InputSubsystem::Impl {
327 std::shared_ptr<TasInput::Tas> tas_input; 337 std::shared_ptr<TasInput::Tas> tas_input;
328 std::shared_ptr<CemuhookUDP::UDPClient> udp_client; 338 std::shared_ptr<CemuhookUDP::UDPClient> udp_client;
329 std::shared_ptr<Camera> camera; 339 std::shared_ptr<Camera> camera;
340 std::shared_ptr<VirtualAmiibo> virtual_amiibo;
330 341
331 std::shared_ptr<InputFactory> keyboard_factory; 342 std::shared_ptr<InputFactory> keyboard_factory;
332 std::shared_ptr<InputFactory> mouse_factory; 343 std::shared_ptr<InputFactory> mouse_factory;
@@ -335,6 +346,7 @@ struct InputSubsystem::Impl {
335 std::shared_ptr<InputFactory> udp_client_input_factory; 346 std::shared_ptr<InputFactory> udp_client_input_factory;
336 std::shared_ptr<InputFactory> tas_input_factory; 347 std::shared_ptr<InputFactory> tas_input_factory;
337 std::shared_ptr<InputFactory> camera_input_factory; 348 std::shared_ptr<InputFactory> camera_input_factory;
349 std::shared_ptr<InputFactory> virtual_amiibo_input_factory;
338 350
339 std::shared_ptr<OutputFactory> keyboard_output_factory; 351 std::shared_ptr<OutputFactory> keyboard_output_factory;
340 std::shared_ptr<OutputFactory> mouse_output_factory; 352 std::shared_ptr<OutputFactory> mouse_output_factory;
@@ -342,6 +354,7 @@ struct InputSubsystem::Impl {
342 std::shared_ptr<OutputFactory> udp_client_output_factory; 354 std::shared_ptr<OutputFactory> udp_client_output_factory;
343 std::shared_ptr<OutputFactory> tas_output_factory; 355 std::shared_ptr<OutputFactory> tas_output_factory;
344 std::shared_ptr<OutputFactory> camera_output_factory; 356 std::shared_ptr<OutputFactory> camera_output_factory;
357 std::shared_ptr<OutputFactory> virtual_amiibo_output_factory;
345 358
346#ifdef HAVE_SDL2 359#ifdef HAVE_SDL2
347 std::shared_ptr<SDLDriver> sdl; 360 std::shared_ptr<SDLDriver> sdl;
@@ -402,6 +415,14 @@ const Camera* InputSubsystem::GetCamera() const {
402 return impl->camera.get(); 415 return impl->camera.get();
403} 416}
404 417
418VirtualAmiibo* InputSubsystem::GetVirtualAmiibo() {
419 return impl->virtual_amiibo.get();
420}
421
422const VirtualAmiibo* InputSubsystem::GetVirtualAmiibo() const {
423 return impl->virtual_amiibo.get();
424}
425
405std::vector<Common::ParamPackage> InputSubsystem::GetInputDevices() const { 426std::vector<Common::ParamPackage> InputSubsystem::GetInputDevices() const {
406 return impl->GetInputDevices(); 427 return impl->GetInputDevices();
407} 428}
diff --git a/src/input_common/main.h b/src/input_common/main.h
index 9a969e747..ced252383 100644
--- a/src/input_common/main.h
+++ b/src/input_common/main.h
@@ -33,6 +33,7 @@ class Camera;
33class Keyboard; 33class Keyboard;
34class Mouse; 34class Mouse;
35class TouchScreen; 35class TouchScreen;
36class VirtualAmiibo;
36struct MappingData; 37struct MappingData;
37} // namespace InputCommon 38} // namespace InputCommon
38 39
@@ -101,6 +102,12 @@ public:
101 /// Retrieves the underlying camera input device. 102 /// Retrieves the underlying camera input device.
102 [[nodiscard]] const Camera* GetCamera() const; 103 [[nodiscard]] const Camera* GetCamera() const;
103 104
105 /// Retrieves the underlying virtual amiibo input device.
106 [[nodiscard]] VirtualAmiibo* GetVirtualAmiibo();
107
108 /// Retrieves the underlying virtual amiibo input device.
109 [[nodiscard]] const VirtualAmiibo* GetVirtualAmiibo() const;
110
104 /** 111 /**
105 * Returns all available input devices that this Factory can create a new device with. 112 * Returns all available input devices that this Factory can create a new device with.
106 * Each returned ParamPackage should have a `display` field used for display, a `engine` field 113 * Each returned ParamPackage should have a `display` field used for display, a `engine` field
diff --git a/src/network/network.cpp b/src/network/network.cpp
index 0841e4134..6652a186b 100644
--- a/src/network/network.cpp
+++ b/src/network/network.cpp
@@ -15,7 +15,7 @@ RoomNetwork::RoomNetwork() {
15 15
16bool RoomNetwork::Init() { 16bool RoomNetwork::Init() {
17 if (enet_initialize() != 0) { 17 if (enet_initialize() != 0) {
18 LOG_ERROR(Network, "Error initalizing ENet"); 18 LOG_ERROR(Network, "Error initializing ENet");
19 return false; 19 return false;
20 } 20 }
21 m_room = std::make_shared<Room>(); 21 m_room = std::make_shared<Room>();
diff --git a/src/network/room.cpp b/src/network/room.cpp
index 8c63b255b..dc5dbce7f 100644
--- a/src/network/room.cpp
+++ b/src/network/room.cpp
@@ -212,6 +212,12 @@ public:
212 void HandleProxyPacket(const ENetEvent* event); 212 void HandleProxyPacket(const ENetEvent* event);
213 213
214 /** 214 /**
 215 * Relays this LDN packet, either to a single member or to all members except the sender.
216 * @param event The ENet event containing the data
217 */
218 void HandleLdnPacket(const ENetEvent* event);
219
220 /**
215 * Extracts a chat entry from a received ENet packet and adds it to the chat queue. 221 * Extracts a chat entry from a received ENet packet and adds it to the chat queue.
216 * @param event The ENet event that was received. 222 * @param event The ENet event that was received.
217 */ 223 */
@@ -247,6 +253,9 @@ void Room::RoomImpl::ServerLoop() {
247 case IdProxyPacket: 253 case IdProxyPacket:
248 HandleProxyPacket(&event); 254 HandleProxyPacket(&event);
249 break; 255 break;
256 case IdLdnPacket:
257 HandleLdnPacket(&event);
258 break;
250 case IdChatMessage: 259 case IdChatMessage:
251 HandleChatPacket(&event); 260 HandleChatPacket(&event);
252 break; 261 break;
@@ -861,6 +870,60 @@ void Room::RoomImpl::HandleProxyPacket(const ENetEvent* event) {
861 enet_host_flush(server); 870 enet_host_flush(server);
862} 871}
863 872
873void Room::RoomImpl::HandleLdnPacket(const ENetEvent* event) {
874 Packet in_packet;
875 in_packet.Append(event->packet->data, event->packet->dataLength);
876
877 in_packet.IgnoreBytes(sizeof(u8)); // Message type
878
879 in_packet.IgnoreBytes(sizeof(u8)); // LAN packet type
880 in_packet.IgnoreBytes(sizeof(IPv4Address)); // Local IP
881
882 IPv4Address remote_ip;
883 in_packet.Read(remote_ip); // Remote IP
884
885 bool broadcast;
886 in_packet.Read(broadcast); // Broadcast
887
888 Packet out_packet;
889 out_packet.Append(event->packet->data, event->packet->dataLength);
890 ENetPacket* enet_packet = enet_packet_create(out_packet.GetData(), out_packet.GetDataSize(),
891 ENET_PACKET_FLAG_RELIABLE);
892
893 const auto& destination_address = remote_ip;
894 if (broadcast) { // Send the data to everyone except the sender
895 std::lock_guard lock(member_mutex);
896 bool sent_packet = false;
897 for (const auto& member : members) {
898 if (member.peer != event->peer) {
899 sent_packet = true;
900 enet_peer_send(member.peer, 0, enet_packet);
901 }
902 }
903
904 if (!sent_packet) {
905 enet_packet_destroy(enet_packet);
906 }
907 } else {
908 std::lock_guard lock(member_mutex);
909 auto member = std::find_if(members.begin(), members.end(),
910 [destination_address](const Member& member_entry) -> bool {
911 return member_entry.fake_ip == destination_address;
912 });
913 if (member != members.end()) {
914 enet_peer_send(member->peer, 0, enet_packet);
915 } else {
916 LOG_ERROR(Network,
917 "Attempting to send to unknown IP address: "
918 "{}.{}.{}.{}",
919 destination_address[0], destination_address[1], destination_address[2],
920 destination_address[3]);
921 enet_packet_destroy(enet_packet);
922 }
923 }
924 enet_host_flush(server);
925}
926
864void Room::RoomImpl::HandleChatPacket(const ENetEvent* event) { 927void Room::RoomImpl::HandleChatPacket(const ENetEvent* event) {
865 Packet in_packet; 928 Packet in_packet;
866 in_packet.Append(event->packet->data, event->packet->dataLength); 929 in_packet.Append(event->packet->data, event->packet->dataLength);
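
The handler above relays the packet verbatim: it peeks at the routing fields, then forwards the original bytes either to every other member (broadcast) or to the single member whose fake IP matches the remote IP. A hedged sketch of the wire layout it assumes, mirroring RoomMember::SendLdnPacket further down; how the Packet class encodes the trailing bool and std::vector payload (assumed length-prefixed) is an implementation detail not shown in this diff:

```cpp
#include <array>
#include <cstdint>
#include <vector>

using IPv4Address = std::array<std::uint8_t, 4>;

// Field order mirrors RoomMember::SendLdnPacket(); the room server only
// inspects the routing fields and forwards the original bytes unmodified.
struct LdnWireLayout {
    std::uint8_t message_type;      // IdLdnPacket, consumed by ServerLoop's switch
    std::uint8_t packet_type;       // LDNPacketType (Scan, Connect, ...)
    IPv4Address local_ip;           // sender's fake room IP, skipped by the server
    IPv4Address remote_ip;          // routing key when broadcast is false
    bool broadcast;                 // true: relay to everyone except the sender
    std::vector<std::uint8_t> data; // opaque LDN payload, relayed verbatim
};
```
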
diff --git a/src/network/room.h b/src/network/room.h
index c2a4b1a70..edbd3ecfb 100644
--- a/src/network/room.h
+++ b/src/network/room.h
@@ -40,6 +40,7 @@ enum RoomMessageTypes : u8 {
40 IdRoomInformation, 40 IdRoomInformation,
41 IdSetGameInfo, 41 IdSetGameInfo,
42 IdProxyPacket, 42 IdProxyPacket,
43 IdLdnPacket,
43 IdChatMessage, 44 IdChatMessage,
44 IdNameCollision, 45 IdNameCollision,
45 IdIpCollision, 46 IdIpCollision,
diff --git a/src/network/room_member.cpp b/src/network/room_member.cpp
index 06818af78..b94cb24ad 100644
--- a/src/network/room_member.cpp
+++ b/src/network/room_member.cpp
@@ -58,6 +58,7 @@ public:
58 58
59 private: 59 private:
60 CallbackSet<ProxyPacket> callback_set_proxy_packet; 60 CallbackSet<ProxyPacket> callback_set_proxy_packet;
61 CallbackSet<LDNPacket> callback_set_ldn_packet;
61 CallbackSet<ChatEntry> callback_set_chat_messages; 62 CallbackSet<ChatEntry> callback_set_chat_messages;
62 CallbackSet<StatusMessageEntry> callback_set_status_messages; 63 CallbackSet<StatusMessageEntry> callback_set_status_messages;
63 CallbackSet<RoomInformation> callback_set_room_information; 64 CallbackSet<RoomInformation> callback_set_room_information;
@@ -108,6 +109,12 @@ public:
108 void HandleProxyPackets(const ENetEvent* event); 109 void HandleProxyPackets(const ENetEvent* event);
109 110
110 /** 111 /**
 112 * Extracts an LDNPacket from a received ENet packet.
113 * @param event The ENet event that was received.
114 */
115 void HandleLdnPackets(const ENetEvent* event);
116
117 /**
111 * Extracts a chat entry from a received ENet packet and adds it to the chat queue. 118 * Extracts a chat entry from a received ENet packet and adds it to the chat queue.
112 * @param event The ENet event that was received. 119 * @param event The ENet event that was received.
113 */ 120 */
@@ -166,6 +173,9 @@ void RoomMember::RoomMemberImpl::MemberLoop() {
166 case IdProxyPacket: 173 case IdProxyPacket:
167 HandleProxyPackets(&event); 174 HandleProxyPackets(&event);
168 break; 175 break;
176 case IdLdnPacket:
177 HandleLdnPackets(&event);
178 break;
169 case IdChatMessage: 179 case IdChatMessage:
170 HandleChatPacket(&event); 180 HandleChatPacket(&event);
171 break; 181 break;
@@ -372,6 +382,27 @@ void RoomMember::RoomMemberImpl::HandleProxyPackets(const ENetEvent* event) {
372 Invoke<ProxyPacket>(proxy_packet); 382 Invoke<ProxyPacket>(proxy_packet);
373} 383}
374 384
385void RoomMember::RoomMemberImpl::HandleLdnPackets(const ENetEvent* event) {
386 LDNPacket ldn_packet{};
387 Packet packet;
388 packet.Append(event->packet->data, event->packet->dataLength);
389
 390 // Ignore the first byte, which is the IdLdnPacket message type.
 391 packet.IgnoreBytes(sizeof(u8));
392
393 u8 packet_type;
394 packet.Read(packet_type);
395 ldn_packet.type = static_cast<LDNPacketType>(packet_type);
396
397 packet.Read(ldn_packet.local_ip);
398 packet.Read(ldn_packet.remote_ip);
399 packet.Read(ldn_packet.broadcast);
400
401 packet.Read(ldn_packet.data);
402
403 Invoke<LDNPacket>(ldn_packet);
404}
405
375void RoomMember::RoomMemberImpl::HandleChatPacket(const ENetEvent* event) { 406void RoomMember::RoomMemberImpl::HandleChatPacket(const ENetEvent* event) {
376 Packet packet; 407 Packet packet;
377 packet.Append(event->packet->data, event->packet->dataLength); 408 packet.Append(event->packet->data, event->packet->dataLength);
@@ -450,6 +481,11 @@ RoomMember::RoomMemberImpl::CallbackSet<ProxyPacket>& RoomMember::RoomMemberImpl
450} 481}
451 482
452template <> 483template <>
484RoomMember::RoomMemberImpl::CallbackSet<LDNPacket>& RoomMember::RoomMemberImpl::Callbacks::Get() {
485 return callback_set_ldn_packet;
486}
487
488template <>
453RoomMember::RoomMemberImpl::CallbackSet<RoomMember::State>& 489RoomMember::RoomMemberImpl::CallbackSet<RoomMember::State>&
454RoomMember::RoomMemberImpl::Callbacks::Get() { 490RoomMember::RoomMemberImpl::Callbacks::Get() {
455 return callback_set_state; 491 return callback_set_state;
@@ -607,6 +643,21 @@ void RoomMember::SendProxyPacket(const ProxyPacket& proxy_packet) {
607 room_member_impl->Send(std::move(packet)); 643 room_member_impl->Send(std::move(packet));
608} 644}
609 645
646void RoomMember::SendLdnPacket(const LDNPacket& ldn_packet) {
647 Packet packet;
648 packet.Write(static_cast<u8>(IdLdnPacket));
649
650 packet.Write(static_cast<u8>(ldn_packet.type));
651
652 packet.Write(ldn_packet.local_ip);
653 packet.Write(ldn_packet.remote_ip);
654 packet.Write(ldn_packet.broadcast);
655
656 packet.Write(ldn_packet.data);
657
658 room_member_impl->Send(std::move(packet));
659}
660
610void RoomMember::SendChatMessage(const std::string& message) { 661void RoomMember::SendChatMessage(const std::string& message) {
611 Packet packet; 662 Packet packet;
612 packet.Write(static_cast<u8>(IdChatMessage)); 663 packet.Write(static_cast<u8>(IdChatMessage));
@@ -663,6 +714,11 @@ RoomMember::CallbackHandle<ProxyPacket> RoomMember::BindOnProxyPacketReceived(
663 return room_member_impl->Bind(callback); 714 return room_member_impl->Bind(callback);
664} 715}
665 716
717RoomMember::CallbackHandle<LDNPacket> RoomMember::BindOnLdnPacketReceived(
718 std::function<void(const LDNPacket&)> callback) {
719 return room_member_impl->Bind(std::move(callback));
720}
721
666RoomMember::CallbackHandle<RoomInformation> RoomMember::BindOnRoomInformationChanged( 722RoomMember::CallbackHandle<RoomInformation> RoomMember::BindOnRoomInformationChanged(
667 std::function<void(const RoomInformation&)> callback) { 723 std::function<void(const RoomInformation&)> callback) {
668 return room_member_impl->Bind(callback); 724 return room_member_impl->Bind(callback);
@@ -699,6 +755,7 @@ void RoomMember::Leave() {
699} 755}
700 756
701template void RoomMember::Unbind(CallbackHandle<ProxyPacket>); 757template void RoomMember::Unbind(CallbackHandle<ProxyPacket>);
758template void RoomMember::Unbind(CallbackHandle<LDNPacket>);
702template void RoomMember::Unbind(CallbackHandle<RoomMember::State>); 759template void RoomMember::Unbind(CallbackHandle<RoomMember::State>);
703template void RoomMember::Unbind(CallbackHandle<RoomMember::Error>); 760template void RoomMember::Unbind(CallbackHandle<RoomMember::Error>);
704template void RoomMember::Unbind(CallbackHandle<RoomInformation>); 761template void RoomMember::Unbind(CallbackHandle<RoomInformation>);
diff --git a/src/network/room_member.h b/src/network/room_member.h
index f578f7f6a..0d6417294 100644
--- a/src/network/room_member.h
+++ b/src/network/room_member.h
@@ -17,7 +17,24 @@ namespace Network {
17using AnnounceMultiplayerRoom::GameInfo; 17using AnnounceMultiplayerRoom::GameInfo;
18using AnnounceMultiplayerRoom::RoomInformation; 18using AnnounceMultiplayerRoom::RoomInformation;
19 19
20/// Information about the received WiFi packets. 20enum class LDNPacketType : u8 {
21 Scan,
22 ScanResp,
23 Connect,
24 SyncNetwork,
25 Disconnect,
26 DestroyNetwork,
27};
28
29struct LDNPacket {
30 LDNPacketType type;
31 IPv4Address local_ip;
32 IPv4Address remote_ip;
33 bool broadcast;
34 std::vector<u8> data;
35};
36
37/// Information about the received proxy packets.
21struct ProxyPacket { 38struct ProxyPacket {
22 SockAddrIn local_endpoint; 39 SockAddrIn local_endpoint;
23 SockAddrIn remote_endpoint; 40 SockAddrIn remote_endpoint;
@@ -152,6 +169,12 @@ public:
152 void SendProxyPacket(const ProxyPacket& packet); 169 void SendProxyPacket(const ProxyPacket& packet);
153 170
154 /** 171 /**
172 * Sends an LDN packet to the room.
 173 * @param packet The LDN packet to send.
174 */
175 void SendLdnPacket(const LDNPacket& packet);
176
177 /**
155 * Sends a chat message to the room. 178 * Sends a chat message to the room.
156 * @param message The contents of the message. 179 * @param message The contents of the message.
157 */ 180 */
@@ -205,6 +228,16 @@ public:
205 std::function<void(const ProxyPacket&)> callback); 228 std::function<void(const ProxyPacket&)> callback);
206 229
207 /** 230 /**
231 * Binds a function to an event that will be triggered every time an LDNPacket is received.
 232 * The function will be called every time the event is triggered.
233 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
234 * @param callback The function to call
235 * @return A handle used for removing the function from the registered list
236 */
237 CallbackHandle<LDNPacket> BindOnLdnPacketReceived(
238 std::function<void(const LDNPacket&)> callback);
239
240 /**
208 * Binds a function to an event that will be triggered every time the RoomInformation changes. 241 * Binds a function to an event that will be triggered every time the RoomInformation changes.
 209 * The function will be called every time the event is triggered. 242 * The function will be called every time the event is triggered.
210 * The callback function must not bind or unbind a function. Doing so will cause a deadlock 243 * The callback function must not bind or unbind a function. Doing so will cause a deadlock
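
For orientation, a minimal usage sketch of the LDN surface declared above. The RoomMember, LDNPacket, LDNPacketType, SendLdnPacket, BindOnLdnPacketReceived, and Unbind names all come from this diff; the already connected `room_member` and the `ExampleLdnUsage` helper are hypothetical:

```cpp
#include "network/room_member.h"

void ExampleLdnUsage(Network::RoomMember& room_member) {
    // React to incoming LDN traffic. As the comments above warn, the
    // callback must not bind or unbind another callback (deadlock).
    const auto handle = room_member.BindOnLdnPacketReceived(
        [](const Network::LDNPacket& packet) {
            if (packet.type == Network::LDNPacketType::Scan) {
                // A host would typically answer with a ScanResp here.
            }
        });

    // Broadcast a scan request to every other member of the room;
    // remote_ip is ignored by the server when the broadcast flag is set.
    Network::LDNPacket scan{};
    scan.type = Network::LDNPacketType::Scan;
    scan.broadcast = true;
    room_member.SendLdnPacket(scan);

    // Detach the callback once it is no longer needed.
    room_member.Unbind(handle);
}
```
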
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm.cpp b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
index 97a6b383b..01f9abc71 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm.cpp
@@ -175,7 +175,7 @@ bool IsReference(IR::Inst& inst) {
175} 175}
176 176
177void PrecolorInst(IR::Inst& phi) { 177void PrecolorInst(IR::Inst& phi) {
178 // Insert phi moves before references to avoid overwritting other phis 178 // Insert phi moves before references to avoid overwriting other phis
179 const size_t num_args{phi.NumArgs()}; 179 const size_t num_args{phi.NumArgs()};
180 for (size_t i = 0; i < num_args; ++i) { 180 for (size_t i = 0; i < num_args; ++i) {
181 IR::Block& phi_block{*phi.PhiBlock(i)}; 181 IR::Block& phi_block{*phi.PhiBlock(i)};
diff --git a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
index b5c08d611..7e8f37563 100644
--- a/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
+++ b/src/shader_recompiler/backend/glasm/emit_glasm_context_get_set.cpp
@@ -13,9 +13,6 @@ namespace Shader::Backend::GLASM {
13namespace { 13namespace {
14void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset, 14void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset,
15 std::string_view size) { 15 std::string_view size) {
16 if (!binding.IsImmediate()) {
17 throw NotImplementedException("Indirect constant buffer loading");
18 }
19 const Register ret{ctx.reg_alloc.Define(inst)}; 16 const Register ret{ctx.reg_alloc.Define(inst)};
20 if (offset.type == Type::U32) { 17 if (offset.type == Type::U32) {
21 // Avoid reading arrays out of bounds, matching hardware's behavior 18 // Avoid reading arrays out of bounds, matching hardware's behavior
@@ -24,7 +21,27 @@ void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU
24 return; 21 return;
25 } 22 }
26 } 23 }
27 ctx.Add("LDC.{} {},c{}[{}];", size, ret, binding.U32(), offset); 24
25 if (binding.IsImmediate()) {
26 ctx.Add("LDC.{} {},c{}[{}];", size, ret, binding.U32(), offset);
27 return;
28 }
29
30 const ScalarU32 idx{ctx.reg_alloc.Consume(binding)};
31 for (u32 i = 0; i < Info::MAX_INDIRECT_CBUFS; i++) {
32 ctx.Add("SEQ.S.CC RC.x,{},{};"
33 "IF NE.x;"
34 "LDC.{} {},c{}[{}];",
35 idx, i, size, ret, i, offset);
36
37 if (i != Info::MAX_INDIRECT_CBUFS - 1) {
38 ctx.Add("ELSE;");
39 }
40 }
41
42 for (u32 i = 0; i < Info::MAX_INDIRECT_CBUFS; i++) {
43 ctx.Add("ENDIF;");
44 }
28} 45}
29 46
30bool IsInputArray(Stage stage) { 47bool IsInputArray(Stage stage) {
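
The rewritten GetCbuf no longer throws on indirect constant buffer loads; instead it emits a compare-and-branch cascade: one IF per possible constant buffer, an ELSE between consecutive tests, and one ENDIF per IF. Approximately, for a hypothetical Info::MAX_INDIRECT_CBUFS of 3 and with `idx`, `size`, `ret`, and `offset` standing in for the formatted operands, the two loops emit:

```cpp
// Approximate GLASM text produced by the loops above (illustrative only):
//
//   SEQ.S.CC RC.x,idx,0;IF NE.x;LDC.size ret,c0[offset];
//   ELSE;
//   SEQ.S.CC RC.x,idx,1;IF NE.x;LDC.size ret,c1[offset];
//   ELSE;
//   SEQ.S.CC RC.x,idx,2;IF NE.x;LDC.size ret,c2[offset];
//   ENDIF;
//   ENDIF;
//   ENDIF;
//
// The last iteration emits no ELSE, and the second loop closes every IF.
```
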
diff --git a/src/shader_recompiler/backend/glsl/emit_glsl.cpp b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
index 76c18e488..e8a4390f6 100644
--- a/src/shader_recompiler/backend/glsl/emit_glsl.cpp
+++ b/src/shader_recompiler/backend/glsl/emit_glsl.cpp
@@ -101,7 +101,7 @@ bool IsReference(IR::Inst& inst) {
101} 101}
102 102
103void PrecolorInst(IR::Inst& phi) { 103void PrecolorInst(IR::Inst& phi) {
104 // Insert phi moves before references to avoid overwritting other phis 104 // Insert phi moves before references to avoid overwriting other phis
105 const size_t num_args{phi.NumArgs()}; 105 const size_t num_args{phi.NumArgs()};
106 for (size_t i = 0; i < num_args; ++i) { 106 for (size_t i = 0; i < num_args; ++i) {
107 IR::Block& phi_block{*phi.PhiBlock(i)}; 107 IR::Block& phi_block{*phi.PhiBlock(i)};
diff --git a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
index 578bc8c1b..ce42475d4 100644
--- a/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
+++ b/src/shader_recompiler/frontend/maxwell/structured_control_flow.cpp
@@ -964,9 +964,9 @@ private:
964 demote_endif_node.type = Type::EndIf; 964 demote_endif_node.type = Type::EndIf;
965 demote_endif_node.data.end_if.merge = return_block_it->data.block; 965 demote_endif_node.data.end_if.merge = return_block_it->data.block;
966 966
967 asl.insert(return_block_it, demote_endif_node); 967 const auto next_it_1 = asl.insert(return_block_it, demote_endif_node);
968 asl.insert(return_block_it, demote_node); 968 const auto next_it_2 = asl.insert(next_it_1, demote_node);
969 asl.insert(return_block_it, demote_if_node); 969 asl.insert(next_it_2, demote_if_node);
970 } 970 }
971 971
972 ObjectPool<Statement>& stmt_pool; 972 ObjectPool<Statement>& stmt_pool;
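
The change above fixes an insertion-order bug: an insert-before operation returns an iterator to the new element, so inserting all three nodes before return_block_it laid them out in reverse (EndIf, Demote, If). Chaining the returned iterators restores the intended If, Demote, EndIf order. A standalone illustration, assuming the syntax list has std::list-style insert-before semantics (which is what the fix relies on):

```cpp
#include <cassert>
#include <list>

int main() {
    std::list<char> bad{'R'};
    const auto pos = bad.begin();
    // Old approach: always insert before 'R'; each later insert lands
    // after the earlier ones, producing E, D, I instead of I, D, E.
    bad.insert(pos, 'E');
    bad.insert(pos, 'D');
    bad.insert(pos, 'I');
    assert((bad == std::list<char>{'E', 'D', 'I', 'R'}));

    std::list<char> good{'R'};
    // Fixed approach: chain the returned iterators so each node goes
    // in front of the previously inserted one.
    const auto e = good.insert(good.begin(), 'E');
    const auto d = good.insert(e, 'D');
    good.insert(d, 'I');
    assert((good == std::list<char>{'I', 'D', 'E', 'R'}));
}
```
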
diff --git a/src/shader_recompiler/ir_opt/texture_pass.cpp b/src/shader_recompiler/ir_opt/texture_pass.cpp
index 597112ba4..e8be58357 100644
--- a/src/shader_recompiler/ir_opt/texture_pass.cpp
+++ b/src/shader_recompiler/ir_opt/texture_pass.cpp
@@ -19,8 +19,10 @@ namespace {
19struct ConstBufferAddr { 19struct ConstBufferAddr {
20 u32 index; 20 u32 index;
21 u32 offset; 21 u32 offset;
22 u32 shift_left;
22 u32 secondary_index; 23 u32 secondary_index;
23 u32 secondary_offset; 24 u32 secondary_offset;
25 u32 secondary_shift_left;
24 IR::U32 dynamic_offset; 26 IR::U32 dynamic_offset;
25 u32 count; 27 u32 count;
26 bool has_secondary; 28 bool has_secondary;
@@ -172,19 +174,41 @@ bool IsTextureInstruction(const IR::Inst& inst) {
172 return IndexedInstruction(inst) != IR::Opcode::Void; 174 return IndexedInstruction(inst) != IR::Opcode::Void;
173} 175}
174 176
175std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst); 177std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env);
176 178
177std::optional<ConstBufferAddr> Track(const IR::Value& value) { 179std::optional<ConstBufferAddr> Track(const IR::Value& value, Environment& env) {
178 return IR::BreadthFirstSearch(value, TryGetConstBuffer); 180 return IR::BreadthFirstSearch(
181 value, [&env](const IR::Inst* inst) { return TryGetConstBuffer(inst, env); });
179} 182}
180 183
181std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) { 184std::optional<u32> TryGetConstant(IR::Value& value, Environment& env) {
185 const IR::Inst* inst = value.InstRecursive();
186 if (inst->GetOpcode() != IR::Opcode::GetCbufU32) {
187 return std::nullopt;
188 }
189 const IR::Value index{inst->Arg(0)};
190 const IR::Value offset{inst->Arg(1)};
191 if (!index.IsImmediate()) {
192 return std::nullopt;
193 }
194 if (!offset.IsImmediate()) {
195 return std::nullopt;
196 }
197 const auto index_number = index.U32();
198 if (index_number != 1) {
199 return std::nullopt;
200 }
201 const auto offset_number = offset.U32();
202 return env.ReadCbufValue(index_number, offset_number);
203}
204
205std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst, Environment& env) {
182 switch (inst->GetOpcode()) { 206 switch (inst->GetOpcode()) {
183 default: 207 default:
184 return std::nullopt; 208 return std::nullopt;
185 case IR::Opcode::BitwiseOr32: { 209 case IR::Opcode::BitwiseOr32: {
186 std::optional lhs{Track(inst->Arg(0))}; 210 std::optional lhs{Track(inst->Arg(0), env)};
187 std::optional rhs{Track(inst->Arg(1))}; 211 std::optional rhs{Track(inst->Arg(1), env)};
188 if (!lhs || !rhs) { 212 if (!lhs || !rhs) {
189 return std::nullopt; 213 return std::nullopt;
190 } 214 }
@@ -194,19 +218,62 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
194 if (lhs->count > 1 || rhs->count > 1) { 218 if (lhs->count > 1 || rhs->count > 1) {
195 return std::nullopt; 219 return std::nullopt;
196 } 220 }
197 if (lhs->index > rhs->index || lhs->offset > rhs->offset) { 221 if (lhs->shift_left > 0 || lhs->index > rhs->index || lhs->offset > rhs->offset) {
198 std::swap(lhs, rhs); 222 std::swap(lhs, rhs);
199 } 223 }
200 return ConstBufferAddr{ 224 return ConstBufferAddr{
201 .index = lhs->index, 225 .index = lhs->index,
202 .offset = lhs->offset, 226 .offset = lhs->offset,
227 .shift_left = lhs->shift_left,
203 .secondary_index = rhs->index, 228 .secondary_index = rhs->index,
204 .secondary_offset = rhs->offset, 229 .secondary_offset = rhs->offset,
230 .secondary_shift_left = rhs->shift_left,
205 .dynamic_offset = {}, 231 .dynamic_offset = {},
206 .count = 1, 232 .count = 1,
207 .has_secondary = true, 233 .has_secondary = true,
208 }; 234 };
209 } 235 }
236 case IR::Opcode::ShiftLeftLogical32: {
237 const IR::Value shift{inst->Arg(1)};
238 if (!shift.IsImmediate()) {
239 return std::nullopt;
240 }
241 std::optional lhs{Track(inst->Arg(0), env)};
242 if (lhs) {
243 lhs->shift_left = shift.U32();
244 }
245 return lhs;
246 break;
247 }
248 case IR::Opcode::BitwiseAnd32: {
249 IR::Value op1{inst->Arg(0)};
250 IR::Value op2{inst->Arg(1)};
251 if (op1.IsImmediate()) {
252 std::swap(op1, op2);
253 }
254 if (!op2.IsImmediate() && !op1.IsImmediate()) {
255 do {
256 auto try_index = TryGetConstant(op1, env);
257 if (try_index) {
258 op1 = op2;
259 op2 = IR::Value{*try_index};
260 break;
261 }
262 auto try_index_2 = TryGetConstant(op2, env);
263 if (try_index_2) {
264 op2 = IR::Value{*try_index_2};
265 break;
266 }
267 return std::nullopt;
268 } while (false);
269 }
270 std::optional lhs{Track(op1, env)};
271 if (lhs) {
272 lhs->shift_left = static_cast<u32>(std::countr_zero(op2.U32()));
273 }
274 return lhs;
275 break;
276 }
210 case IR::Opcode::GetCbufU32x2: 277 case IR::Opcode::GetCbufU32x2:
211 case IR::Opcode::GetCbufU32: 278 case IR::Opcode::GetCbufU32:
212 break; 279 break;
@@ -222,8 +289,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
222 return ConstBufferAddr{ 289 return ConstBufferAddr{
223 .index = index.U32(), 290 .index = index.U32(),
224 .offset = offset.U32(), 291 .offset = offset.U32(),
292 .shift_left = 0,
225 .secondary_index = 0, 293 .secondary_index = 0,
226 .secondary_offset = 0, 294 .secondary_offset = 0,
295 .secondary_shift_left = 0,
227 .dynamic_offset = {}, 296 .dynamic_offset = {},
228 .count = 1, 297 .count = 1,
229 .has_secondary = false, 298 .has_secondary = false,
@@ -247,8 +316,10 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
247 return ConstBufferAddr{ 316 return ConstBufferAddr{
248 .index = index.U32(), 317 .index = index.U32(),
249 .offset = base_offset, 318 .offset = base_offset,
319 .shift_left = 0,
250 .secondary_index = 0, 320 .secondary_index = 0,
251 .secondary_offset = 0, 321 .secondary_offset = 0,
322 .secondary_shift_left = 0,
252 .dynamic_offset = dynamic_offset, 323 .dynamic_offset = dynamic_offset,
253 .count = 8, 324 .count = 8,
254 .has_secondary = false, 325 .has_secondary = false,
@@ -258,7 +329,7 @@ std::optional<ConstBufferAddr> TryGetConstBuffer(const IR::Inst* inst) {
258TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) { 329TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
259 ConstBufferAddr addr; 330 ConstBufferAddr addr;
260 if (IsBindless(inst)) { 331 if (IsBindless(inst)) {
261 const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0))}; 332 const std::optional<ConstBufferAddr> track_addr{Track(inst.Arg(0), env)};
262 if (!track_addr) { 333 if (!track_addr) {
263 throw NotImplementedException("Failed to track bindless texture constant buffer"); 334 throw NotImplementedException("Failed to track bindless texture constant buffer");
264 } 335 }
@@ -267,8 +338,10 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
267 addr = ConstBufferAddr{ 338 addr = ConstBufferAddr{
268 .index = env.TextureBoundBuffer(), 339 .index = env.TextureBoundBuffer(),
269 .offset = inst.Arg(0).U32(), 340 .offset = inst.Arg(0).U32(),
341 .shift_left = 0,
270 .secondary_index = 0, 342 .secondary_index = 0,
271 .secondary_offset = 0, 343 .secondary_offset = 0,
344 .secondary_shift_left = 0,
272 .dynamic_offset = {}, 345 .dynamic_offset = {},
273 .count = 1, 346 .count = 1,
274 .has_secondary = false, 347 .has_secondary = false,
@@ -284,8 +357,9 @@ TextureInst MakeInst(Environment& env, IR::Block* block, IR::Inst& inst) {
284TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) { 357TextureType ReadTextureType(Environment& env, const ConstBufferAddr& cbuf) {
285 const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index}; 358 const u32 secondary_index{cbuf.has_secondary ? cbuf.secondary_index : cbuf.index};
286 const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset}; 359 const u32 secondary_offset{cbuf.has_secondary ? cbuf.secondary_offset : cbuf.offset};
287 const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset)}; 360 const u32 lhs_raw{env.ReadCbufValue(cbuf.index, cbuf.offset) << cbuf.shift_left};
288 const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)}; 361 const u32 rhs_raw{env.ReadCbufValue(secondary_index, secondary_offset)
362 << cbuf.secondary_shift_left};
289 return env.ReadTextureType(lhs_raw | rhs_raw); 363 return env.ReadTextureType(lhs_raw | rhs_raw);
290} 364}
291 365
@@ -487,8 +561,10 @@ void TexturePass(Environment& env, IR::Program& program) {
487 .has_secondary = cbuf.has_secondary, 561 .has_secondary = cbuf.has_secondary,
488 .cbuf_index = cbuf.index, 562 .cbuf_index = cbuf.index,
489 .cbuf_offset = cbuf.offset, 563 .cbuf_offset = cbuf.offset,
564 .shift_left = cbuf.shift_left,
490 .secondary_cbuf_index = cbuf.secondary_index, 565 .secondary_cbuf_index = cbuf.secondary_index,
491 .secondary_cbuf_offset = cbuf.secondary_offset, 566 .secondary_cbuf_offset = cbuf.secondary_offset,
567 .secondary_shift_left = cbuf.secondary_shift_left,
492 .count = cbuf.count, 568 .count = cbuf.count,
493 .size_shift = DESCRIPTOR_SIZE_SHIFT, 569 .size_shift = DESCRIPTOR_SIZE_SHIFT,
494 }); 570 });
@@ -499,8 +575,10 @@ void TexturePass(Environment& env, IR::Program& program) {
499 .has_secondary = cbuf.has_secondary, 575 .has_secondary = cbuf.has_secondary,
500 .cbuf_index = cbuf.index, 576 .cbuf_index = cbuf.index,
501 .cbuf_offset = cbuf.offset, 577 .cbuf_offset = cbuf.offset,
578 .shift_left = cbuf.shift_left,
502 .secondary_cbuf_index = cbuf.secondary_index, 579 .secondary_cbuf_index = cbuf.secondary_index,
503 .secondary_cbuf_offset = cbuf.secondary_offset, 580 .secondary_cbuf_offset = cbuf.secondary_offset,
581 .secondary_shift_left = cbuf.secondary_shift_left,
504 .count = cbuf.count, 582 .count = cbuf.count,
505 .size_shift = DESCRIPTOR_SIZE_SHIFT, 583 .size_shift = DESCRIPTOR_SIZE_SHIFT,
506 }); 584 });
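
TryGetConstBuffer now survives two more opcodes instead of failing to track the handle: an immediate ShiftLeftLogical32 stores its shift amount, and a BitwiseAnd32 with a constant mask stores the mask's trailing-zero count as an equivalent shift. ReadTextureType above applies those shifts when rebuilding the raw handle from the two constant-buffer words. A hedged sketch of that recombination with purely illustrative values:

```cpp
#include <cstdint>

// Mirrors ReadTextureType() above: each raw constant-buffer word is
// shifted into place before the two halves are OR'd together.
constexpr std::uint32_t CombineHandle(std::uint32_t lhs_raw, std::uint32_t shift_left,
                                      std::uint32_t rhs_raw,
                                      std::uint32_t secondary_shift_left) {
    return (lhs_raw << shift_left) | (rhs_raw << secondary_shift_left);
}

// e.g. a texture index in the low bits OR'd with a second word the shader
// shifted left by 20 bits (illustrative values, not real handles):
static_assert(CombineHandle(0x42, 0, 0x7, 20) == 0x0070'0042);
```
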
diff --git a/src/shader_recompiler/shader_info.h b/src/shader_recompiler/shader_info.h
index f5690805c..cc596da4f 100644
--- a/src/shader_recompiler/shader_info.h
+++ b/src/shader_recompiler/shader_info.h
@@ -61,8 +61,10 @@ struct TextureBufferDescriptor {
61 bool has_secondary; 61 bool has_secondary;
62 u32 cbuf_index; 62 u32 cbuf_index;
63 u32 cbuf_offset; 63 u32 cbuf_offset;
64 u32 shift_left;
64 u32 secondary_cbuf_index; 65 u32 secondary_cbuf_index;
65 u32 secondary_cbuf_offset; 66 u32 secondary_cbuf_offset;
67 u32 secondary_shift_left;
66 u32 count; 68 u32 count;
67 u32 size_shift; 69 u32 size_shift;
68}; 70};
@@ -85,8 +87,10 @@ struct TextureDescriptor {
85 bool has_secondary; 87 bool has_secondary;
86 u32 cbuf_index; 88 u32 cbuf_index;
87 u32 cbuf_offset; 89 u32 cbuf_offset;
90 u32 shift_left;
88 u32 secondary_cbuf_index; 91 u32 secondary_cbuf_index;
89 u32 secondary_cbuf_offset; 92 u32 secondary_cbuf_offset;
93 u32 secondary_shift_left;
90 u32 count; 94 u32 count;
91 u32 size_shift; 95 u32 size_shift;
92}; 96};
diff --git a/src/video_core/CMakeLists.txt b/src/video_core/CMakeLists.txt
index 5b3808351..40e6d1ec4 100644
--- a/src/video_core/CMakeLists.txt
+++ b/src/video_core/CMakeLists.txt
@@ -4,7 +4,7 @@
4add_subdirectory(host_shaders) 4add_subdirectory(host_shaders)
5 5
6if(LIBVA_FOUND) 6if(LIBVA_FOUND)
7 set_source_files_properties(command_classes/codecs/codec.cpp 7 set_source_files_properties(host1x/codecs/codec.cpp
8 PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1) 8 PROPERTIES COMPILE_DEFINITIONS LIBVA_FOUND=1)
9 list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES}) 9 list(APPEND FFmpeg_LIBRARIES ${LIBVA_LIBRARIES})
10endif() 10endif()
@@ -15,26 +15,14 @@ add_library(video_core STATIC
15 buffer_cache/buffer_cache.h 15 buffer_cache/buffer_cache.h
16 cdma_pusher.cpp 16 cdma_pusher.cpp
17 cdma_pusher.h 17 cdma_pusher.h
18 command_classes/codecs/codec.cpp
19 command_classes/codecs/codec.h
20 command_classes/codecs/h264.cpp
21 command_classes/codecs/h264.h
22 command_classes/codecs/vp8.cpp
23 command_classes/codecs/vp8.h
24 command_classes/codecs/vp9.cpp
25 command_classes/codecs/vp9.h
26 command_classes/codecs/vp9_types.h
27 command_classes/host1x.cpp
28 command_classes/host1x.h
29 command_classes/nvdec.cpp
30 command_classes/nvdec.h
31 command_classes/nvdec_common.h
32 command_classes/sync_manager.cpp
33 command_classes/sync_manager.h
34 command_classes/vic.cpp
35 command_classes/vic.h
36 compatible_formats.cpp 18 compatible_formats.cpp
37 compatible_formats.h 19 compatible_formats.h
20 control/channel_state.cpp
21 control/channel_state.h
22 control/channel_state_cache.cpp
23 control/channel_state_cache.h
24 control/scheduler.cpp
25 control/scheduler.h
38 delayed_destruction_ring.h 26 delayed_destruction_ring.h
39 dirty_flags.cpp 27 dirty_flags.cpp
40 dirty_flags.h 28 dirty_flags.h
@@ -54,7 +42,31 @@ add_library(video_core STATIC
54 engines/maxwell_3d.h 42 engines/maxwell_3d.h
55 engines/maxwell_dma.cpp 43 engines/maxwell_dma.cpp
56 engines/maxwell_dma.h 44 engines/maxwell_dma.h
45 engines/puller.cpp
46 engines/puller.h
57 framebuffer_config.h 47 framebuffer_config.h
48 host1x/codecs/codec.cpp
49 host1x/codecs/codec.h
50 host1x/codecs/h264.cpp
51 host1x/codecs/h264.h
52 host1x/codecs/vp8.cpp
53 host1x/codecs/vp8.h
54 host1x/codecs/vp9.cpp
55 host1x/codecs/vp9.h
56 host1x/codecs/vp9_types.h
57 host1x/control.cpp
58 host1x/control.h
59 host1x/host1x.cpp
60 host1x/host1x.h
61 host1x/nvdec.cpp
62 host1x/nvdec.h
63 host1x/nvdec_common.h
64 host1x/sync_manager.cpp
65 host1x/sync_manager.h
66 host1x/syncpoint_manager.cpp
67 host1x/syncpoint_manager.h
68 host1x/vic.cpp
69 host1x/vic.h
58 macro/macro.cpp 70 macro/macro.cpp
59 macro/macro.h 71 macro/macro.h
60 macro/macro_hle.cpp 72 macro/macro_hle.cpp
@@ -195,6 +207,7 @@ add_library(video_core STATIC
195 texture_cache/render_targets.h 207 texture_cache/render_targets.h
196 texture_cache/samples_helper.h 208 texture_cache/samples_helper.h
197 texture_cache/slot_vector.h 209 texture_cache/slot_vector.h
210 texture_cache/texture_cache.cpp
198 texture_cache/texture_cache.h 211 texture_cache/texture_cache.h
199 texture_cache/texture_cache_base.h 212 texture_cache/texture_cache_base.h
200 texture_cache/types.h 213 texture_cache/types.h
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index f015dae56..8e26b3f95 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -5,7 +5,6 @@
5 5
6#include <algorithm> 6#include <algorithm>
7#include <array> 7#include <array>
8#include <deque>
9#include <memory> 8#include <memory>
10#include <mutex> 9#include <mutex>
11#include <numeric> 10#include <numeric>
@@ -23,6 +22,7 @@
23#include "common/settings.h" 22#include "common/settings.h"
24#include "core/memory.h" 23#include "core/memory.h"
25#include "video_core/buffer_cache/buffer_base.h" 24#include "video_core/buffer_cache/buffer_base.h"
25#include "video_core/control/channel_state_cache.h"
26#include "video_core/delayed_destruction_ring.h" 26#include "video_core/delayed_destruction_ring.h"
27#include "video_core/dirty_flags.h" 27#include "video_core/dirty_flags.h"
28#include "video_core/engines/kepler_compute.h" 28#include "video_core/engines/kepler_compute.h"
@@ -56,7 +56,7 @@ using UniformBufferSizes = std::array<std::array<u32, NUM_GRAPHICS_UNIFORM_BUFFE
56using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>; 56using ComputeUniformBufferSizes = std::array<u32, NUM_COMPUTE_UNIFORM_BUFFERS>;
57 57
58template <typename P> 58template <typename P>
59class BufferCache { 59class BufferCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
60 60
61 // Page size for caching purposes. 61 // Page size for caching purposes.
62 // This is unrelated to the CPU page size and it can be changed as it seems optimal. 62 // This is unrelated to the CPU page size and it can be changed as it seems optimal.
@@ -116,10 +116,7 @@ public:
116 static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB); 116 static constexpr u32 DEFAULT_SKIP_CACHE_SIZE = static_cast<u32>(4_KiB);
117 117
118 explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_, 118 explicit BufferCache(VideoCore::RasterizerInterface& rasterizer_,
119 Tegra::Engines::Maxwell3D& maxwell3d_, 119 Core::Memory::Memory& cpu_memory_, Runtime& runtime_);
120 Tegra::Engines::KeplerCompute& kepler_compute_,
121 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
122 Runtime& runtime_);
123 120
124 void TickFrame(); 121 void TickFrame();
125 122
@@ -129,7 +126,7 @@ public:
129 126
130 void DownloadMemory(VAddr cpu_addr, u64 size); 127 void DownloadMemory(VAddr cpu_addr, u64 size);
131 128
132 bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<u8> inlined_buffer); 129 bool InlineMemory(VAddr dest_address, size_t copy_size, std::span<const u8> inlined_buffer);
133 130
134 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size); 131 void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size);
135 132
@@ -353,7 +350,7 @@ private:
353 350
354 void NotifyBufferDeletion(); 351 void NotifyBufferDeletion();
355 352
356 [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr) const; 353 [[nodiscard]] Binding StorageBufferBinding(GPUVAddr ssbo_addr, bool is_written = false) const;
357 354
358 [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size, 355 [[nodiscard]] TextureBufferBinding GetTextureBufferBinding(GPUVAddr gpu_addr, u32 size,
359 PixelFormat format); 356 PixelFormat format);
@@ -367,9 +364,6 @@ private:
367 void ClearDownload(IntervalType subtract_interval); 364 void ClearDownload(IntervalType subtract_interval);
368 365
369 VideoCore::RasterizerInterface& rasterizer; 366 VideoCore::RasterizerInterface& rasterizer;
370 Tegra::Engines::Maxwell3D& maxwell3d;
371 Tegra::Engines::KeplerCompute& kepler_compute;
372 Tegra::MemoryManager& gpu_memory;
373 Core::Memory::Memory& cpu_memory; 367 Core::Memory::Memory& cpu_memory;
374 368
375 SlotVector<Buffer> slot_buffers; 369 SlotVector<Buffer> slot_buffers;
@@ -444,12 +438,8 @@ private:
444 438
445template <class P> 439template <class P>
446BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_, 440BufferCache<P>::BufferCache(VideoCore::RasterizerInterface& rasterizer_,
447 Tegra::Engines::Maxwell3D& maxwell3d_, 441 Core::Memory::Memory& cpu_memory_, Runtime& runtime_)
448 Tegra::Engines::KeplerCompute& kepler_compute_, 442 : runtime{runtime_}, rasterizer{rasterizer_}, cpu_memory{cpu_memory_} {
449 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_,
450 Runtime& runtime_)
451 : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
452 kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_}, cpu_memory{cpu_memory_} {
453 // Ensure the first slot is used for the null buffer 443 // Ensure the first slot is used for the null buffer
454 void(slot_buffers.insert(runtime, NullBufferParams{})); 444 void(slot_buffers.insert(runtime, NullBufferParams{}));
455 common_ranges.clear(); 445 common_ranges.clear();
@@ -552,8 +542,8 @@ void BufferCache<P>::ClearDownload(IntervalType subtract_interval) {
552 542
553template <class P> 543template <class P>
554bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) { 544bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
555 const std::optional<VAddr> cpu_src_address = gpu_memory.GpuToCpuAddress(src_address); 545 const std::optional<VAddr> cpu_src_address = gpu_memory->GpuToCpuAddress(src_address);
556 const std::optional<VAddr> cpu_dest_address = gpu_memory.GpuToCpuAddress(dest_address); 546 const std::optional<VAddr> cpu_dest_address = gpu_memory->GpuToCpuAddress(dest_address);
557 if (!cpu_src_address || !cpu_dest_address) { 547 if (!cpu_src_address || !cpu_dest_address) {
558 return false; 548 return false;
559 } 549 }
@@ -611,7 +601,7 @@ bool BufferCache<P>::DMACopy(GPUVAddr src_address, GPUVAddr dest_address, u64 am
611 601
612template <class P> 602template <class P>
613bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) { 603bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
614 const std::optional<VAddr> cpu_dst_address = gpu_memory.GpuToCpuAddress(dst_address); 604 const std::optional<VAddr> cpu_dst_address = gpu_memory->GpuToCpuAddress(dst_address);
615 if (!cpu_dst_address) { 605 if (!cpu_dst_address) {
616 return false; 606 return false;
617 } 607 }
@@ -635,7 +625,7 @@ bool BufferCache<P>::DMAClear(GPUVAddr dst_address, u64 amount, u32 value) {
635template <class P> 625template <class P>
636void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, 626void BufferCache<P>::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
637 u32 size) { 627 u32 size) {
638 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 628 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
639 const Binding binding{ 629 const Binding binding{
640 .cpu_addr = *cpu_addr, 630 .cpu_addr = *cpu_addr,
641 .size = size, 631 .size = size,
@@ -673,7 +663,7 @@ void BufferCache<P>::BindHostGeometryBuffers(bool is_indexed) {
673 if (is_indexed) { 663 if (is_indexed) {
674 BindHostIndexBuffer(); 664 BindHostIndexBuffer();
675 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { 665 } else if constexpr (!HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
676 const auto& regs = maxwell3d.regs; 666 const auto& regs = maxwell3d->regs;
677 if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) { 667 if (regs.draw.topology == Maxwell::PrimitiveTopology::Quads) {
678 runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count); 668 runtime.BindQuadArrayIndexBuffer(regs.vertex_buffer.first, regs.vertex_buffer.count);
679 } 669 }
@@ -733,9 +723,9 @@ void BufferCache<P>::BindGraphicsStorageBuffer(size_t stage, size_t ssbo_index,
733 enabled_storage_buffers[stage] |= 1U << ssbo_index; 723 enabled_storage_buffers[stage] |= 1U << ssbo_index;
734 written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index; 724 written_storage_buffers[stage] |= (is_written ? 1U : 0U) << ssbo_index;
735 725
736 const auto& cbufs = maxwell3d.state.shader_stages[stage]; 726 const auto& cbufs = maxwell3d->state.shader_stages[stage];
737 const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset; 727 const GPUVAddr ssbo_addr = cbufs.const_buffers[cbuf_index].address + cbuf_offset;
738 storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr); 728 storage_buffers[stage][ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
739} 729}
740 730
741template <class P> 731template <class P>
@@ -770,12 +760,12 @@ void BufferCache<P>::BindComputeStorageBuffer(size_t ssbo_index, u32 cbuf_index,
770 enabled_compute_storage_buffers |= 1U << ssbo_index; 760 enabled_compute_storage_buffers |= 1U << ssbo_index;
771 written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index; 761 written_compute_storage_buffers |= (is_written ? 1U : 0U) << ssbo_index;
772 762
773 const auto& launch_desc = kepler_compute.launch_description; 763 const auto& launch_desc = kepler_compute->launch_description;
774 ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0); 764 ASSERT(((launch_desc.const_buffer_enable_mask >> cbuf_index) & 1) != 0);
775 765
776 const auto& cbufs = launch_desc.const_buffer_config; 766 const auto& cbufs = launch_desc.const_buffer_config;
777 const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset; 767 const GPUVAddr ssbo_addr = cbufs[cbuf_index].Address() + cbuf_offset;
778 compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr); 768 compute_storage_buffers[ssbo_index] = StorageBufferBinding(ssbo_addr, is_written);
779} 769}
780 770
781template <class P> 771template <class P>
@@ -836,6 +826,19 @@ void BufferCache<P>::CommitAsyncFlushesHigh() {
836 const bool is_accuracy_normal = 826 const bool is_accuracy_normal =
837 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal; 827 Settings::values.gpu_accuracy.GetValue() == Settings::GPUAccuracy::Normal;
838 828
829 auto it = committed_ranges.begin();
830 while (it != committed_ranges.end()) {
831 auto& current_intervals = *it;
832 auto next_it = std::next(it);
833 while (next_it != committed_ranges.end()) {
834 for (auto& interval : *next_it) {
835 current_intervals.subtract(interval);
836 }
837 next_it++;
838 }
839 it++;
840 }
841
839 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads; 842 boost::container::small_vector<std::pair<BufferCopy, BufferId>, 1> downloads;
840 u64 total_size_bytes = 0; 843 u64 total_size_bytes = 0;
841 u64 largest_copy = 0; 844 u64 largest_copy = 0;
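
The inserted loop removes ranges that appear in more than one pending commit, so a byte range committed twice is only downloaded once. A self-contained sketch of the same subtraction, assuming the buffer cache's IntervalType is built on boost::icl interval sets:

```cpp
#include <cassert>
#include <deque>
#include <iterator>
#include <boost/icl/interval_set.hpp>

int main() {
    using IntervalSet = boost::icl::interval_set<unsigned long long>;
    using Interval = IntervalSet::interval_type;

    std::deque<IntervalSet> committed_ranges(2);
    committed_ranges[0] += Interval::right_open(0x1000, 0x3000);
    committed_ranges[1] += Interval::right_open(0x2000, 0x4000);

    // Mirror the loop in CommitAsyncFlushesHigh(): subtract every later
    // set's intervals from each earlier set.
    for (auto it = committed_ranges.begin(); it != committed_ranges.end(); ++it) {
        for (auto next_it = std::next(it); next_it != committed_ranges.end(); ++next_it) {
            for (const auto& interval : *next_it) {
                *it -= interval;
            }
        }
    }

    // The overlap [0x2000, 0x3000) was removed from the earlier set and
    // survives only in the later one.
    assert(!boost::icl::contains(committed_ranges[0], 0x2800ULL));
    assert(boost::icl::contains(committed_ranges[1], 0x2800ULL));
}
```
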
@@ -991,19 +994,19 @@ void BufferCache<P>::BindHostIndexBuffer() {
991 const u32 size = index_buffer.size; 994 const u32 size = index_buffer.size;
992 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size); 995 SynchronizeBuffer(buffer, index_buffer.cpu_addr, size);
993 if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) { 996 if constexpr (HAS_FULL_INDEX_AND_PRIMITIVE_SUPPORT) {
994 const u32 new_offset = offset + maxwell3d.regs.index_array.first * 997 const u32 new_offset = offset + maxwell3d->regs.index_array.first *
995 maxwell3d.regs.index_array.FormatSizeInBytes(); 998 maxwell3d->regs.index_array.FormatSizeInBytes();
996 runtime.BindIndexBuffer(buffer, new_offset, size); 999 runtime.BindIndexBuffer(buffer, new_offset, size);
997 } else { 1000 } else {
998 runtime.BindIndexBuffer(maxwell3d.regs.draw.topology, maxwell3d.regs.index_array.format, 1001 runtime.BindIndexBuffer(maxwell3d->regs.draw.topology, maxwell3d->regs.index_array.format,
999 maxwell3d.regs.index_array.first, maxwell3d.regs.index_array.count, 1002 maxwell3d->regs.index_array.first,
1000 buffer, offset, size); 1003 maxwell3d->regs.index_array.count, buffer, offset, size);
1001 } 1004 }
1002} 1005}
1003 1006
1004template <class P> 1007template <class P>
1005void BufferCache<P>::BindHostVertexBuffers() { 1008void BufferCache<P>::BindHostVertexBuffers() {
1006 auto& flags = maxwell3d.dirty.flags; 1009 auto& flags = maxwell3d->dirty.flags;
1007 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { 1010 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
1008 const Binding& binding = vertex_buffers[index]; 1011 const Binding& binding = vertex_buffers[index];
1009 Buffer& buffer = slot_buffers[binding.buffer_id]; 1012 Buffer& buffer = slot_buffers[binding.buffer_id];
@@ -1014,7 +1017,7 @@ void BufferCache<P>::BindHostVertexBuffers() {
1014 } 1017 }
1015 flags[Dirty::VertexBuffer0 + index] = false; 1018 flags[Dirty::VertexBuffer0 + index] = false;
1016 1019
1017 const u32 stride = maxwell3d.regs.vertex_array[index].stride; 1020 const u32 stride = maxwell3d->regs.vertex_array[index].stride;
1018 const u32 offset = buffer.Offset(binding.cpu_addr); 1021 const u32 offset = buffer.Offset(binding.cpu_addr);
1019 runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride); 1022 runtime.BindVertexBuffer(index, buffer, offset, binding.size, stride);
1020 } 1023 }
@@ -1154,7 +1157,7 @@ void BufferCache<P>::BindHostGraphicsTextureBuffers(size_t stage) {
1154 1157
1155template <class P> 1158template <class P>
1156void BufferCache<P>::BindHostTransformFeedbackBuffers() { 1159void BufferCache<P>::BindHostTransformFeedbackBuffers() {
1157 if (maxwell3d.regs.tfb_enabled == 0) { 1160 if (maxwell3d->regs.tfb_enabled == 0) {
1158 return; 1161 return;
1159 } 1162 }
1160 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { 1163 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1239,16 +1242,19 @@ void BufferCache<P>::BindHostComputeTextureBuffers() {
1239 1242
1240template <class P> 1243template <class P>
1241void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) { 1244void BufferCache<P>::DoUpdateGraphicsBuffers(bool is_indexed) {
1242 if (is_indexed) { 1245 do {
1243 UpdateIndexBuffer(); 1246 has_deleted_buffers = false;
1244 } 1247 if (is_indexed) {
1245 UpdateVertexBuffers(); 1248 UpdateIndexBuffer();
1246 UpdateTransformFeedbackBuffers(); 1249 }
1247 for (size_t stage = 0; stage < NUM_STAGES; ++stage) { 1250 UpdateVertexBuffers();
1248 UpdateUniformBuffers(stage); 1251 UpdateTransformFeedbackBuffers();
1249 UpdateStorageBuffers(stage); 1252 for (size_t stage = 0; stage < NUM_STAGES; ++stage) {
1250 UpdateTextureBuffers(stage); 1253 UpdateUniformBuffers(stage);
1251 } 1254 UpdateStorageBuffers(stage);
1255 UpdateTextureBuffers(stage);
1256 }
1257 } while (has_deleted_buffers);
1252} 1258}
1253 1259
1254template <class P> 1260template <class P>
@@ -1262,8 +1268,8 @@ template <class P>
1262void BufferCache<P>::UpdateIndexBuffer() { 1268void BufferCache<P>::UpdateIndexBuffer() {
1263 // We have to check for the dirty flags and index count 1269 // We have to check for the dirty flags and index count
1264 // The index count is currently changed without updating the dirty flags 1270 // The index count is currently changed without updating the dirty flags
1265 const auto& index_array = maxwell3d.regs.index_array; 1271 const auto& index_array = maxwell3d->regs.index_array;
1266 auto& flags = maxwell3d.dirty.flags; 1272 auto& flags = maxwell3d->dirty.flags;
1267 if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) { 1273 if (!flags[Dirty::IndexBuffer] && last_index_count == index_array.count) {
1268 return; 1274 return;
1269 } 1275 }
@@ -1272,7 +1278,7 @@ void BufferCache<P>::UpdateIndexBuffer() {
1272 1278
1273 const GPUVAddr gpu_addr_begin = index_array.StartAddress(); 1279 const GPUVAddr gpu_addr_begin = index_array.StartAddress();
1274 const GPUVAddr gpu_addr_end = index_array.EndAddress(); 1280 const GPUVAddr gpu_addr_end = index_array.EndAddress();
1275 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); 1281 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
1276 const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); 1282 const u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin);
1277 const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes(); 1283 const u32 draw_size = (index_array.count + index_array.first) * index_array.FormatSizeInBytes();
1278 const u32 size = std::min(address_size, draw_size); 1284 const u32 size = std::min(address_size, draw_size);
@@ -1289,8 +1295,8 @@ void BufferCache<P>::UpdateIndexBuffer() {
1289 1295
1290template <class P> 1296template <class P>
1291void BufferCache<P>::UpdateVertexBuffers() { 1297void BufferCache<P>::UpdateVertexBuffers() {
1292 auto& flags = maxwell3d.dirty.flags; 1298 auto& flags = maxwell3d->dirty.flags;
1293 if (!maxwell3d.dirty.flags[Dirty::VertexBuffers]) { 1299 if (!maxwell3d->dirty.flags[Dirty::VertexBuffers]) {
1294 return; 1300 return;
1295 } 1301 }
1296 flags[Dirty::VertexBuffers] = false; 1302 flags[Dirty::VertexBuffers] = false;
@@ -1302,33 +1308,25 @@ void BufferCache<P>::UpdateVertexBuffers() {
1302 1308
1303template <class P> 1309template <class P>
1304void BufferCache<P>::UpdateVertexBuffer(u32 index) { 1310void BufferCache<P>::UpdateVertexBuffer(u32 index) {
1305 if (!maxwell3d.dirty.flags[Dirty::VertexBuffer0 + index]) { 1311 if (!maxwell3d->dirty.flags[Dirty::VertexBuffer0 + index]) {
1306 return; 1312 return;
1307 } 1313 }
1308 const auto& array = maxwell3d.regs.vertex_array[index]; 1314 const auto& array = maxwell3d->regs.vertex_array[index];
1309 const auto& limit = maxwell3d.regs.vertex_array_limit[index]; 1315 const auto& limit = maxwell3d->regs.vertex_array_limit[index];
1310 const GPUVAddr gpu_addr_begin = array.StartAddress(); 1316 const GPUVAddr gpu_addr_begin = array.StartAddress();
1311 const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1; 1317 const GPUVAddr gpu_addr_end = limit.LimitAddress() + 1;
1312 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr_begin); 1318 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr_begin);
1313 u32 address_size = static_cast<u32>(gpu_addr_end - gpu_addr_begin); 1319 u32 address_size = static_cast<u32>(
1314 if (address_size >= 64_MiB) { 1320 std::min(gpu_addr_end - gpu_addr_begin, static_cast<u64>(std::numeric_limits<u32>::max())));
1315 // Reported vertex buffer size is very large, cap to mapped buffer size 1321 if (array.enable == 0 || address_size == 0 || !cpu_addr) {
1316 GPUVAddr submapped_addr_end = gpu_addr_begin;
1317
1318 const auto ranges{gpu_memory.GetSubmappedRange(gpu_addr_begin, address_size)};
1319 if (ranges.size() > 0) {
1320 const auto& [addr, size] = *ranges.begin();
1321 submapped_addr_end = addr + size;
1322 }
1323
1324 address_size =
1325 std::min(address_size, static_cast<u32>(submapped_addr_end - gpu_addr_begin));
1326 }
1327 const u32 size = address_size; // TODO: Analyze stride and number of vertices
1328 if (array.enable == 0 || size == 0 || !cpu_addr) {
1329 vertex_buffers[index] = NULL_BINDING; 1322 vertex_buffers[index] = NULL_BINDING;
1330 return; 1323 return;
1331 } 1324 }
1325 if (!gpu_memory->IsWithinGPUAddressRange(gpu_addr_end)) {
1326 address_size =
1327 static_cast<u32>(gpu_memory->MaxContinousRange(gpu_addr_begin, address_size));
1328 }
1329 const u32 size = address_size; // TODO: Analyze stride and number of vertices
1332 vertex_buffers[index] = Binding{ 1330 vertex_buffers[index] = Binding{
1333 .cpu_addr = *cpu_addr, 1331 .cpu_addr = *cpu_addr,
1334 .size = size, 1332 .size = size,
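
The 64 MiB heuristic is gone: the reported span is first saturated into u32 range, then clamped to the actually mapped range via MaxContinousRange. The saturation matters because naively truncating a span above 4 GiB would wrap to a tiny or zero size. A compile-time illustration of the difference:

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

// Plain truncation of a 64-bit span wraps around...
static_assert(static_cast<std::uint32_t>(0x1'0000'0010ULL) == 0x10);
// ...while clamping against u32 max saturates, matching the code above.
static_assert(static_cast<std::uint32_t>(
                  std::min<std::uint64_t>(0x1'0000'0010ULL,
                                          std::numeric_limits<std::uint32_t>::max())) ==
              0xFFFF'FFFF);
```
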
@@ -1382,7 +1380,7 @@ void BufferCache<P>::UpdateTextureBuffers(size_t stage) {
1382 1380
1383template <class P> 1381template <class P>
1384void BufferCache<P>::UpdateTransformFeedbackBuffers() { 1382void BufferCache<P>::UpdateTransformFeedbackBuffers() {
1385 if (maxwell3d.regs.tfb_enabled == 0) { 1383 if (maxwell3d->regs.tfb_enabled == 0) {
1386 return; 1384 return;
1387 } 1385 }
1388 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) { 1386 for (u32 index = 0; index < NUM_TRANSFORM_FEEDBACK_BUFFERS; ++index) {
@@ -1392,10 +1390,10 @@ void BufferCache<P>::UpdateTransformFeedbackBuffers() {
1392 1390
1393template <class P> 1391template <class P>
1394void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) { 1392void BufferCache<P>::UpdateTransformFeedbackBuffer(u32 index) {
1395 const auto& binding = maxwell3d.regs.tfb_bindings[index]; 1393 const auto& binding = maxwell3d->regs.tfb_bindings[index];
1396 const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset; 1394 const GPUVAddr gpu_addr = binding.Address() + binding.buffer_offset;
1397 const u32 size = binding.buffer_size; 1395 const u32 size = binding.buffer_size;
1398 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 1396 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
1399 if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) { 1397 if (binding.buffer_enable == 0 || size == 0 || !cpu_addr) {
1400 transform_feedback_buffers[index] = NULL_BINDING; 1398 transform_feedback_buffers[index] = NULL_BINDING;
1401 return; 1399 return;
@@ -1414,10 +1412,10 @@ void BufferCache<P>::UpdateComputeUniformBuffers() {
1414 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) { 1412 ForEachEnabledBit(enabled_compute_uniform_buffer_mask, [&](u32 index) {
1415 Binding& binding = compute_uniform_buffers[index]; 1413 Binding& binding = compute_uniform_buffers[index];
1416 binding = NULL_BINDING; 1414 binding = NULL_BINDING;
1417 const auto& launch_desc = kepler_compute.launch_description; 1415 const auto& launch_desc = kepler_compute->launch_description;
1418 if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) { 1416 if (((launch_desc.const_buffer_enable_mask >> index) & 1) != 0) {
1419 const auto& cbuf = launch_desc.const_buffer_config[index]; 1417 const auto& cbuf = launch_desc.const_buffer_config[index];
1420 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(cbuf.Address()); 1418 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(cbuf.Address());
1421 if (cpu_addr) { 1419 if (cpu_addr) {
1422 binding.cpu_addr = *cpu_addr; 1420 binding.cpu_addr = *cpu_addr;
1423 binding.size = cbuf.size; 1421 binding.size = cbuf.size;
@@ -1567,6 +1565,8 @@ BufferId BufferCache<P>::CreateBuffer(VAddr cpu_addr, u32 wanted_size) {
1567 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size); 1565 const OverlapResult overlap = ResolveOverlaps(cpu_addr, wanted_size);
1568 const u32 size = static_cast<u32>(overlap.end - overlap.begin); 1566 const u32 size = static_cast<u32>(overlap.end - overlap.begin);
1569 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size); 1567 const BufferId new_buffer_id = slot_buffers.insert(runtime, rasterizer, overlap.begin, size);
1568 auto& new_buffer = slot_buffers[new_buffer_id];
1569 runtime.ClearBuffer(new_buffer, 0, new_buffer.SizeBytes(), 0);
1570 for (const BufferId overlap_id : overlap.ids) { 1570 for (const BufferId overlap_id : overlap.ids) {
1571 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap); 1571 JoinOverlap(new_buffer_id, overlap_id, !overlap.has_stream_leap);
1572 } 1572 }
@@ -1695,7 +1695,7 @@ void BufferCache<P>::MappedUploadMemory(Buffer& buffer, u64 total_size_bytes,
1695 1695
1696template <class P> 1696template <class P>
1697bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size, 1697bool BufferCache<P>::InlineMemory(VAddr dest_address, size_t copy_size,
1698 std::span<u8> inlined_buffer) { 1698 std::span<const u8> inlined_buffer) {
1699 const bool is_dirty = IsRegionRegistered(dest_address, copy_size); 1699 const bool is_dirty = IsRegionRegistered(dest_address, copy_size);
1700 if (!is_dirty) { 1700 if (!is_dirty) {
1701 return false; 1701 return false;
@@ -1831,7 +1831,7 @@ void BufferCache<P>::NotifyBufferDeletion() {
1831 dirty_uniform_buffers.fill(~u32{0}); 1831 dirty_uniform_buffers.fill(~u32{0});
1832 uniform_buffer_binding_sizes.fill({}); 1832 uniform_buffer_binding_sizes.fill({});
1833 } 1833 }
1834 auto& flags = maxwell3d.dirty.flags; 1834 auto& flags = maxwell3d->dirty.flags;
1835 flags[Dirty::IndexBuffer] = true; 1835 flags[Dirty::IndexBuffer] = true;
1836 flags[Dirty::VertexBuffers] = true; 1836 flags[Dirty::VertexBuffers] = true;
1837 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) { 1837 for (u32 index = 0; index < NUM_VERTEX_BUFFERS; ++index) {
@@ -1841,16 +1841,18 @@ void BufferCache<P>::NotifyBufferDeletion() {
 }
 
 template <class P>
-typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr) const {
-    const GPUVAddr gpu_addr = gpu_memory.Read<u64>(ssbo_addr);
-    const u32 size = gpu_memory.Read<u32>(ssbo_addr + 8);
-    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr ssbo_addr,
+                                                                      bool is_written) const {
+    const GPUVAddr gpu_addr = gpu_memory->Read<u64>(ssbo_addr);
+    const u32 size = gpu_memory->Read<u32>(ssbo_addr + 8);
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr || size == 0) {
         return NULL_BINDING;
     }
+    const VAddr cpu_end = Common::AlignUp(*cpu_addr + size, Core::Memory::YUZU_PAGESIZE);
     const Binding binding{
         .cpu_addr = *cpu_addr,
-        .size = size,
+        .size = is_written ? size : static_cast<u32>(cpu_end - *cpu_addr),
         .buffer_id = BufferId{},
     };
     return binding;
@@ -1859,7 +1861,7 @@ typename BufferCache<P>::Binding BufferCache<P>::StorageBufferBinding(GPUVAddr s
 template <class P>
 typename BufferCache<P>::TextureBufferBinding BufferCache<P>::GetTextureBufferBinding(
     GPUVAddr gpu_addr, u32 size, PixelFormat format) {
-    const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+    const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     TextureBufferBinding binding;
     if (!cpu_addr || size == 0) {
         binding.cpu_addr = 0;
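
Note on the StorageBufferBinding hunk above: written bindings keep their exact guest size, while read-only bindings are padded up to the next page boundary. A minimal sketch of that arithmetic, assuming a 4 KiB page as a stand-in for Core::Memory::YUZU_PAGESIZE; the caching rationale in the comment is inferred, not stated by the diff:

#include <cstdint>

constexpr std::uint64_t PAGE_SIZE = 0x1000; // assumed stand-in for YUZU_PAGESIZE

// Mirrors Common::AlignUp for power-of-two alignments.
constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
    return (value + align - 1) & ~(align - 1);
}

constexpr std::uint32_t BindingSize(std::uint64_t cpu_addr, std::uint32_t size, bool is_written) {
    const std::uint64_t cpu_end = AlignUp(cpu_addr + size, PAGE_SIZE);
    // Written buffers keep their exact size; read-only ones are padded to the end
    // of the page, presumably so neighbouring bindings reuse one cached buffer.
    return is_written ? size : static_cast<std::uint32_t>(cpu_end - cpu_addr);
}

static_assert(BindingSize(0x1010, 0x20, true) == 0x20);
static_assert(BindingSize(0x1010, 0x20, false) == 0xFF0); // padded up to 0x2000
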
diff --git a/src/video_core/cdma_pusher.cpp b/src/video_core/cdma_pusher.cpp
index 8e890a85e..28a2d2090 100644
--- a/src/video_core/cdma_pusher.cpp
+++ b/src/video_core/cdma_pusher.cpp
@@ -2,20 +2,22 @@
 // SPDX-License-Identifier: MIT
 
 #include <bit>
-#include "command_classes/host1x.h"
-#include "command_classes/nvdec.h"
-#include "command_classes/vic.h"
 #include "video_core/cdma_pusher.h"
-#include "video_core/command_classes/sync_manager.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/nvdec_common.h"
+#include "video_core/host1x/sync_manager.h"
+#include "video_core/host1x/vic.h"
+#include "video_core/memory_manager.h"
 
 namespace Tegra {
-CDmaPusher::CDmaPusher(GPU& gpu_)
-    : gpu{gpu_}, nvdec_processor(std::make_shared<Nvdec>(gpu)),
-      vic_processor(std::make_unique<Vic>(gpu, nvdec_processor)),
-      host1x_processor(std::make_unique<Host1x>(gpu)),
-      sync_manager(std::make_unique<SyncptIncrManager>(gpu)) {}
+CDmaPusher::CDmaPusher(Host1x::Host1x& host1x_)
+    : host1x{host1x_}, nvdec_processor(std::make_shared<Host1x::Nvdec>(host1x)),
+      vic_processor(std::make_unique<Host1x::Vic>(host1x, nvdec_processor)),
+      host1x_processor(std::make_unique<Host1x::Control>(host1x)),
+      sync_manager(std::make_unique<Host1x::SyncptIncrManager>(host1x)) {}
 
 CDmaPusher::~CDmaPusher() = default;
 
@@ -109,16 +111,17 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
     case ThiMethod::SetMethod1:
         LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
                   static_cast<u32>(vic_thi_state.method_0), data);
-        vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
+        vic_processor->ProcessMethod(static_cast<Host1x::Vic::Method>(vic_thi_state.method_0),
+                                     data);
         break;
     default:
         break;
     }
     break;
-case ChClassId::Host1x:
+case ChClassId::Control:
     // This device is mainly for syncpoint synchronization
     LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
-    host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
+    host1x_processor->ProcessMethod(static_cast<Host1x::Control::Method>(offset), data);
     break;
 default:
     UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
diff --git a/src/video_core/cdma_pusher.h b/src/video_core/cdma_pusher.h
index d6ffef95f..83112dfce 100644
--- a/src/video_core/cdma_pusher.h
+++ b/src/video_core/cdma_pusher.h
@@ -12,11 +12,13 @@
 
 namespace Tegra {
 
-class GPU;
+namespace Host1x {
+class Control;
 class Host1x;
 class Nvdec;
 class SyncptIncrManager;
 class Vic;
+} // namespace Host1x
 
 enum class ChSubmissionMode : u32 {
     SetClass = 0,
@@ -30,7 +32,7 @@ enum class ChSubmissionMode : u32 {
 
 enum class ChClassId : u32 {
     NoClass = 0x0,
-    Host1x = 0x1,
+    Control = 0x1,
     VideoEncodeMpeg = 0x20,
     VideoEncodeNvEnc = 0x21,
     VideoStreamingVi = 0x30,
@@ -88,7 +90,7 @@ enum class ThiMethod : u32 {
 
 class CDmaPusher {
 public:
-    explicit CDmaPusher(GPU& gpu_);
+    explicit CDmaPusher(Host1x::Host1x& host1x);
     ~CDmaPusher();
 
     /// Process the command entry
@@ -101,11 +103,11 @@ private:
     /// Write arguments value to the ThiRegisters member at the specified offset
     void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
 
-    GPU& gpu;
-    std::shared_ptr<Tegra::Nvdec> nvdec_processor;
-    std::unique_ptr<Tegra::Vic> vic_processor;
-    std::unique_ptr<Tegra::Host1x> host1x_processor;
-    std::unique_ptr<SyncptIncrManager> sync_manager;
+    Host1x::Host1x& host1x;
+    std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
+    std::unique_ptr<Tegra::Host1x::Vic> vic_processor;
+    std::unique_ptr<Tegra::Host1x::Control> host1x_processor;
+    std::unique_ptr<Host1x::SyncptIncrManager> sync_manager;
     ChClassId current_class{};
     ThiRegisters vic_thi_state{};
     ThiRegisters nvdec_thi_state{};
diff --git a/src/video_core/command_classes/host1x.cpp b/src/video_core/command_classes/host1x.cpp
deleted file mode 100644
index 11855fe10..000000000
--- a/src/video_core/command_classes/host1x.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
1// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-2.0-or-later
3
4#include "common/assert.h"
5#include "video_core/command_classes/host1x.h"
6#include "video_core/gpu.h"
7
8Tegra::Host1x::Host1x(GPU& gpu_) : gpu(gpu_) {}
9
10Tegra::Host1x::~Host1x() = default;
11
12void Tegra::Host1x::ProcessMethod(Method method, u32 argument) {
13 switch (method) {
14 case Method::LoadSyncptPayload32:
15 syncpoint_value = argument;
16 break;
17 case Method::WaitSyncpt:
18 case Method::WaitSyncpt32:
19 Execute(argument);
20 break;
21 default:
22 UNIMPLEMENTED_MSG("Host1x method 0x{:X}", static_cast<u32>(method));
23 break;
24 }
25}
26
27void Tegra::Host1x::Execute(u32 data) {
28 gpu.WaitFence(data, syncpoint_value);
29}
diff --git a/src/video_core/control/channel_state.cpp b/src/video_core/control/channel_state.cpp
new file mode 100644
index 000000000..cdecc3a91
--- /dev/null
+++ b/src/video_core/control/channel_state.cpp
@@ -0,0 +1,40 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/assert.h"
5#include "video_core/control/channel_state.h"
6#include "video_core/dma_pusher.h"
7#include "video_core/engines/fermi_2d.h"
8#include "video_core/engines/kepler_compute.h"
9#include "video_core/engines/kepler_memory.h"
10#include "video_core/engines/maxwell_3d.h"
11#include "video_core/engines/maxwell_dma.h"
12#include "video_core/engines/puller.h"
13#include "video_core/memory_manager.h"
14
15namespace Tegra::Control {
16
17ChannelState::ChannelState(s32 bind_id_) : bind_id{bind_id_}, initialized{} {}
18
19void ChannelState::Init(Core::System& system, GPU& gpu) {
20 ASSERT(memory_manager);
21 dma_pusher = std::make_unique<Tegra::DmaPusher>(system, gpu, *memory_manager, *this);
22 maxwell_3d = std::make_unique<Engines::Maxwell3D>(system, *memory_manager);
23 fermi_2d = std::make_unique<Engines::Fermi2D>();
24 kepler_compute = std::make_unique<Engines::KeplerCompute>(system, *memory_manager);
25 maxwell_dma = std::make_unique<Engines::MaxwellDMA>(system, *memory_manager);
26 kepler_memory = std::make_unique<Engines::KeplerMemory>(system, *memory_manager);
27 initialized = true;
28}
29
30void ChannelState::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
31 dma_pusher->BindRasterizer(rasterizer);
32 memory_manager->BindRasterizer(rasterizer);
33 maxwell_3d->BindRasterizer(rasterizer);
34 fermi_2d->BindRasterizer(rasterizer);
35 kepler_memory->BindRasterizer(rasterizer);
36 kepler_compute->BindRasterizer(rasterizer);
37 maxwell_dma->BindRasterizer(rasterizer);
38}
39
40} // namespace Tegra::Control
diff --git a/src/video_core/control/channel_state.h b/src/video_core/control/channel_state.h
new file mode 100644
index 000000000..3a7b9872c
--- /dev/null
+++ b/src/video_core/control/channel_state.h
@@ -0,0 +1,68 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <memory>
7
8#include "common/common_types.h"
9
10namespace Core {
11class System;
12}
13
14namespace VideoCore {
15class RasterizerInterface;
16}
17
18namespace Tegra {
19
20class GPU;
21
22namespace Engines {
23class Puller;
24class Fermi2D;
25class Maxwell3D;
26class MaxwellDMA;
27class KeplerCompute;
28class KeplerMemory;
29} // namespace Engines
30
31class MemoryManager;
32class DmaPusher;
33
34namespace Control {
35
36struct ChannelState {
37 explicit ChannelState(s32 bind_id);
38 ChannelState(const ChannelState& state) = delete;
39 ChannelState& operator=(const ChannelState&) = delete;
40 ChannelState(ChannelState&& other) noexcept = default;
41 ChannelState& operator=(ChannelState&& other) noexcept = default;
42
43 void Init(Core::System& system, GPU& gpu);
44
45 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
46
47 s32 bind_id = -1;
48 /// 3D engine
49 std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
50 /// 2D engine
51 std::unique_ptr<Engines::Fermi2D> fermi_2d;
52 /// Compute engine
53 std::unique_ptr<Engines::KeplerCompute> kepler_compute;
54 /// DMA engine
55 std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
56 /// Inline memory engine
57 std::unique_ptr<Engines::KeplerMemory> kepler_memory;
58
59 std::shared_ptr<MemoryManager> memory_manager;
60
61 std::unique_ptr<DmaPusher> dma_pusher;
62
63 bool initialized{};
64};
65
66} // namespace Control
67
68} // namespace Tegra
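
ChannelState::Init asserts that memory_manager is already set, so construction is two-phase: attach an address space, then Init. A hedged sketch of that order; the helper function and the MemoryManager constructor arguments are assumptions, not part of the diff:

// Sketch only: assumes the headers introduced above.
#include "video_core/control/channel_state.h"
#include "video_core/memory_manager.h"

std::shared_ptr<Tegra::Control::ChannelState> MakeChannel(Core::System& system, Tegra::GPU& gpu,
                                                          s32 bind_id) {
    auto channel = std::make_shared<Tegra::Control::ChannelState>(bind_id);
    channel->memory_manager = std::make_shared<Tegra::MemoryManager>(system); // assumed ctor
    channel->Init(system, gpu); // would hit ASSERT(memory_manager) without the line above
    return channel;
}
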
diff --git a/src/video_core/control/channel_state_cache.cpp b/src/video_core/control/channel_state_cache.cpp
new file mode 100644
index 000000000..4ebeb6356
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.cpp
@@ -0,0 +1,14 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "video_core/control/channel_state_cache.inc"
5
6namespace VideoCommon {
7
8ChannelInfo::ChannelInfo(Tegra::Control::ChannelState& channel_state)
9 : maxwell3d{*channel_state.maxwell_3d}, kepler_compute{*channel_state.kepler_compute},
10 gpu_memory{*channel_state.memory_manager} {}
11
12template class VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo>;
13
14} // namespace VideoCommon
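
The .cpp above pulls in channel_state_cache.inc and explicitly instantiates ChannelSetupCaches<ChannelInfo>, keeping the template bodies out of the header. A hypothetical second payload would follow the same pattern; note that BindToChannel in the .inc reaches into the payload's maxwell3d, kepler_compute and gpu_memory members, so any payload must provide them:

// Hypothetical payload; the member trio mirrors ChannelInfo's implicit contract.
struct MyChannelInfo {
    explicit MyChannelInfo(Tegra::Control::ChannelState& state)
        : maxwell3d{*state.maxwell_3d}, kepler_compute{*state.kepler_compute},
          gpu_memory{*state.memory_manager} {}

    Tegra::Engines::Maxwell3D& maxwell3d;
    Tegra::Engines::KeplerCompute& kepler_compute;
    Tegra::MemoryManager& gpu_memory;
};

#include "video_core/control/channel_state_cache.inc"

template class VideoCommon::ChannelSetupCaches<MyChannelInfo>;
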
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
new file mode 100644
index 000000000..584a0c26c
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.h
@@ -0,0 +1,101 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <deque>
7#include <limits>
8#include <mutex>
9#include <optional>
10#include <unordered_map>
11#include <vector>
12
13#include "common/common_types.h"
14
15namespace Tegra {
16
17namespace Engines {
18class Maxwell3D;
19class KeplerCompute;
20} // namespace Engines
21
22class MemoryManager;
23
24namespace Control {
25struct ChannelState;
26}
27
28} // namespace Tegra
29
30namespace VideoCommon {
31
32class ChannelInfo {
33public:
34 ChannelInfo() = delete;
35 explicit ChannelInfo(Tegra::Control::ChannelState& state);
36 ChannelInfo(const ChannelInfo& state) = delete;
37 ChannelInfo& operator=(const ChannelInfo&) = delete;
38 ChannelInfo(ChannelInfo&& other) = default;
39 ChannelInfo& operator=(ChannelInfo&& other) = default;
40
41 Tegra::Engines::Maxwell3D& maxwell3d;
42 Tegra::Engines::KeplerCompute& kepler_compute;
43 Tegra::MemoryManager& gpu_memory;
44};
45
46template <class P>
47class ChannelSetupCaches {
48public:
49 /// Operations for setting the channel of execution.
50 virtual ~ChannelSetupCaches();
51
52 /// Create channel state.
53 virtual void CreateChannel(Tegra::Control::ChannelState& channel);
54
55 /// Bind a channel for execution.
56 void BindToChannel(s32 id);
57
58 /// Erase channel's state.
59 void EraseChannel(s32 id);
60
61 Tegra::MemoryManager* GetFromID(size_t id) const {
62 std::unique_lock<std::mutex> lk(config_mutex);
63 const auto ref = address_spaces.find(id);
64 return ref->second.gpu_memory;
65 }
66
67 std::optional<size_t> getStorageID(size_t id) const {
68 std::unique_lock<std::mutex> lk(config_mutex);
69 const auto ref = address_spaces.find(id);
70 if (ref == address_spaces.end()) {
71 return std::nullopt;
72 }
73 return ref->second.storage_id;
74 }
75
76protected:
77 static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
78
79 P* channel_state;
80 size_t current_channel_id{UNSET_CHANNEL};
81 size_t current_address_space{};
82 Tegra::Engines::Maxwell3D* maxwell3d;
83 Tegra::Engines::KeplerCompute* kepler_compute;
84 Tegra::MemoryManager* gpu_memory;
85
86 std::deque<P> channel_storage;
87 std::deque<size_t> free_channel_ids;
88 std::unordered_map<s32, size_t> channel_map;
89 std::vector<size_t> active_channel_ids;
90 struct AddressSpaceRef {
91 size_t ref_count;
92 size_t storage_id;
93 Tegra::MemoryManager* gpu_memory;
94 };
95 std::unordered_map<size_t, AddressSpaceRef> address_spaces;
96 mutable std::mutex config_mutex;
97
98 virtual void OnGPUASRegister([[maybe_unused]] size_t map_id) {}
99};
100
101} // namespace VideoCommon
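
A cache opts into per-channel state by deriving from ChannelSetupCaches with ChannelInfo (or a ChannelInfo-shaped payload). A hypothetical consumer; the class name and its method are illustrative only:

#include "video_core/control/channel_state_cache.h"

class MyCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
public:
    void DoWork() {
        if (maxwell3d == nullptr) {
            return; // no channel bound yet
        }
        // After BindToChannel(), maxwell3d, kepler_compute and gpu_memory
        // point into the bound channel's state.
    }
};

The explicit instantiation in channel_state_cache.cpp means a consumer of the ChannelInfo specialization only needs the header; new payloads need their own instantiation as sketched earlier.
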
diff --git a/src/video_core/control/channel_state_cache.inc b/src/video_core/control/channel_state_cache.inc
new file mode 100644
index 000000000..460313893
--- /dev/null
+++ b/src/video_core/control/channel_state_cache.inc
@@ -0,0 +1,86 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include <algorithm>
5
6#include "video_core/control/channel_state.h"
7#include "video_core/control/channel_state_cache.h"
8#include "video_core/engines/kepler_compute.h"
9#include "video_core/engines/maxwell_3d.h"
10#include "video_core/memory_manager.h"
11
12namespace VideoCommon {
13
14template <class P>
15ChannelSetupCaches<P>::~ChannelSetupCaches() = default;
16
17template <class P>
18void ChannelSetupCaches<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
19 std::unique_lock<std::mutex> lk(config_mutex);
20 ASSERT(channel_map.find(channel.bind_id) == channel_map.end() && channel.bind_id >= 0);
21 auto new_id = [this, &channel]() {
22 if (!free_channel_ids.empty()) {
23 auto id = free_channel_ids.front();
24 free_channel_ids.pop_front();
25 new (&channel_storage[id]) P(channel);
26 return id;
27 }
28 channel_storage.emplace_back(channel);
29 return channel_storage.size() - 1;
30 }();
31 channel_map.emplace(channel.bind_id, new_id);
32 if (current_channel_id != UNSET_CHANNEL) {
33 channel_state = &channel_storage[current_channel_id];
34 }
35 active_channel_ids.push_back(new_id);
36 auto as_it = address_spaces.find(channel.memory_manager->GetID());
37 if (as_it != address_spaces.end()) {
38 as_it->second.ref_count++;
39 return;
40 }
41 AddressSpaceRef new_gpu_mem_ref{
42 .ref_count = 1,
43 .storage_id = address_spaces.size(),
44 .gpu_memory = channel.memory_manager.get(),
45 };
46 address_spaces.emplace(channel.memory_manager->GetID(), new_gpu_mem_ref);
47 OnGPUASRegister(channel.memory_manager->GetID());
48}
49
50/// Bind a channel for execution.
51template <class P>
52void ChannelSetupCaches<P>::BindToChannel(s32 id) {
53 std::unique_lock<std::mutex> lk(config_mutex);
54 auto it = channel_map.find(id);
55 ASSERT(it != channel_map.end() && id >= 0);
56 current_channel_id = it->second;
57 channel_state = &channel_storage[current_channel_id];
58 maxwell3d = &channel_state->maxwell3d;
59 kepler_compute = &channel_state->kepler_compute;
60 gpu_memory = &channel_state->gpu_memory;
61 current_address_space = gpu_memory->GetID();
62}
63
64/// Erase the channel's state.
65template <class P>
66void ChannelSetupCaches<P>::EraseChannel(s32 id) {
67 std::unique_lock<std::mutex> lk(config_mutex);
68 const auto it = channel_map.find(id);
69 ASSERT(it != channel_map.end() && id >= 0);
70 const auto this_id = it->second;
71 free_channel_ids.push_back(this_id);
72 channel_map.erase(it);
73 if (this_id == current_channel_id) {
74 current_channel_id = UNSET_CHANNEL;
75 channel_state = nullptr;
76 maxwell3d = nullptr;
77 kepler_compute = nullptr;
78 gpu_memory = nullptr;
79 } else if (current_channel_id != UNSET_CHANNEL) {
80 channel_state = &channel_storage[current_channel_id];
81 }
82 active_channel_ids.erase(
83 std::find(active_channel_ids.begin(), active_channel_ids.end(), this_id));
84}
85
86} // namespace VideoCommon
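
CreateChannel above recycles ids through free_channel_ids and re-initializes the vacated deque slot with placement new; placement new is needed because ChannelInfo's reference members delete its assignment operators, the defaulted declarations notwithstanding. A standalone illustration of the recycling scheme (SlotPool is not part of the diff):

#include <cstddef>
#include <deque>
#include <new>
#include <utility>

template <class T>
struct SlotPool {
    std::deque<T> storage;
    std::deque<std::size_t> free_ids;

    template <class... Args>
    std::size_t Create(Args&&... args) {
        if (!free_ids.empty()) {
            const std::size_t id = free_ids.front();
            free_ids.pop_front();
            // Re-initialize the slot in place, as the diff does; like the diff,
            // this skips destroying the old object, which is fine for
            // trivially destructible payloads such as ChannelInfo.
            new (&storage[id]) T(std::forward<Args>(args)...);
            return id;
        }
        storage.emplace_back(std::forward<Args>(args)...);
        return storage.size() - 1;
    }

    void Destroy(std::size_t id) {
        free_ids.push_back(id);
    }
};
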
diff --git a/src/video_core/control/scheduler.cpp b/src/video_core/control/scheduler.cpp
new file mode 100644
index 000000000..f7cbe204e
--- /dev/null
+++ b/src/video_core/control/scheduler.cpp
@@ -0,0 +1,32 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include <memory>
5
6#include "common/assert.h"
7#include "video_core/control/channel_state.h"
8#include "video_core/control/scheduler.h"
9#include "video_core/gpu.h"
10
11namespace Tegra::Control {
12Scheduler::Scheduler(GPU& gpu_) : gpu{gpu_} {}
13
14Scheduler::~Scheduler() = default;
15
16void Scheduler::Push(s32 channel, CommandList&& entries) {
17 std::unique_lock lk(scheduling_guard);
18 auto it = channels.find(channel);
19 ASSERT(it != channels.end());
20 auto channel_state = it->second;
21 gpu.BindChannel(channel_state->bind_id);
22 channel_state->dma_pusher->Push(std::move(entries));
23 channel_state->dma_pusher->DispatchCalls();
24}
25
26void Scheduler::DeclareChannel(std::shared_ptr<ChannelState> new_channel) {
27 s32 channel = new_channel->bind_id;
28 std::unique_lock lk(scheduling_guard);
29 channels.emplace(channel, new_channel);
30}
31
32} // namespace Tegra::Control
diff --git a/src/video_core/control/scheduler.h b/src/video_core/control/scheduler.h
new file mode 100644
index 000000000..44addf61c
--- /dev/null
+++ b/src/video_core/control/scheduler.h
@@ -0,0 +1,37 @@
1// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <memory>
7#include <mutex>
8#include <unordered_map>
9
10#include "video_core/dma_pusher.h"
11
12namespace Tegra {
13
14class GPU;
15
16namespace Control {
17
18struct ChannelState;
19
20class Scheduler {
21public:
22 explicit Scheduler(GPU& gpu_);
23 ~Scheduler();
24
25 void Push(s32 channel, CommandList&& entries);
26
27 void DeclareChannel(std::shared_ptr<ChannelState> new_channel);
28
29private:
30 std::unordered_map<s32, std::shared_ptr<ChannelState>> channels;
31 std::mutex scheduling_guard;
32 GPU& gpu;
33};
34
35} // namespace Control
36
37} // namespace Tegra
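
Expected flow through the scheduler: a channel is declared once, then each submission binds it on the GPU and dispatches its pushbuffer. A hedged sketch; the Submit helper is illustrative, not part of the diff:

#include "video_core/control/channel_state.h"
#include "video_core/control/scheduler.h"

void Submit(Tegra::Control::Scheduler& scheduler,
            std::shared_ptr<Tegra::Control::ChannelState> channel,
            Tegra::CommandList&& commands) {
    scheduler.DeclareChannel(channel);                     // register once per channel
    scheduler.Push(channel->bind_id, std::move(commands)); // binds, pushes and dispatches
}
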
diff --git a/src/video_core/dma_pusher.cpp b/src/video_core/dma_pusher.cpp
index 29b8582ab..9835e3ac1 100644
--- a/src/video_core/dma_pusher.cpp
+++ b/src/video_core/dma_pusher.cpp
@@ -12,7 +12,10 @@
 
 namespace Tegra {
 
-DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_) : gpu{gpu_}, system{system_} {}
+DmaPusher::DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+                     Control::ChannelState& channel_state_)
+    : gpu{gpu_}, system{system_}, memory_manager{memory_manager_}, puller{gpu_, memory_manager_,
+                                                                          *this, channel_state_} {}
 
 DmaPusher::~DmaPusher() = default;
 
@@ -21,8 +24,6 @@ MICROPROFILE_DEFINE(DispatchCalls, "GPU", "Execute command buffer", MP_RGB(128,
 void DmaPusher::DispatchCalls() {
     MICROPROFILE_SCOPE(DispatchCalls);
 
-    gpu.SyncGuestHost();
-
     dma_pushbuffer_subindex = 0;
 
     dma_state.is_last_call = true;
@@ -33,7 +34,6 @@ void DmaPusher::DispatchCalls() {
         }
     }
     gpu.FlushCommands();
-    gpu.SyncGuestHost();
     gpu.OnCommandListEnd();
 }
 
@@ -76,11 +76,11 @@ bool DmaPusher::Step() {
         // Push buffer non-empty, read a word
         command_headers.resize(command_list_header.size);
         if (Settings::IsGPULevelHigh()) {
-            gpu.MemoryManager().ReadBlock(dma_get, command_headers.data(),
-                                          command_list_header.size * sizeof(u32));
+            memory_manager.ReadBlock(dma_get, command_headers.data(),
+                                     command_list_header.size * sizeof(u32));
         } else {
-            gpu.MemoryManager().ReadBlockUnsafe(dma_get, command_headers.data(),
-                                                command_list_header.size * sizeof(u32));
+            memory_manager.ReadBlockUnsafe(dma_get, command_headers.data(),
+                                           command_list_header.size * sizeof(u32));
         }
     }
     for (std::size_t index = 0; index < command_headers.size();) {
@@ -154,7 +154,7 @@ void DmaPusher::SetState(const CommandHeader& command_header) {
 
 void DmaPusher::CallMethod(u32 argument) const {
     if (dma_state.method < non_puller_methods) {
-        gpu.CallMethod(GPU::MethodCall{
+        puller.CallPullerMethod(Engines::Puller::MethodCall{
             dma_state.method,
             argument,
             dma_state.subchannel,
@@ -168,12 +168,16 @@ void DmaPusher::CallMethod(u32 argument) const {
 
 void DmaPusher::CallMultiMethod(const u32* base_start, u32 num_methods) const {
     if (dma_state.method < non_puller_methods) {
-        gpu.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
-                            dma_state.method_count);
+        puller.CallMultiMethod(dma_state.method, dma_state.subchannel, base_start, num_methods,
+                               dma_state.method_count);
     } else {
         subchannels[dma_state.subchannel]->CallMultiMethod(dma_state.method, base_start,
                                                            num_methods, dma_state.method_count);
     }
 }
 
+void DmaPusher::BindRasterizer(VideoCore::RasterizerInterface* rasterizer) {
+    puller.BindRasterizer(rasterizer);
+}
+
 } // namespace Tegra
diff --git a/src/video_core/dma_pusher.h b/src/video_core/dma_pusher.h
index 872fd146a..938f0f11c 100644
--- a/src/video_core/dma_pusher.h
+++ b/src/video_core/dma_pusher.h
@@ -10,6 +10,7 @@
 #include "common/bit_field.h"
 #include "common/common_types.h"
 #include "video_core/engines/engine_interface.h"
+#include "video_core/engines/puller.h"
 
 namespace Core {
 class System;
@@ -17,7 +18,12 @@ class System;
 
 namespace Tegra {
 
+namespace Control {
+struct ChannelState;
+}
+
 class GPU;
+class MemoryManager;
 
 enum class SubmissionMode : u32 {
     IncreasingOld = 0,
@@ -31,24 +37,32 @@ enum class SubmissionMode : u32 {
 // Note that, traditionally, methods are treated as 4-byte addressable locations, and hence
 // their numbers are written down multiplied by 4 in docs. Here we do not multiply by 4, so
 // the values you see in docs may be 4x the values used below.
+// Register documentation:
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/cla26f.h
+//
+// Register Description (approx):
+// https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_pbdma.ref.txt
 enum class BufferMethods : u32 {
     BindObject = 0x0,
+    Illegal = 0x1,
     Nop = 0x2,
     SemaphoreAddressHigh = 0x4,
     SemaphoreAddressLow = 0x5,
-    SemaphoreSequence = 0x6,
-    SemaphoreTrigger = 0x7,
-    NotifyIntr = 0x8,
+    SemaphoreSequencePayload = 0x6,
+    SemaphoreOperation = 0x7,
+    NonStallInterrupt = 0x8,
     WrcacheFlush = 0x9,
-    Unk28 = 0xA,
-    UnkCacheFlush = 0xB,
+    MemOpA = 0xA,
+    MemOpB = 0xB,
+    MemOpC = 0xC,
+    MemOpD = 0xD,
     RefCnt = 0x14,
     SemaphoreAcquire = 0x1A,
     SemaphoreRelease = 0x1B,
-    FenceValue = 0x1C,
-    FenceAction = 0x1D,
-    WaitForInterrupt = 0x1E,
-    Unk7c = 0x1F,
+    SyncpointPayload = 0x1C,
+    SyncpointOperation = 0x1D,
+    WaitForIdle = 0x1E,
+    CRCCheck = 0x1F,
     Yield = 0x20,
     NonPullerMethods = 0x40,
 };
@@ -102,7 +116,8 @@ struct CommandList final {
  */
 class DmaPusher final {
 public:
-    explicit DmaPusher(Core::System& system_, GPU& gpu_);
+    explicit DmaPusher(Core::System& system_, GPU& gpu_, MemoryManager& memory_manager_,
+                       Control::ChannelState& channel_state_);
     ~DmaPusher();
 
     void Push(CommandList&& entries) {
@@ -115,6 +130,8 @@ public:
         subchannels[subchannel_id] = engine;
     }
 
+    void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
+
 private:
     static constexpr u32 non_puller_methods = 0x40;
     static constexpr u32 max_subchannels = 8;
@@ -148,6 +165,8 @@ private:
 
     GPU& gpu;
     Core::System& system;
+    MemoryManager& memory_manager;
+    mutable Engines::Puller puller;
 };
 
 } // namespace Tegra
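
On the renamed BufferMethods values: the enum stores word offsets, while the linked cla26f.h lists byte offsets, so a documented value is four times the enumerator here. A small check of that correspondence; the NVA26F names are taken from the linked header:

#include <cstdint>

constexpr std::uint32_t ToDocOffset(std::uint32_t method) {
    return method * 4; // methods are 4-byte addressable locations
}

static_assert(ToDocOffset(0x2) == 0x8);  // Nop <-> NVA26F_NOP
static_assert(ToDocOffset(0x7) == 0x1C); // SemaphoreOperation <-> NVA26F_SEMAPHORED
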
diff --git a/src/video_core/engines/engine_upload.cpp b/src/video_core/engines/engine_upload.cpp
index 6ff5b1eca..a34819234 100644
--- a/src/video_core/engines/engine_upload.cpp
+++ b/src/video_core/engines/engine_upload.cpp
@@ -3,6 +3,7 @@
 
 #include <cstring>
 
+#include "common/algorithm.h"
 #include "common/assert.h"
 #include "video_core/engines/engine_upload.h"
 #include "video_core/memory_manager.h"
@@ -34,21 +35,48 @@ void State::ProcessData(const u32 data, const bool is_last_call) {
     if (!is_last_call) {
         return;
     }
+    ProcessData(inner_buffer);
+}
+
+void State::ProcessData(const u32* data, size_t num_data) {
+    std::span<const u8> read_buffer(reinterpret_cast<const u8*>(data), num_data * sizeof(u32));
+    ProcessData(read_buffer);
+}
+
+void State::ProcessData(std::span<const u8> read_buffer) {
     const GPUVAddr address{regs.dest.Address()};
     if (is_linear) {
-        rasterizer->AccelerateInlineToMemory(address, copy_size, inner_buffer);
+        if (regs.line_count == 1) {
+            rasterizer->AccelerateInlineToMemory(address, copy_size, read_buffer);
+        } else {
+            for (u32 line = 0; line < regs.line_count; ++line) {
+                const GPUVAddr dest_line = address + static_cast<size_t>(line) * regs.dest.pitch;
+                memory_manager.WriteBlockUnsafe(
+                    dest_line, read_buffer.data() + static_cast<size_t>(line) * regs.line_length_in,
+                    regs.line_length_in);
+            }
+            memory_manager.InvalidateRegion(address, regs.dest.pitch * regs.line_count);
+        }
     } else {
-        UNIMPLEMENTED_IF(regs.dest.z != 0);
-        UNIMPLEMENTED_IF(regs.dest.depth != 1);
-        UNIMPLEMENTED_IF(regs.dest.BlockWidth() != 0);
-        UNIMPLEMENTED_IF(regs.dest.BlockDepth() != 0);
+        u32 width = regs.dest.width;
+        u32 x_elements = regs.line_length_in;
+        u32 x_offset = regs.dest.x;
+        const u32 bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(address));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+        const u32 bytes_per_pixel = 1U << bpp_shift;
         const std::size_t dst_size = Tegra::Texture::CalculateSize(
-            true, 1, regs.dest.width, regs.dest.height, 1, regs.dest.BlockHeight(), 0);
+            true, bytes_per_pixel, width, regs.dest.height, regs.dest.depth,
+            regs.dest.BlockHeight(), regs.dest.BlockDepth());
         tmp_buffer.resize(dst_size);
         memory_manager.ReadBlock(address, tmp_buffer.data(), dst_size);
-        Tegra::Texture::SwizzleKepler(regs.dest.width, regs.dest.height, regs.dest.x, regs.dest.y,
-                                      regs.dest.BlockHeight(), copy_size, inner_buffer.data(),
-                                      tmp_buffer.data());
+        Tegra::Texture::SwizzleSubrect(tmp_buffer, read_buffer, bytes_per_pixel, width,
+                                       regs.dest.height, regs.dest.depth, x_offset, regs.dest.y,
+                                       x_elements, regs.line_count, regs.dest.BlockHeight(),
+                                       regs.dest.BlockDepth(), regs.line_length_in);
         memory_manager.WriteBlock(address, tmp_buffer.data(), dst_size);
     }
 }
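
On the bpp_shift fold above: when no remap is active, the engine infers bytes-per-pixel as the largest power of two, capped at 16 (shift 4), that divides the width, the line length, the x offset and the base address. A self-contained re-derivation of the same computation:

#include <algorithm>
#include <bit>
#include <cstdint>
#include <initializer_list>

constexpr std::uint32_t BppShift(std::uint32_t width, std::uint32_t x_elements,
                                 std::uint32_t x_offset, std::uint32_t address) {
    std::uint32_t shift = 4U; // start at 1 << 4 == 16 bytes per pixel
    for (const std::uint32_t value : {width, x_elements, x_offset, address}) {
        // countr_zero(v) is the exponent of the largest power of two dividing v.
        shift = std::min(shift, static_cast<std::uint32_t>(std::countr_zero(value)));
    }
    return shift;
}

// A width of 100 and a 400-byte line length share only a factor of 4, so the
// upload is treated as 4 bytes per pixel:
static_assert(BppShift(100, 400, 0, 0x1000) == 2);
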
diff --git a/src/video_core/engines/engine_upload.h b/src/video_core/engines/engine_upload.h
index 94ff3314a..f08f6e36a 100644
--- a/src/video_core/engines/engine_upload.h
+++ b/src/video_core/engines/engine_upload.h
@@ -3,6 +3,7 @@
 
 #pragma once
 
+#include <span>
 #include <vector>
 #include "common/bit_field.h"
 #include "common/common_types.h"
@@ -33,7 +34,7 @@ struct Registers {
     u32 width;
     u32 height;
     u32 depth;
-    u32 z;
+    u32 layer;
     u32 x;
     u32 y;
 
@@ -62,11 +63,14 @@ public:
 
     void ProcessExec(bool is_linear_);
     void ProcessData(u32 data, bool is_last_call);
+    void ProcessData(const u32* data, size_t num_data);
 
     /// Binds a rasterizer to this engine.
     void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
 
 private:
+    void ProcessData(std::span<const u8> read_buffer);
+
     u32 write_offset = 0;
     u32 copy_size = 0;
     std::vector<u8> inner_buffer;
diff --git a/src/video_core/engines/kepler_compute.cpp b/src/video_core/engines/kepler_compute.cpp
index 5db254d94..7c50bdbe0 100644
--- a/src/video_core/engines/kepler_compute.cpp
+++ b/src/video_core/engines/kepler_compute.cpp
@@ -36,8 +36,6 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
     }
     case KEPLER_COMPUTE_REG_INDEX(data_upload): {
         upload_state.ProcessData(method_argument, is_last_call);
-        if (is_last_call) {
-        }
         break;
     }
     case KEPLER_COMPUTE_REG_INDEX(launch):
@@ -50,8 +48,15 @@ void KeplerCompute::CallMethod(u32 method, u32 method_argument, bool is_last_cal
 
 void KeplerCompute::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                     u32 methods_pending) {
-    for (std::size_t i = 0; i < amount; i++) {
-        CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+    switch (method) {
+    case KEPLER_COMPUTE_REG_INDEX(data_upload):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
+    default:
+        for (std::size_t i = 0; i < amount; i++) {
+            CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+        }
+        break;
     }
 }
 
diff --git a/src/video_core/engines/kepler_memory.cpp b/src/video_core/engines/kepler_memory.cpp
index e2b029542..a3fbab1e5 100644
--- a/src/video_core/engines/kepler_memory.cpp
+++ b/src/video_core/engines/kepler_memory.cpp
@@ -33,8 +33,6 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
     }
     case KEPLERMEMORY_REG_INDEX(data): {
         upload_state.ProcessData(method_argument, is_last_call);
-        if (is_last_call) {
-        }
         break;
     }
     }
@@ -42,8 +40,15 @@ void KeplerMemory::CallMethod(u32 method, u32 method_argument, bool is_last_call
 
 void KeplerMemory::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
                                    u32 methods_pending) {
-    for (std::size_t i = 0; i < amount; i++) {
-        CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+    switch (method) {
+    case KEPLERMEMORY_REG_INDEX(data):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
+    default:
+        for (std::size_t i = 0; i < amount; i++) {
+            CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
+        }
+        break;
     }
 }
 
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 3a4646289..3c6e44a25 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -219,6 +219,8 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
         regs.index_array.count = regs.small_index_2.count;
         regs.index_array.first = regs.small_index_2.first;
         dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
+        // A macro calls this method over and over; should it increase the instance count?
+        // Used by Hades and likely other Vulkan games.
         return DrawArrays();
     case MAXWELL3D_REG_INDEX(topology_override):
         use_topology_override = true;
@@ -237,11 +239,12 @@ void Maxwell3D::ProcessMethodCall(u32 method, u32 argument, u32 nonshadow_argume
         return upload_state.ProcessExec(regs.exec_upload.linear != 0);
     case MAXWELL3D_REG_INDEX(data_upload):
         upload_state.ProcessData(argument, is_last_call);
-        if (is_last_call) {
-        }
         return;
     case MAXWELL3D_REG_INDEX(fragment_barrier):
         return rasterizer->FragmentBarrier();
+    case MAXWELL3D_REG_INDEX(invalidate_texture_data_cache):
+        rasterizer->InvalidateGPUCache();
+        return rasterizer->WaitForIdle();
     case MAXWELL3D_REG_INDEX(tiled_cache_barrier):
         return rasterizer->TiledCacheBarrier();
     }
@@ -311,6 +314,9 @@ void Maxwell3D::CallMultiMethod(u32 method, const u32* base_start, u32 amount,
     case MAXWELL3D_REG_INDEX(const_buffer.cb_data) + 15:
         ProcessCBMultiData(base_start, amount);
         break;
+    case MAXWELL3D_REG_INDEX(data_upload):
+        upload_state.ProcessData(base_start, static_cast<size_t>(amount));
+        return;
     default:
         for (std::size_t i = 0; i < amount; i++) {
             CallMethod(method, base_start[i], methods_pending - static_cast<u32>(i) <= 1);
@@ -447,18 +453,10 @@ void Maxwell3D::ProcessFirmwareCall4() {
 }
 
 void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
-    struct LongQueryResult {
-        u64_le value;
-        u64_le timestamp;
-    };
-    static_assert(sizeof(LongQueryResult) == 16, "LongQueryResult has wrong size");
     const GPUVAddr sequence_address{regs.query.QueryAddress()};
     if (long_query) {
-        // Write the 128-bit result structure in long mode. Note: We emulate an infinitely fast
-        // GPU, this command may actually take a while to complete in real hardware due to GPU
-        // wait queues.
-        LongQueryResult query_result{payload, system.GPU().GetTicks()};
-        memory_manager.WriteBlock(sequence_address, &query_result, sizeof(query_result));
+        memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+        memory_manager.Write<u64>(sequence_address, payload);
     } else {
         memory_manager.Write<u32>(sequence_address, static_cast<u32>(payload));
     }
@@ -472,10 +470,25 @@ void Maxwell3D::ProcessQueryGet() {
 
     switch (regs.query.query_get.operation) {
     case Regs::QueryOperation::Release:
-        if (regs.query.query_get.fence == 1) {
-            rasterizer->SignalSemaphore(regs.query.QueryAddress(), regs.query.query_sequence);
+        if (regs.query.query_get.fence == 1 || regs.query.query_get.short_query != 0) {
+            const GPUVAddr sequence_address{regs.query.QueryAddress()};
+            const u32 payload = regs.query.query_sequence;
+            std::function<void()> operation([this, sequence_address, payload] {
+                memory_manager.Write<u32>(sequence_address, payload);
+            });
+            rasterizer->SignalFence(std::move(operation));
         } else {
-            StampQueryResult(regs.query.query_sequence, regs.query.query_get.short_query == 0);
+            struct LongQueryResult {
+                u64_le value;
+                u64_le timestamp;
+            };
+            const GPUVAddr sequence_address{regs.query.QueryAddress()};
+            const u32 payload = regs.query.query_sequence;
+            std::function<void()> operation([this, sequence_address, payload] {
+                memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
+                memory_manager.Write<u64>(sequence_address, payload);
+            });
+            rasterizer->SyncOperation(std::move(operation));
         }
         break;
     case Regs::QueryOperation::Acquire:
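
The query hunks above stop writing guest memory inline; each write is captured in a std::function and handed to SignalFence or SyncOperation. A standalone sketch of that deferral pattern; the queue merely stands in for the rasterizer's fence bookkeeping:

#include <functional>
#include <queue>

// Guest-visible semaphore/query writes are queued and executed only when the
// fence machinery decides prior work has completed.
struct DeferredWrites {
    std::queue<std::function<void()>> pending;

    void SignalFence(std::function<void()>&& write) {
        pending.push(std::move(write)); // the real path orders this against GPU work
    }

    void ReleaseFences() {
        while (!pending.empty()) {
            pending.front()();
            pending.pop();
        }
    }
};
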
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 0efe58282..3909d36c1 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -1,6 +1,7 @@
 // SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
 // SPDX-License-Identifier: GPL-2.0-or-later
 
+#include "common/algorithm.h"
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
@@ -54,8 +55,6 @@ void MaxwellDMA::Launch() {
     const LaunchDMA& launch = regs.launch_dma;
     ASSERT(launch.interrupt_type == LaunchDMA::InterruptType::NONE);
     ASSERT(launch.data_transfer_type == LaunchDMA::DataTransferType::NON_PIPELINED);
-    ASSERT(regs.dst_params.origin.x == 0);
-    ASSERT(regs.dst_params.origin.y == 0);
 
     const bool is_src_pitch = launch.src_memory_layout == LaunchDMA::MemoryLayout::PITCH;
     const bool is_dst_pitch = launch.dst_memory_layout == LaunchDMA::MemoryLayout::PITCH;
@@ -121,23 +120,40 @@ void MaxwellDMA::CopyPitchToPitch() {
 
 void MaxwellDMA::CopyBlockLinearToPitch() {
     UNIMPLEMENTED_IF(regs.src_params.block_size.width != 0);
-    UNIMPLEMENTED_IF(regs.src_params.block_size.depth != 0);
     UNIMPLEMENTED_IF(regs.src_params.layer != 0);
 
+    const bool is_remapping = regs.launch_dma.remap_enable != 0;
+
     // Optimized path for micro copies.
     const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
-    if (dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
+    if (!is_remapping && dst_size < GOB_SIZE && regs.pitch_out <= GOB_SIZE_X &&
         regs.src_params.height > GOB_SIZE_Y) {
         FastCopyBlockLinearToPitch();
         return;
     }
 
     // Deswizzle the input and copy it over.
-    UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
     const Parameters& src_params = regs.src_params;
-    const u32 width = src_params.width;
+
+    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
+
+    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+    u32 width = src_params.width;
+    u32 x_elements = regs.line_length_in;
+    u32 x_offset = src_params.origin.x;
+    u32 bpp_shift = 0U;
+    if (!is_remapping) {
+        bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(regs.offset_in));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+    }
+
+    const u32 bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = src_params.height;
     const u32 depth = src_params.depth;
     const u32 block_height = src_params.block_size.height;
@@ -155,30 +171,45 @@ void MaxwellDMA::CopyBlockLinearToPitch() {
     memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
 
-    UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, width, bytes_per_pixel,
-                     block_height, src_params.origin.x, src_params.origin.y, write_buffer.data(),
-                     read_buffer.data());
+    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+                     src_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+                     regs.pitch_out);
 
     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }
 
 void MaxwellDMA::CopyPitchToBlockLinear() {
     UNIMPLEMENTED_IF_MSG(regs.dst_params.block_size.width != 0, "Block width is not one");
-    UNIMPLEMENTED_IF(regs.launch_dma.remap_enable != 0);
+    UNIMPLEMENTED_IF(regs.dst_params.layer != 0);
+
+    const bool is_remapping = regs.launch_dma.remap_enable != 0;
+    const u32 num_remap_components = regs.remap_const.num_dst_components_minus_one + 1;
+    const u32 remap_components_size = regs.remap_const.component_size_minus_one + 1;
 
     const auto& dst_params = regs.dst_params;
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_in / regs.line_length_in : 1;
-    const u32 width = dst_params.width;
+
+    const u32 base_bpp = !is_remapping ? 1U : num_remap_components * remap_components_size;
+
+    u32 width = dst_params.width;
+    u32 x_elements = regs.line_length_in;
+    u32 x_offset = dst_params.origin.x;
+    u32 bpp_shift = 0U;
+    if (!is_remapping) {
+        bpp_shift = Common::FoldRight(
+            4U, [](u32 x, u32 y) { return std::min(x, static_cast<u32>(std::countr_zero(y))); },
+            width, x_elements, x_offset, static_cast<u32>(regs.offset_out));
+        width >>= bpp_shift;
+        x_elements >>= bpp_shift;
+        x_offset >>= bpp_shift;
+    }
+
+    const u32 bytes_per_pixel = base_bpp << bpp_shift;
     const u32 height = dst_params.height;
     const u32 depth = dst_params.depth;
     const u32 block_height = dst_params.block_size.height;
     const u32 block_depth = dst_params.block_size.depth;
     const size_t dst_size =
         CalculateSize(true, bytes_per_pixel, width, height, depth, block_height, block_depth);
-    const size_t dst_layer_size =
-        CalculateSize(true, bytes_per_pixel, width, height, 1, block_height, block_depth);
-
     const size_t src_size = static_cast<size_t>(regs.pitch_in) * regs.line_count;
 
     if (read_buffer.size() < src_size) {
@@ -188,32 +219,23 @@ void MaxwellDMA::CopyPitchToBlockLinear() {
         write_buffer.resize(dst_size);
     }
 
+    memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
     if (Settings::IsGPULevelExtreme()) {
-        memory_manager.ReadBlock(regs.offset_in, read_buffer.data(), src_size);
         memory_manager.ReadBlock(regs.offset_out, write_buffer.data(), dst_size);
     } else {
-        memory_manager.ReadBlockUnsafe(regs.offset_in, read_buffer.data(), src_size);
         memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
     }
 
     // If the input is linear and the output is tiled, swizzle the input and copy it over.
-    if (regs.dst_params.block_size.depth > 0) {
-        ASSERT(dst_params.layer == 0);
-        SwizzleSliceToVoxel(regs.line_length_in, regs.line_count, regs.pitch_in, width, height,
-                            bytes_per_pixel, block_height, block_depth, dst_params.origin.x,
-                            dst_params.origin.y, write_buffer.data(), read_buffer.data());
-    } else {
-        SwizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_in, width, bytes_per_pixel,
-                       write_buffer.data() + dst_layer_size * dst_params.layer, read_buffer.data(),
-                       block_height, dst_params.origin.x, dst_params.origin.y);
-    }
+    SwizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, width, height, depth, x_offset,
+                   dst_params.origin.y, x_elements, regs.line_count, block_height, block_depth,
+                   regs.pitch_in);
 
     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }
 
 void MaxwellDMA::FastCopyBlockLinearToPitch() {
-    const u32 bytes_per_pixel =
-        regs.launch_dma.remap_enable ? regs.pitch_out / regs.line_length_in : 1;
+    const u32 bytes_per_pixel = 1U;
     const size_t src_size = GOB_SIZE;
     const size_t dst_size = static_cast<size_t>(regs.pitch_out) * regs.line_count;
     u32 pos_x = regs.src_params.origin.x;
@@ -239,9 +261,10 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
         memory_manager.ReadBlockUnsafe(regs.offset_out, write_buffer.data(), dst_size);
     }
 
-    UnswizzleSubrect(regs.line_length_in, regs.line_count, regs.pitch_out, regs.src_params.width,
-                     bytes_per_pixel, regs.src_params.block_size.height, pos_x, pos_y,
-                     write_buffer.data(), read_buffer.data());
+    UnswizzleSubrect(write_buffer, read_buffer, bytes_per_pixel, regs.src_params.width,
+                     regs.src_params.height, 1, pos_x, pos_y, regs.line_length_in, regs.line_count,
+                     regs.src_params.block_size.height, regs.src_params.block_size.depth,
+                     regs.pitch_out);
 
     memory_manager.WriteBlock(regs.offset_out, write_buffer.data(), dst_size);
 }
@@ -249,16 +272,24 @@ void MaxwellDMA::FastCopyBlockLinearToPitch() {
 void MaxwellDMA::ReleaseSemaphore() {
     const auto type = regs.launch_dma.semaphore_type;
     const GPUVAddr address = regs.semaphore.address;
+    const u32 payload = regs.semaphore.payload;
     switch (type) {
     case LaunchDMA::SemaphoreType::NONE:
         break;
-    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE:
-        memory_manager.Write<u32>(address, regs.semaphore.payload);
+    case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
+        std::function<void()> operation(
+            [this, address, payload] { memory_manager.Write<u32>(address, payload); });
+        rasterizer->SignalFence(std::move(operation));
         break;
-    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE:
-        memory_manager.Write<u64>(address, static_cast<u64>(regs.semaphore.payload));
-        memory_manager.Write<u64>(address + 8, system.GPU().GetTicks());
+    }
+    case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
+        std::function<void()> operation([this, address, payload] {
+            memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
+            memory_manager.Write<u64>(address, payload);
+        });
+        rasterizer->SignalFence(std::move(operation));
         break;
+    }
     default:
         ASSERT_MSG(false, "Unknown semaphore type: {}", static_cast<u32>(type.Value()));
     }
diff --git a/src/video_core/engines/maxwell_dma.h b/src/video_core/engines/maxwell_dma.h
index 074bac92c..bc48320ce 100644
--- a/src/video_core/engines/maxwell_dma.h
+++ b/src/video_core/engines/maxwell_dma.h
@@ -189,10 +189,16 @@ public:
         BitField<4, 3, Swizzle> dst_y;
         BitField<8, 3, Swizzle> dst_z;
         BitField<12, 3, Swizzle> dst_w;
+        BitField<0, 12, u32> dst_components_raw;
         BitField<16, 2, u32> component_size_minus_one;
         BitField<20, 2, u32> num_src_components_minus_one;
         BitField<24, 2, u32> num_dst_components_minus_one;
     };
+
+    Swizzle GetComponent(size_t i) const {
+        const u32 raw = dst_components_raw;
+        return static_cast<Swizzle>((raw >> (i * 3)) & 0x7);
+    }
 };
 static_assert(sizeof(RemapConst) == 12);
 
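
GetComponent above decodes dst_components_raw, which aliases the four 3-bit swizzle selectors packed into bits [0, 12): component i occupies bits [3*i, 3*i + 3). A worked check of the bit arithmetic:

#include <cstddef>
#include <cstdint>

constexpr std::uint32_t Component(std::uint32_t raw, std::size_t i) {
    return (raw >> (i * 3)) & 0x7;
}

// With raw = 0b100'011'010'001, the four selectors decode independently:
static_assert(Component(0b100'011'010'001, 0) == 0b001);
static_assert(Component(0b100'011'010'001, 1) == 0b010);
static_assert(Component(0b100'011'010'001, 2) == 0b011);
static_assert(Component(0b100'011'010'001, 3) == 0b100);
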
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
new file mode 100644
index 000000000..cca890792
--- /dev/null
+++ b/src/video_core/engines/puller.cpp
@@ -0,0 +1,306 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#include "common/assert.h"
5#include "common/logging/log.h"
6#include "common/settings.h"
7#include "core/core.h"
8#include "video_core/control/channel_state.h"
9#include "video_core/dma_pusher.h"
10#include "video_core/engines/fermi_2d.h"
11#include "video_core/engines/kepler_compute.h"
12#include "video_core/engines/kepler_memory.h"
13#include "video_core/engines/maxwell_3d.h"
14#include "video_core/engines/maxwell_dma.h"
15#include "video_core/engines/puller.h"
16#include "video_core/gpu.h"
17#include "video_core/memory_manager.h"
18#include "video_core/rasterizer_interface.h"
19
20namespace Tegra::Engines {
21
22Puller::Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher_,
23 Control::ChannelState& channel_state_)
24 : gpu{gpu_}, memory_manager{memory_manager_}, dma_pusher{dma_pusher_}, channel_state{
25 channel_state_} {}
26
27Puller::~Puller() = default;
28
29void Puller::ProcessBindMethod(const MethodCall& method_call) {
30 // Bind the current subchannel to the desired engine id.
31 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel,
32 method_call.argument);
33 const auto engine_id = static_cast<EngineID>(method_call.argument);
34    bound_engines[method_call.subchannel] = engine_id;
35 switch (engine_id) {
36 case EngineID::FERMI_TWOD_A:
37 dma_pusher.BindSubchannel(channel_state.fermi_2d.get(), method_call.subchannel);
38 break;
39 case EngineID::MAXWELL_B:
40 dma_pusher.BindSubchannel(channel_state.maxwell_3d.get(), method_call.subchannel);
41 break;
42 case EngineID::KEPLER_COMPUTE_B:
43 dma_pusher.BindSubchannel(channel_state.kepler_compute.get(), method_call.subchannel);
44 break;
45 case EngineID::MAXWELL_DMA_COPY_A:
46 dma_pusher.BindSubchannel(channel_state.maxwell_dma.get(), method_call.subchannel);
47 break;
48 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
49 dma_pusher.BindSubchannel(channel_state.kepler_memory.get(), method_call.subchannel);
50 break;
51 default:
52 UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
53 }
54}
55
56void Puller::ProcessFenceActionMethod() {
57 switch (regs.fence_action.op) {
58 case Puller::FenceOperation::Acquire:
59 // UNIMPLEMENTED_MSG("Channel Scheduling pending.");
60 // WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
61 rasterizer->ReleaseFences();
62 break;
63 case Puller::FenceOperation::Increment:
64 rasterizer->SignalSyncPoint(regs.fence_action.syncpoint_id);
65 break;
66 default:
67 UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
68 }
69}
70
71void Puller::ProcessSemaphoreTriggerMethod() {
72 const auto semaphoreOperationMask = 0xF;
73 const auto op =
74 static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
75 if (op == GpuSemaphoreOperation::WriteLong) {
76 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
77 const u32 payload = regs.semaphore_sequence;
78 std::function<void()> operation([this, sequence_address, payload] {
79 memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
80 memory_manager.Write<u64>(sequence_address, payload);
81 });
82 rasterizer->SignalFence(std::move(operation));
83 } else {
84 do {
85 const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
86 regs.acquire_source = true;
87 regs.acquire_value = regs.semaphore_sequence;
88 if (op == GpuSemaphoreOperation::AcquireEqual) {
89 regs.acquire_active = true;
90 regs.acquire_mode = false;
91 if (word != regs.acquire_value) {
92 rasterizer->ReleaseFences();
93 continue;
94 }
95 } else if (op == GpuSemaphoreOperation::AcquireGequal) {
96 regs.acquire_active = true;
97 regs.acquire_mode = true;
98 if (word < regs.acquire_value) {
99 rasterizer->ReleaseFences();
100 continue;
101 }
102 } else if (op == GpuSemaphoreOperation::AcquireMask) {
103 if (word && regs.semaphore_sequence == 0) {
104 rasterizer->ReleaseFences();
105 continue;
106 }
107 } else {
108 LOG_ERROR(HW_GPU, "Invalid semaphore operation");
109 }
110 } while (false);
111 }
112}
113
114void Puller::ProcessSemaphoreRelease() {
115 const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
116 const u32 payload = regs.semaphore_release;
117 std::function<void()> operation([this, sequence_address, payload] {
118 memory_manager.Write<u32>(sequence_address, payload);
119 });
120 rasterizer->SyncOperation(std::move(operation));
121}
122
123void Puller::ProcessSemaphoreAcquire() {
124 u32 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
125 const auto value = regs.semaphore_acquire;
126 while (word != value) {
127 regs.acquire_active = true;
128 regs.acquire_value = value;
129 std::this_thread::sleep_for(std::chrono::milliseconds(1));
130 rasterizer->ReleaseFences();
131 word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
132 // TODO(kemathe73) figure out how to do the acquire_timeout
133 regs.acquire_mode = false;
134 regs.acquire_source = false;
135 }
136}
137
138/// Calls a GPU puller method.
139void Puller::CallPullerMethod(const MethodCall& method_call) {
140 regs.reg_array[method_call.method] = method_call.argument;
141 const auto method = static_cast<BufferMethods>(method_call.method);
142
143 switch (method) {
144 case BufferMethods::BindObject: {
145 ProcessBindMethod(method_call);
146 break;
147 }
148 case BufferMethods::Nop:
149 case BufferMethods::SemaphoreAddressHigh:
150 case BufferMethods::SemaphoreAddressLow:
151 case BufferMethods::SemaphoreSequencePayload:
152 case BufferMethods::SyncpointPayload:
153 break;
154 case BufferMethods::WrcacheFlush:
155 case BufferMethods::RefCnt:
156 rasterizer->SignalReference();
157 break;
158 case BufferMethods::SyncpointOperation:
159 ProcessFenceActionMethod();
160 break;
161 case BufferMethods::WaitForIdle:
162 rasterizer->WaitForIdle();
163 break;
164 case BufferMethods::SemaphoreOperation: {
165 ProcessSemaphoreTriggerMethod();
166 break;
167 }
168 case BufferMethods::NonStallInterrupt: {
169 LOG_ERROR(HW_GPU, "Special puller engine method NonStallInterrupt not implemented");
170 break;
171 }
172 case BufferMethods::MemOpA: {
173 LOG_ERROR(HW_GPU, "Memory Operation A");
174 break;
175 }
176 case BufferMethods::MemOpB: {
177 // Implement this better.
178 rasterizer->InvalidateGPUCache();
179 break;
180 }
181 case BufferMethods::MemOpC:
182 case BufferMethods::MemOpD: {
183 LOG_ERROR(HW_GPU, "Memory Operation C,D");
184 break;
185 }
186 case BufferMethods::SemaphoreAcquire: {
187 ProcessSemaphoreAcquire();
188 break;
189 }
190 case BufferMethods::SemaphoreRelease: {
191 ProcessSemaphoreRelease();
192 break;
193 }
194 case BufferMethods::Yield: {
195 // TODO(Kmather73): Research and implement this method.
196 LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
197 break;
198 }
199 default:
200 LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
201 break;
202 }
203}
204
205/// Calls a GPU engine method.
206void Puller::CallEngineMethod(const MethodCall& method_call) {
207 const EngineID engine = bound_engines[method_call.subchannel];
208
209 switch (engine) {
210 case EngineID::FERMI_TWOD_A:
211 channel_state.fermi_2d->CallMethod(method_call.method, method_call.argument,
212 method_call.IsLastCall());
213 break;
214 case EngineID::MAXWELL_B:
215 channel_state.maxwell_3d->CallMethod(method_call.method, method_call.argument,
216 method_call.IsLastCall());
217 break;
218 case EngineID::KEPLER_COMPUTE_B:
219 channel_state.kepler_compute->CallMethod(method_call.method, method_call.argument,
220 method_call.IsLastCall());
221 break;
222 case EngineID::MAXWELL_DMA_COPY_A:
223 channel_state.maxwell_dma->CallMethod(method_call.method, method_call.argument,
224 method_call.IsLastCall());
225 break;
226 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
227 channel_state.kepler_memory->CallMethod(method_call.method, method_call.argument,
228 method_call.IsLastCall());
229 break;
230 default:
231 UNIMPLEMENTED_MSG("Unimplemented engine");
232 }
233}
234
235/// Calls a GPU engine multivalue method.
236void Puller::CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
237 u32 methods_pending) {
238 const EngineID engine = bound_engines[subchannel];
239
240 switch (engine) {
241 case EngineID::FERMI_TWOD_A:
242 channel_state.fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
243 break;
244 case EngineID::MAXWELL_B:
245 channel_state.maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
246 break;
247 case EngineID::KEPLER_COMPUTE_B:
248 channel_state.kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
249 break;
250 case EngineID::MAXWELL_DMA_COPY_A:
251 channel_state.maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
252 break;
253 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
254 channel_state.kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
255 break;
256 default:
257 UNIMPLEMENTED_MSG("Unimplemented engine");
258 }
259}
260
261/// Calls a GPU method.
262void Puller::CallMethod(const MethodCall& method_call) {
263 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method,
264 method_call.subchannel);
265
266 ASSERT(method_call.subchannel < bound_engines.size());
267
268 if (ExecuteMethodOnEngine(method_call.method)) {
269 CallEngineMethod(method_call);
270 } else {
271 CallPullerMethod(method_call);
272 }
273}
274
275/// Calls a GPU multivalue method.
276void Puller::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
277 u32 methods_pending) {
278 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel);
279
280 ASSERT(subchannel < bound_engines.size());
281
282 if (ExecuteMethodOnEngine(method)) {
283 CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending);
284 } else {
285 for (std::size_t i = 0; i < amount; i++) {
286 CallPullerMethod(MethodCall{
287 method,
288 base_start[i],
289 subchannel,
290 methods_pending - static_cast<u32>(i),
291 });
292 }
293 }
294}
295
296void Puller::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
297 rasterizer = rasterizer_;
298}
299
300/// Determines where the method should be executed.
301[[nodiscard]] bool Puller::ExecuteMethodOnEngine(u32 method) {
302 const auto buffer_method = static_cast<BufferMethods>(method);
303 return buffer_method >= BufferMethods::NonPullerMethods;
304}
305
306} // namespace Tegra::Engines
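puller.cpp splits command routing out of GPU::Impl: BindObject records which engine owns a subchannel, and ExecuteMethodOnEngine forwards anything at or above NonPullerMethods to that engine. A toy model of the dispatch rule follows; the 0x40 threshold is an assumption of this sketch (the enum value is not shown in this diff, though Regs::NUM_REGS above suggests it).

```cpp
// Sketch: puller-vs-engine routing keyed on the method id and subchannel.
#include <array>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t kNonPullerMethods = 0x40; // assumed threshold

enum class EngineID : std::uint32_t { None = 0, Fermi2D = 0x902D, Maxwell3D = 0xB197 };

struct Router {
    std::array<EngineID, 8> bound_engines{}; // subchannel -> engine (set by BindObject)

    void CallMethod(std::uint32_t method, std::uint32_t subchannel) {
        if (method >= kNonPullerMethods) {
            std::printf("engine %X handles method %X\n",
                        static_cast<unsigned>(bound_engines[subchannel]),
                        static_cast<unsigned>(method));
        } else {
            std::printf("puller handles method %X\n", static_cast<unsigned>(method));
        }
    }
};

int main() {
    Router router;
    router.bound_engines[0] = EngineID::Maxwell3D; // BindObject on subchannel 0
    router.CallMethod(0x1A, 0);  // semaphore_acquire -> handled by the puller
    router.CallMethod(0x100, 0); // engine method -> forwarded to Maxwell3D
}
```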
diff --git a/src/video_core/engines/puller.h b/src/video_core/engines/puller.h
new file mode 100644
index 000000000..d4175ee94
--- /dev/null
+++ b/src/video_core/engines/puller.h
@@ -0,0 +1,177 @@
1// SPDX-FileCopyrightText: 2022 yuzu Emulator Project
2// SPDX-License-Identifier: GPL-3.0-or-later
3
4#pragma once
5
6#include <array>
7#include <cstddef>
8#include <vector>
9#include "common/bit_field.h"
10#include "common/common_funcs.h"
11#include "common/common_types.h"
12#include "video_core/engines/engine_interface.h"
13
14namespace Core {
15class System;
16}
17
18namespace Tegra {
19class MemoryManager;
20class DmaPusher;
21
22enum class EngineID {
23 FERMI_TWOD_A = 0x902D, // 2D Engine
24 MAXWELL_B = 0xB197, // 3D Engine
25 KEPLER_COMPUTE_B = 0xB1C0,
26 KEPLER_INLINE_TO_MEMORY_B = 0xA140,
27 MAXWELL_DMA_COPY_A = 0xB0B5,
28};
29
30namespace Control {
31struct ChannelState;
32}
33} // namespace Tegra
34
35namespace VideoCore {
36class RasterizerInterface;
37}
38
39namespace Tegra::Engines {
40
41class Puller final {
42public:
43 struct MethodCall {
44 u32 method{};
45 u32 argument{};
46 u32 subchannel{};
47 u32 method_count{};
48
49 explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
50 : method(method_), argument(argument_), subchannel(subchannel_),
51 method_count(method_count_) {}
52
53 [[nodiscard]] bool IsLastCall() const {
54 return method_count <= 1;
55 }
56 };
57
58 enum class FenceOperation : u32 {
59 Acquire = 0,
60 Increment = 1,
61 };
62
63 union FenceAction {
64 u32 raw;
65 BitField<0, 1, FenceOperation> op;
66 BitField<8, 24, u32> syncpoint_id;
67 };
68
69 explicit Puller(GPU& gpu_, MemoryManager& memory_manager_, DmaPusher& dma_pusher,
70 Control::ChannelState& channel_state);
71 ~Puller();
72
73 void CallMethod(const MethodCall& method_call);
74
75 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
76 u32 methods_pending);
77
78 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
79
80 void CallPullerMethod(const MethodCall& method_call);
81
82 void CallEngineMethod(const MethodCall& method_call);
83
84 void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
85 u32 methods_pending);
86
87private:
88 Tegra::GPU& gpu;
89
90 MemoryManager& memory_manager;
91 DmaPusher& dma_pusher;
92 Control::ChannelState& channel_state;
93 VideoCore::RasterizerInterface* rasterizer = nullptr;
94
95 static constexpr std::size_t NUM_REGS = 0x800;
96 struct Regs {
97 static constexpr size_t NUM_REGS = 0x40;
98
99 union {
100 struct {
101 INSERT_PADDING_WORDS_NOINIT(0x4);
102 struct {
103 u32 address_high;
104 u32 address_low;
105
106 [[nodiscard]] GPUVAddr SemaphoreAddress() const {
107 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
108 address_low);
109 }
110 } semaphore_address;
111
112 u32 semaphore_sequence;
113 u32 semaphore_trigger;
114 INSERT_PADDING_WORDS_NOINIT(0xC);
115
116 // The pusher and the puller share the reference counter, the pusher only has read
117 // access
118 u32 reference_count;
119 INSERT_PADDING_WORDS_NOINIT(0x5);
120
121 u32 semaphore_acquire;
122 u32 semaphore_release;
123 u32 fence_value;
124 FenceAction fence_action;
125 INSERT_PADDING_WORDS_NOINIT(0xE2);
126
127 // Puller state
128 u32 acquire_mode;
129 u32 acquire_source;
130 u32 acquire_active;
131 u32 acquire_timeout;
132 u32 acquire_value;
133 };
134 std::array<u32, NUM_REGS> reg_array;
135 };
136 } regs{};
137
138 void ProcessBindMethod(const MethodCall& method_call);
139 void ProcessFenceActionMethod();
140 void ProcessSemaphoreAcquire();
141 void ProcessSemaphoreRelease();
142 void ProcessSemaphoreTriggerMethod();
143 [[nodiscard]] bool ExecuteMethodOnEngine(u32 method);
144
145 /// Mapping of command subchannels to their bound engine ids
146 std::array<EngineID, 8> bound_engines{};
147
148 enum class GpuSemaphoreOperation {
149 AcquireEqual = 0x1,
150 WriteLong = 0x2,
151 AcquireGequal = 0x4,
152 AcquireMask = 0x8,
153 };
154
155#define ASSERT_REG_POSITION(field_name, position) \
156 static_assert(offsetof(Regs, field_name) == position * 4, \
157 "Field " #field_name " has invalid position")
158
159 ASSERT_REG_POSITION(semaphore_address, 0x4);
160 ASSERT_REG_POSITION(semaphore_sequence, 0x6);
161 ASSERT_REG_POSITION(semaphore_trigger, 0x7);
162 ASSERT_REG_POSITION(reference_count, 0x14);
163 ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
164 ASSERT_REG_POSITION(semaphore_release, 0x1B);
165 ASSERT_REG_POSITION(fence_value, 0x1C);
166 ASSERT_REG_POSITION(fence_action, 0x1D);
167
168 ASSERT_REG_POSITION(acquire_mode, 0x100);
169 ASSERT_REG_POSITION(acquire_source, 0x101);
170 ASSERT_REG_POSITION(acquire_active, 0x102);
171 ASSERT_REG_POSITION(acquire_timeout, 0x103);
172 ASSERT_REG_POSITION(acquire_value, 0x104);
173
174#undef ASSERT_REG_POSITION
175};
176
177} // namespace Tegra::Engines
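puller.h carries over the register-file idiom from gpu.cpp: named registers and a raw word array share storage through a union, and ASSERT_REG_POSITION pins each register to its documented word offset at compile time. A condensed illustration with two made-up registers; it relies on the same anonymous-struct union extension (and union type-punning) that the engine code uses, which the major compilers accept even though ISO C++ leaves it undefined.

```cpp
// Sketch: named registers aliased over a raw u32 array, with offset checks.
#include <array>
#include <cstddef>
#include <cstdint>

struct Regs {
    static constexpr std::size_t NUM_REGS = 0x8;
    union {
        struct {
            std::uint32_t pad0[0x4];    // words 0x0..0x3 unused in this sketch
            std::uint32_t semaphore_hi; // word 0x4
            std::uint32_t semaphore_lo; // word 0x5
            std::uint32_t pad1[0x2];
        };
        std::array<std::uint32_t, NUM_REGS> reg_array;
    };
};

// The same check ASSERT_REG_POSITION performs: word offset * 4 bytes.
static_assert(offsetof(Regs, semaphore_hi) == 0x4 * 4, "semaphore_hi misplaced");
static_assert(offsetof(Regs, semaphore_lo) == 0x5 * 4, "semaphore_lo misplaced");
static_assert(sizeof(Regs) == Regs::NUM_REGS * sizeof(std::uint32_t));

int main() {
    Regs regs{};
    regs.reg_array[0x4] = 0xDEADBEEF;                // write through the raw view...
    return regs.semaphore_hi == 0xDEADBEEF ? 0 : 1; // ...read back by name
}
```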
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index 1e9832ddd..c390ac91b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -4,40 +4,24 @@
4#pragma once 4#pragma once
5 5
6#include <algorithm> 6#include <algorithm>
7#include <cstring>
8#include <deque>
9#include <functional>
10#include <memory>
7#include <queue> 11#include <queue>
8 12
9#include "common/common_types.h" 13#include "common/common_types.h"
10#include "video_core/delayed_destruction_ring.h" 14#include "video_core/delayed_destruction_ring.h"
11#include "video_core/gpu.h" 15#include "video_core/gpu.h"
12#include "video_core/memory_manager.h" 16#include "video_core/host1x/host1x.h"
17#include "video_core/host1x/syncpoint_manager.h"
13#include "video_core/rasterizer_interface.h" 18#include "video_core/rasterizer_interface.h"
14 19
15namespace VideoCommon { 20namespace VideoCommon {
16 21
17class FenceBase { 22class FenceBase {
18public: 23public:
19 explicit FenceBase(u32 payload_, bool is_stubbed_) 24 explicit FenceBase(bool is_stubbed_) : is_stubbed{is_stubbed_} {}
20 : address{}, payload{payload_}, is_semaphore{false}, is_stubbed{is_stubbed_} {}
21
22 explicit FenceBase(GPUVAddr address_, u32 payload_, bool is_stubbed_)
23 : address{address_}, payload{payload_}, is_semaphore{true}, is_stubbed{is_stubbed_} {}
24
25 GPUVAddr GetAddress() const {
26 return address;
27 }
28
29 u32 GetPayload() const {
30 return payload;
31 }
32
33 bool IsSemaphore() const {
34 return is_semaphore;
35 }
36
37private:
38 GPUVAddr address;
39 u32 payload;
40 bool is_semaphore;
41 25
42protected: 26protected:
43 bool is_stubbed; 27 bool is_stubbed;
@@ -57,30 +41,28 @@ public:
57 buffer_cache.AccumulateFlushes(); 41 buffer_cache.AccumulateFlushes();
58 } 42 }
59 43
60 void SignalSemaphore(GPUVAddr addr, u32 value) { 44 void SyncOperation(std::function<void()>&& func) {
45 uncommitted_operations.emplace_back(std::move(func));
46 }
47
48 void SignalFence(std::function<void()>&& func) {
61 TryReleasePendingFences(); 49 TryReleasePendingFences();
62 const bool should_flush = ShouldFlush(); 50 const bool should_flush = ShouldFlush();
63 CommitAsyncFlushes(); 51 CommitAsyncFlushes();
64 TFence new_fence = CreateFence(addr, value, !should_flush); 52 uncommitted_operations.emplace_back(std::move(func));
53 CommitOperations();
54 TFence new_fence = CreateFence(!should_flush);
65 fences.push(new_fence); 55 fences.push(new_fence);
66 QueueFence(new_fence); 56 QueueFence(new_fence);
67 if (should_flush) { 57 if (should_flush) {
68 rasterizer.FlushCommands(); 58 rasterizer.FlushCommands();
69 } 59 }
70 rasterizer.SyncGuestHost();
71 } 60 }
72 61
73 void SignalSyncPoint(u32 value) { 62 void SignalSyncPoint(u32 value) {
74 TryReleasePendingFences(); 63 syncpoint_manager.IncrementGuest(value);
75 const bool should_flush = ShouldFlush(); 64 std::function<void()> func([this, value] { syncpoint_manager.IncrementHost(value); });
76 CommitAsyncFlushes(); 65 SignalFence(std::move(func));
77 TFence new_fence = CreateFence(value, !should_flush);
78 fences.push(new_fence);
79 QueueFence(new_fence);
80 if (should_flush) {
81 rasterizer.FlushCommands();
82 }
83 rasterizer.SyncGuestHost();
84 } 66 }
85 67
86 void WaitPendingFences() { 68 void WaitPendingFences() {
@@ -90,11 +72,10 @@ public:
90 WaitFence(current_fence); 72 WaitFence(current_fence);
91 } 73 }
92 PopAsyncFlushes(); 74 PopAsyncFlushes();
93 if (current_fence->IsSemaphore()) { 75 auto operations = std::move(pending_operations.front());
94 gpu_memory.template Write<u32>(current_fence->GetAddress(), 76 pending_operations.pop_front();
95 current_fence->GetPayload()); 77 for (auto& operation : operations) {
96 } else { 78 operation();
97 gpu.IncrementSyncPoint(current_fence->GetPayload());
98 } 79 }
99 PopFence(); 80 PopFence();
100 } 81 }
@@ -104,16 +85,14 @@ protected:
104 explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_, 85 explicit FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::GPU& gpu_,
105 TTextureCache& texture_cache_, TTBufferCache& buffer_cache_, 86 TTextureCache& texture_cache_, TTBufferCache& buffer_cache_,
106 TQueryCache& query_cache_) 87 TQueryCache& query_cache_)
107 : rasterizer{rasterizer_}, gpu{gpu_}, gpu_memory{gpu.MemoryManager()}, 88 : rasterizer{rasterizer_}, gpu{gpu_}, syncpoint_manager{gpu.Host1x().GetSyncpointManager()},
108 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {} 89 texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, query_cache{query_cache_} {}
109 90
110 virtual ~FenceManager() = default; 91 virtual ~FenceManager() = default;
111 92
112 /// Creates a Sync Point Fence Interface, does not create a backend fence if 'is_stubbed' is 93 /// Creates a Fence Interface, does not create a backend fence if 'is_stubbed' is
113 /// true 94 /// true
114 virtual TFence CreateFence(u32 value, bool is_stubbed) = 0; 95 virtual TFence CreateFence(bool is_stubbed) = 0;
115 /// Creates a Semaphore Fence Interface, does not create a backend fence if 'is_stubbed' is true
116 virtual TFence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) = 0;
117 /// Queues a fence into the backend if the fence isn't stubbed. 96 /// Queues a fence into the backend if the fence isn't stubbed.
118 virtual void QueueFence(TFence& fence) = 0; 97 virtual void QueueFence(TFence& fence) = 0;
119 /// Notifies that the backend fence has been signaled/reached in host GPU. 98 /// Notifies that the backend fence has been signaled/reached in host GPU.
@@ -123,7 +102,7 @@ protected:
123 102
124 VideoCore::RasterizerInterface& rasterizer; 103 VideoCore::RasterizerInterface& rasterizer;
125 Tegra::GPU& gpu; 104 Tegra::GPU& gpu;
126 Tegra::MemoryManager& gpu_memory; 105 Tegra::Host1x::SyncpointManager& syncpoint_manager;
127 TTextureCache& texture_cache; 106 TTextureCache& texture_cache;
128 TTBufferCache& buffer_cache; 107 TTBufferCache& buffer_cache;
129 TQueryCache& query_cache; 108 TQueryCache& query_cache;
@@ -136,11 +115,10 @@ private:
136 return; 115 return;
137 } 116 }
138 PopAsyncFlushes(); 117 PopAsyncFlushes();
139 if (current_fence->IsSemaphore()) { 118 auto operations = std::move(pending_operations.front());
140 gpu_memory.template Write<u32>(current_fence->GetAddress(), 119 pending_operations.pop_front();
141 current_fence->GetPayload()); 120 for (auto& operation : operations) {
142 } else { 121 operation();
143 gpu.IncrementSyncPoint(current_fence->GetPayload());
144 } 122 }
145 PopFence(); 123 PopFence();
146 } 124 }
@@ -159,16 +137,20 @@ private:
159 } 137 }
160 138
161 void PopAsyncFlushes() { 139 void PopAsyncFlushes() {
162 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 140 {
163 texture_cache.PopAsyncFlushes(); 141 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
164 buffer_cache.PopAsyncFlushes(); 142 texture_cache.PopAsyncFlushes();
143 buffer_cache.PopAsyncFlushes();
144 }
165 query_cache.PopAsyncFlushes(); 145 query_cache.PopAsyncFlushes();
166 } 146 }
167 147
168 void CommitAsyncFlushes() { 148 void CommitAsyncFlushes() {
169 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 149 {
170 texture_cache.CommitAsyncFlushes(); 150 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
171 buffer_cache.CommitAsyncFlushes(); 151 texture_cache.CommitAsyncFlushes();
152 buffer_cache.CommitAsyncFlushes();
153 }
172 query_cache.CommitAsyncFlushes(); 154 query_cache.CommitAsyncFlushes();
173 } 155 }
174 156
@@ -177,7 +159,13 @@ private:
177 fences.pop(); 159 fences.pop();
178 } 160 }
179 161
162 void CommitOperations() {
163 pending_operations.emplace_back(std::move(uncommitted_operations));
164 }
165
180 std::queue<TFence> fences; 166 std::queue<TFence> fences;
167 std::deque<std::function<void()>> uncommitted_operations;
168 std::deque<std::deque<std::function<void()>>> pending_operations;
181 169
182 DelayedDestructionRing<TFence, 6> delayed_destruction_ring; 170 DelayedDestructionRing<TFence, 6> delayed_destruction_ring;
183}; 171};
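fence_manager.h replaces the semaphore/syncpoint special cases with generic operation batches: SyncOperation accumulates closures, SignalFence commits the whole batch alongside a fence, and popping a fence runs exactly the closures that were committed with it. A self-contained sketch with fences reduced to integers:

```cpp
// Sketch: per-fence operation batches, mirroring uncommitted_operations /
// pending_operations in the hunk above.
#include <cstdio>
#include <deque>
#include <functional>

class MiniFenceManager {
public:
    void SyncOperation(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
    }
    int SignalFence(std::function<void()>&& func) {
        uncommitted_operations.emplace_back(std::move(func));
        // CommitOperations(): the whole batch travels with this fence.
        pending_operations.emplace_back(std::move(uncommitted_operations));
        uncommitted_operations.clear();
        return next_fence++;
    }
    void PopFence() { // host GPU reached the oldest fence
        auto operations = std::move(pending_operations.front());
        pending_operations.pop_front();
        for (auto& operation : operations) {
            operation();
        }
    }

private:
    int next_fence = 0;
    std::deque<std::function<void()>> uncommitted_operations;
    std::deque<std::deque<std::function<void()>>> pending_operations;
};

int main() {
    MiniFenceManager manager;
    manager.SyncOperation([] { std::puts("flush region A"); });
    manager.SignalFence([] { std::puts("write semaphore payload"); });
    manager.SignalFence([] { std::puts("increment syncpoint 3"); });
    manager.PopFence(); // runs "flush region A" then "write semaphore payload"
    manager.PopFence(); // runs "increment syncpoint 3"
}
```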
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index 33431f2a0..28b38273e 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -14,10 +14,11 @@
14#include "core/core.h" 14#include "core/core.h"
15#include "core/core_timing.h" 15#include "core/core_timing.h"
16#include "core/frontend/emu_window.h" 16#include "core/frontend/emu_window.h"
17#include "core/hardware_interrupt_manager.h"
18#include "core/hle/service/nvdrv/nvdata.h" 17#include "core/hle/service/nvdrv/nvdata.h"
19#include "core/perf_stats.h" 18#include "core/perf_stats.h"
20#include "video_core/cdma_pusher.h" 19#include "video_core/cdma_pusher.h"
20#include "video_core/control/channel_state.h"
21#include "video_core/control/scheduler.h"
21#include "video_core/dma_pusher.h" 22#include "video_core/dma_pusher.h"
22#include "video_core/engines/fermi_2d.h" 23#include "video_core/engines/fermi_2d.h"
23#include "video_core/engines/kepler_compute.h" 24#include "video_core/engines/kepler_compute.h"
@@ -26,75 +27,64 @@
26#include "video_core/engines/maxwell_dma.h" 27#include "video_core/engines/maxwell_dma.h"
27#include "video_core/gpu.h" 28#include "video_core/gpu.h"
28#include "video_core/gpu_thread.h" 29#include "video_core/gpu_thread.h"
30#include "video_core/host1x/host1x.h"
31#include "video_core/host1x/syncpoint_manager.h"
29#include "video_core/memory_manager.h" 32#include "video_core/memory_manager.h"
30#include "video_core/renderer_base.h" 33#include "video_core/renderer_base.h"
31#include "video_core/shader_notify.h" 34#include "video_core/shader_notify.h"
32 35
33namespace Tegra { 36namespace Tegra {
34 37
35MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
36
37struct GPU::Impl { 38struct GPU::Impl {
38 explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_) 39 explicit Impl(GPU& gpu_, Core::System& system_, bool is_async_, bool use_nvdec_)
39 : gpu{gpu_}, system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>( 40 : gpu{gpu_}, system{system_}, host1x{system.Host1x()}, use_nvdec{use_nvdec_},
40 system)},
41 dma_pusher{std::make_unique<Tegra::DmaPusher>(system, gpu)}, use_nvdec{use_nvdec_},
42 maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
43 fermi_2d{std::make_unique<Engines::Fermi2D>()},
44 kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
45 maxwell_dma{std::make_unique<Engines::MaxwellDMA>(system, *memory_manager)},
46 kepler_memory{std::make_unique<Engines::KeplerMemory>(system, *memory_manager)},
47 shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_}, 41 shader_notify{std::make_unique<VideoCore::ShaderNotify>()}, is_async{is_async_},
48 gpu_thread{system_, is_async_} {} 42 gpu_thread{system_, is_async_}, scheduler{std::make_unique<Control::Scheduler>(gpu)} {}
49 43
50 ~Impl() = default; 44 ~Impl() = default;
51 45
52 /// Binds a renderer to the GPU. 46 std::shared_ptr<Control::ChannelState> CreateChannel(s32 channel_id) {
53 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) { 47 auto channel_state = std::make_shared<Tegra::Control::ChannelState>(channel_id);
54 renderer = std::move(renderer_); 48 channels.emplace(channel_id, channel_state);
55 rasterizer = renderer->ReadRasterizer(); 49 scheduler->DeclareChannel(channel_state);
56 50 return channel_state;
57 memory_manager->BindRasterizer(rasterizer);
58 maxwell_3d->BindRasterizer(rasterizer);
59 fermi_2d->BindRasterizer(rasterizer);
60 kepler_compute->BindRasterizer(rasterizer);
61 kepler_memory->BindRasterizer(rasterizer);
62 maxwell_dma->BindRasterizer(rasterizer);
63 } 51 }
64 52
65 /// Calls a GPU method. 53 void BindChannel(s32 channel_id) {
66 void CallMethod(const GPU::MethodCall& method_call) { 54 if (bound_channel == channel_id) {
67 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method_call.method, 55 return;
68 method_call.subchannel); 56 }
57 auto it = channels.find(channel_id);
58 ASSERT(it != channels.end());
59 bound_channel = channel_id;
60 current_channel = it->second.get();
69 61
70 ASSERT(method_call.subchannel < bound_engines.size()); 62 rasterizer->BindChannel(*current_channel);
63 }
71 64
72 if (ExecuteMethodOnEngine(method_call.method)) { 65 std::shared_ptr<Control::ChannelState> AllocateChannel() {
73 CallEngineMethod(method_call); 66 return CreateChannel(new_channel_id++);
74 } else {
75 CallPullerMethod(method_call);
76 }
77 } 67 }
78 68
79 /// Calls a GPU multivalue method. 69 void InitChannel(Control::ChannelState& to_init) {
80 void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, 70 to_init.Init(system, gpu);
81 u32 methods_pending) { 71 to_init.BindRasterizer(rasterizer);
82 LOG_TRACE(HW_GPU, "Processing method {:08X} on subchannel {}", method, subchannel); 72 rasterizer->InitializeChannel(to_init);
73 }
83 74
84 ASSERT(subchannel < bound_engines.size()); 75 void InitAddressSpace(Tegra::MemoryManager& memory_manager) {
76 memory_manager.BindRasterizer(rasterizer);
77 }
85 78
86 if (ExecuteMethodOnEngine(method)) { 79 void ReleaseChannel(Control::ChannelState& to_release) {
87 CallEngineMultiMethod(method, subchannel, base_start, amount, methods_pending); 80 UNIMPLEMENTED();
88 } else { 81 }
89 for (std::size_t i = 0; i < amount; i++) { 82
90 CallPullerMethod(GPU::MethodCall{ 83 /// Binds a renderer to the GPU.
91 method, 84 void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
92 base_start[i], 85 renderer = std::move(renderer_);
93 subchannel, 86 rasterizer = renderer->ReadRasterizer();
94 methods_pending - static_cast<u32>(i), 87 host1x.MemoryManager().BindRasterizer(rasterizer);
95 });
96 }
97 }
98 } 88 }
99 89
100 /// Flush all current written commands into the host GPU for execution. 90 /// Flush all current written commands into the host GPU for execution.
@@ -103,85 +93,82 @@ struct GPU::Impl {
103 } 93 }
104 94
105 /// Synchronizes CPU writes with Host GPU memory. 95 /// Synchronizes CPU writes with Host GPU memory.
106 void SyncGuestHost() { 96 void InvalidateGPUCache() {
107 rasterizer->SyncGuestHost(); 97 rasterizer->InvalidateGPUCache();
108 } 98 }
109 99
110 /// Signal the ending of command list. 100 /// Signal the ending of command list.
111 void OnCommandListEnd() { 101 void OnCommandListEnd() {
112 if (is_async) { 102 gpu_thread.OnCommandListEnd();
113 // This command only applies to asynchronous GPU mode
114 gpu_thread.OnCommandListEnd();
115 }
116 } 103 }
117 104
118 /// Request a host GPU memory flush from the CPU. 105 /// Request a host GPU memory flush from the CPU.
119 [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size) { 106 template <typename Func>
120 std::unique_lock lck{flush_request_mutex}; 107 [[nodiscard]] u64 RequestSyncOperation(Func&& action) {
121 const u64 fence = ++last_flush_fence; 108 std::unique_lock lck{sync_request_mutex};
122 flush_requests.emplace_back(fence, addr, size); 109 const u64 fence = ++last_sync_fence;
110 sync_requests.emplace_back(action);
123 return fence; 111 return fence;
124 } 112 }
125 113
126 /// Obtains current flush request fence id. 114 /// Obtains current flush request fence id.
127 [[nodiscard]] u64 CurrentFlushRequestFence() const { 115 [[nodiscard]] u64 CurrentSyncRequestFence() const {
128 return current_flush_fence.load(std::memory_order_relaxed); 116 return current_sync_fence.load(std::memory_order_relaxed);
117 }
118
119 void WaitForSyncOperation(const u64 fence) {
120 std::unique_lock lck{sync_request_mutex};
121 sync_request_cv.wait(lck, [this, fence] { return CurrentSyncRequestFence() >= fence; });
129 } 122 }
130 123
131 /// Tick pending requests within the GPU. 124 /// Tick pending requests within the GPU.
132 void TickWork() { 125 void TickWork() {
133 std::unique_lock lck{flush_request_mutex}; 126 std::unique_lock lck{sync_request_mutex};
134 while (!flush_requests.empty()) { 127 while (!sync_requests.empty()) {
135 auto& request = flush_requests.front(); 128 auto request = std::move(sync_requests.front());
136 const u64 fence = request.fence; 129 sync_requests.pop_front();
137 const VAddr addr = request.addr; 130 sync_request_mutex.unlock();
138 const std::size_t size = request.size; 131 request();
139 flush_requests.pop_front(); 132 current_sync_fence.fetch_add(1, std::memory_order_release);
140 flush_request_mutex.unlock(); 133 sync_request_mutex.lock();
141 rasterizer->FlushRegion(addr, size); 134 sync_request_cv.notify_all();
142 current_flush_fence.store(fence);
143 flush_request_mutex.lock();
144 } 135 }
145 } 136 }
146 137
147 /// Returns a reference to the Maxwell3D GPU engine. 138 /// Returns a reference to the Maxwell3D GPU engine.
148 [[nodiscard]] Engines::Maxwell3D& Maxwell3D() { 139 [[nodiscard]] Engines::Maxwell3D& Maxwell3D() {
149 return *maxwell_3d; 140 ASSERT(current_channel);
141 return *current_channel->maxwell_3d;
150 } 142 }
151 143
152 /// Returns a const reference to the Maxwell3D GPU engine. 144 /// Returns a const reference to the Maxwell3D GPU engine.
153 [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const { 145 [[nodiscard]] const Engines::Maxwell3D& Maxwell3D() const {
154 return *maxwell_3d; 146 ASSERT(current_channel);
147 return *current_channel->maxwell_3d;
155 } 148 }
156 149
157 /// Returns a reference to the KeplerCompute GPU engine. 150 /// Returns a reference to the KeplerCompute GPU engine.
158 [[nodiscard]] Engines::KeplerCompute& KeplerCompute() { 151 [[nodiscard]] Engines::KeplerCompute& KeplerCompute() {
159 return *kepler_compute; 152 ASSERT(current_channel);
153 return *current_channel->kepler_compute;
160 } 154 }
161 155
162 /// Returns a reference to the KeplerCompute GPU engine. 156 /// Returns a reference to the KeplerCompute GPU engine.
163 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const { 157 [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const {
164 return *kepler_compute; 158 ASSERT(current_channel);
165 } 159 return *current_channel->kepler_compute;
166
167 /// Returns a reference to the GPU memory manager.
168 [[nodiscard]] Tegra::MemoryManager& MemoryManager() {
169 return *memory_manager;
170 }
171
172 /// Returns a const reference to the GPU memory manager.
173 [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const {
174 return *memory_manager;
175 } 160 }
176 161
177 /// Returns a reference to the GPU DMA pusher. 162 /// Returns a reference to the GPU DMA pusher.
178 [[nodiscard]] Tegra::DmaPusher& DmaPusher() { 163 [[nodiscard]] Tegra::DmaPusher& DmaPusher() {
179 return *dma_pusher; 164 ASSERT(current_channel);
165 return *current_channel->dma_pusher;
180 } 166 }
181 167
182 /// Returns a const reference to the GPU DMA pusher. 168 /// Returns a const reference to the GPU DMA pusher.
183 [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const { 169 [[nodiscard]] const Tegra::DmaPusher& DmaPusher() const {
184 return *dma_pusher; 170 ASSERT(current_channel);
171 return *current_channel->dma_pusher;
185 } 172 }
186 173
187 /// Returns a reference to the underlying renderer. 174 /// Returns a reference to the underlying renderer.
@@ -204,77 +191,6 @@ struct GPU::Impl {
204 return *shader_notify; 191 return *shader_notify;
205 } 192 }
206 193
207 /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
208 void WaitFence(u32 syncpoint_id, u32 value) {
209 // Synced GPU, is always in sync
210 if (!is_async) {
211 return;
212 }
213 if (syncpoint_id == UINT32_MAX) {
214 // TODO: Research what this does.
215 LOG_ERROR(HW_GPU, "Waiting for syncpoint -1 not implemented");
216 return;
217 }
218 MICROPROFILE_SCOPE(GPU_wait);
219 std::unique_lock lock{sync_mutex};
220 sync_cv.wait(lock, [=, this] {
221 if (shutting_down.load(std::memory_order_relaxed)) {
222 // We're shutting down, ensure no threads continue to wait for the next syncpoint
223 return true;
224 }
225 return syncpoints.at(syncpoint_id).load() >= value;
226 });
227 }
228
229 void IncrementSyncPoint(u32 syncpoint_id) {
230 auto& syncpoint = syncpoints.at(syncpoint_id);
231 syncpoint++;
232 std::scoped_lock lock{sync_mutex};
233 sync_cv.notify_all();
234 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
235 if (!interrupt.empty()) {
236 u32 value = syncpoint.load();
237 auto it = interrupt.begin();
238 while (it != interrupt.end()) {
239 if (value >= *it) {
240 TriggerCpuInterrupt(syncpoint_id, *it);
241 it = interrupt.erase(it);
242 continue;
243 }
244 it++;
245 }
246 }
247 }
248
249 [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const {
250 return syncpoints.at(syncpoint_id).load();
251 }
252
253 void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
254 std::scoped_lock lock{sync_mutex};
255 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
256 bool contains = std::any_of(interrupt.begin(), interrupt.end(),
257 [value](u32 in_value) { return in_value == value; });
258 if (contains) {
259 return;
260 }
261 interrupt.emplace_back(value);
262 }
263
264 [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
265 std::scoped_lock lock{sync_mutex};
266 auto& interrupt = syncpt_interrupts.at(syncpoint_id);
267 const auto iter =
268 std::find_if(interrupt.begin(), interrupt.end(),
269 [value](u32 interrupt_value) { return value == interrupt_value; });
270
271 if (iter == interrupt.end()) {
272 return false;
273 }
274 interrupt.erase(iter);
275 return true;
276 }
277
278 [[nodiscard]] u64 GetTicks() const { 194 [[nodiscard]] u64 GetTicks() const {
 279 // These values were reverse engineered by fincs from NVN 195 // These values were reverse engineered by fincs from NVN
280 // The gpu clock is reported in units of 385/625 nanoseconds 196 // The gpu clock is reported in units of 385/625 nanoseconds
@@ -306,7 +222,7 @@ struct GPU::Impl {
306 /// This can be used to launch any necessary threads and register any necessary 222 /// This can be used to launch any necessary threads and register any necessary
307 /// core timing events. 223 /// core timing events.
308 void Start() { 224 void Start() {
309 gpu_thread.StartThread(*renderer, renderer->Context(), *dma_pusher); 225 gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
310 cpu_context = renderer->GetRenderWindow().CreateSharedContext(); 226 cpu_context = renderer->GetRenderWindow().CreateSharedContext();
311 cpu_context->MakeCurrent(); 227 cpu_context->MakeCurrent();
312 } 228 }
@@ -328,8 +244,8 @@ struct GPU::Impl {
328 } 244 }
329 245
330 /// Push GPU command entries to be processed 246 /// Push GPU command entries to be processed
331 void PushGPUEntries(Tegra::CommandList&& entries) { 247 void PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
332 gpu_thread.SubmitList(std::move(entries)); 248 gpu_thread.SubmitList(channel, std::move(entries));
333 } 249 }
334 250
335 /// Push GPU command buffer entries to be processed 251 /// Push GPU command buffer entries to be processed
@@ -339,7 +255,7 @@ struct GPU::Impl {
339 } 255 }
340 256
341 if (!cdma_pushers.contains(id)) { 257 if (!cdma_pushers.contains(id)) {
342 cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(gpu)); 258 cdma_pushers.insert_or_assign(id, std::make_unique<Tegra::CDmaPusher>(host1x));
343 } 259 }
344 260
345 // SubmitCommandBuffer would make the nvdec operations async, this is not currently working 261 // SubmitCommandBuffer would make the nvdec operations async, this is not currently working
@@ -376,308 +292,55 @@ struct GPU::Impl {
376 gpu_thread.FlushAndInvalidateRegion(addr, size); 292 gpu_thread.FlushAndInvalidateRegion(addr, size);
377 } 293 }
378 294
379 void TriggerCpuInterrupt(u32 syncpoint_id, u32 value) const { 295 void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
380 auto& interrupt_manager = system.InterruptManager(); 296 std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
381 interrupt_manager.GPUInterruptSyncpt(syncpoint_id, value); 297 size_t current_request_counter{};
382 } 298 {
383 299 std::unique_lock<std::mutex> lk(request_swap_mutex);
384 void ProcessBindMethod(const GPU::MethodCall& method_call) { 300 if (free_swap_counters.empty()) {
385 // Bind the current subchannel to the desired engine id. 301 current_request_counter = request_swap_counters.size();
386 LOG_DEBUG(HW_GPU, "Binding subchannel {} to engine {}", method_call.subchannel, 302 request_swap_counters.emplace_back(num_fences);
387 method_call.argument);
388 const auto engine_id = static_cast<EngineID>(method_call.argument);
389 bound_engines[method_call.subchannel] = static_cast<EngineID>(engine_id);
390 switch (engine_id) {
391 case EngineID::FERMI_TWOD_A:
392 dma_pusher->BindSubchannel(fermi_2d.get(), method_call.subchannel);
393 break;
394 case EngineID::MAXWELL_B:
395 dma_pusher->BindSubchannel(maxwell_3d.get(), method_call.subchannel);
396 break;
397 case EngineID::KEPLER_COMPUTE_B:
398 dma_pusher->BindSubchannel(kepler_compute.get(), method_call.subchannel);
399 break;
400 case EngineID::MAXWELL_DMA_COPY_A:
401 dma_pusher->BindSubchannel(maxwell_dma.get(), method_call.subchannel);
402 break;
403 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
404 dma_pusher->BindSubchannel(kepler_memory.get(), method_call.subchannel);
405 break;
406 default:
407 UNIMPLEMENTED_MSG("Unimplemented engine {:04X}", engine_id);
408 }
409 }
410
411 void ProcessFenceActionMethod() {
412 switch (regs.fence_action.op) {
413 case GPU::FenceOperation::Acquire:
414 WaitFence(regs.fence_action.syncpoint_id, regs.fence_value);
415 break;
416 case GPU::FenceOperation::Increment:
417 IncrementSyncPoint(regs.fence_action.syncpoint_id);
418 break;
419 default:
420 UNIMPLEMENTED_MSG("Unimplemented operation {}", regs.fence_action.op.Value());
421 }
422 }
423
424 void ProcessWaitForInterruptMethod() {
425 // TODO(bunnei) ImplementMe
426 LOG_WARNING(HW_GPU, "(STUBBED) called");
427 }
428
429 void ProcessSemaphoreTriggerMethod() {
430 const auto semaphoreOperationMask = 0xF;
431 const auto op =
432 static_cast<GpuSemaphoreOperation>(regs.semaphore_trigger & semaphoreOperationMask);
433 if (op == GpuSemaphoreOperation::WriteLong) {
434 struct Block {
435 u32 sequence;
436 u32 zeros = 0;
437 u64 timestamp;
438 };
439
440 Block block{};
441 block.sequence = regs.semaphore_sequence;
442 // TODO(Kmather73): Generate a real GPU timestamp and write it here instead of
443 // CoreTiming
444 block.timestamp = GetTicks();
445 memory_manager->WriteBlock(regs.semaphore_address.SemaphoreAddress(), &block,
446 sizeof(block));
447 } else {
448 const u32 word{memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress())};
449 if ((op == GpuSemaphoreOperation::AcquireEqual && word == regs.semaphore_sequence) ||
450 (op == GpuSemaphoreOperation::AcquireGequal &&
451 static_cast<s32>(word - regs.semaphore_sequence) > 0) ||
452 (op == GpuSemaphoreOperation::AcquireMask && (word & regs.semaphore_sequence))) {
453 // Nothing to do in this case
454 } else { 303 } else {
455 regs.acquire_source = true; 304 current_request_counter = free_swap_counters.front();
456 regs.acquire_value = regs.semaphore_sequence; 305 request_swap_counters[current_request_counter] = num_fences;
457 if (op == GpuSemaphoreOperation::AcquireEqual) { 306 free_swap_counters.pop_front();
458 regs.acquire_active = true;
459 regs.acquire_mode = false;
460 } else if (op == GpuSemaphoreOperation::AcquireGequal) {
461 regs.acquire_active = true;
462 regs.acquire_mode = true;
463 } else if (op == GpuSemaphoreOperation::AcquireMask) {
464 // TODO(kemathe) The acquire mask operation waits for a value that, ANDed with
465 // semaphore_sequence, gives a non-0 result
466 LOG_ERROR(HW_GPU, "Invalid semaphore operation AcquireMask not implemented");
467 } else {
468 LOG_ERROR(HW_GPU, "Invalid semaphore operation");
469 }
470 } 307 }
471 } 308 }
472 } 309 const auto wait_fence =
473 310 RequestSyncOperation([this, current_request_counter, framebuffer, fences, num_fences] {
474 void ProcessSemaphoreRelease() { 311 auto& syncpoint_manager = host1x.GetSyncpointManager();
475 memory_manager->Write<u32>(regs.semaphore_address.SemaphoreAddress(), 312 if (num_fences == 0) {
476 regs.semaphore_release); 313 renderer->SwapBuffers(framebuffer);
477 } 314 }
478 315 const auto executer = [this, current_request_counter,
479 void ProcessSemaphoreAcquire() { 316 framebuffer_copy = *framebuffer]() {
480 const u32 word = memory_manager->Read<u32>(regs.semaphore_address.SemaphoreAddress()); 317 {
481 const auto value = regs.semaphore_acquire; 318 std::unique_lock<std::mutex> lk(request_swap_mutex);
482 if (word != value) { 319 if (--request_swap_counters[current_request_counter] != 0) {
483 regs.acquire_active = true; 320 return;
484 regs.acquire_value = value; 321 }
485 // TODO(kemathe73) figure out how to do the acquire_timeout 322 free_swap_counters.push_back(current_request_counter);
486 regs.acquire_mode = false;
487 regs.acquire_source = false;
488 }
489 }
490
491 /// Calls a GPU puller method.
492 void CallPullerMethod(const GPU::MethodCall& method_call) {
493 regs.reg_array[method_call.method] = method_call.argument;
494 const auto method = static_cast<BufferMethods>(method_call.method);
495
496 switch (method) {
497 case BufferMethods::BindObject: {
498 ProcessBindMethod(method_call);
499 break;
500 }
501 case BufferMethods::Nop:
502 case BufferMethods::SemaphoreAddressHigh:
503 case BufferMethods::SemaphoreAddressLow:
504 case BufferMethods::SemaphoreSequence:
505 break;
506 case BufferMethods::UnkCacheFlush:
507 rasterizer->SyncGuestHost();
508 break;
509 case BufferMethods::WrcacheFlush:
510 rasterizer->SignalReference();
511 break;
512 case BufferMethods::FenceValue:
513 break;
514 case BufferMethods::RefCnt:
515 rasterizer->SignalReference();
516 break;
517 case BufferMethods::FenceAction:
518 ProcessFenceActionMethod();
519 break;
520 case BufferMethods::WaitForInterrupt:
521 rasterizer->WaitForIdle();
522 break;
523 case BufferMethods::SemaphoreTrigger: {
524 ProcessSemaphoreTriggerMethod();
525 break;
526 }
527 case BufferMethods::NotifyIntr: {
528 // TODO(Kmather73): Research and implement this method.
529 LOG_ERROR(HW_GPU, "Special puller engine method NotifyIntr not implemented");
530 break;
531 }
532 case BufferMethods::Unk28: {
533 // TODO(Kmather73): Research and implement this method.
534 LOG_ERROR(HW_GPU, "Special puller engine method Unk28 not implemented");
535 break;
536 }
537 case BufferMethods::SemaphoreAcquire: {
538 ProcessSemaphoreAcquire();
539 break;
540 }
541 case BufferMethods::SemaphoreRelease: {
542 ProcessSemaphoreRelease();
543 break;
544 }
545 case BufferMethods::Yield: {
546 // TODO(Kmather73): Research and implement this method.
547 LOG_ERROR(HW_GPU, "Special puller engine method Yield not implemented");
548 break;
549 }
550 default:
551 LOG_ERROR(HW_GPU, "Special puller engine method {:X} not implemented", method);
552 break;
553 }
554 }
555
556 /// Calls a GPU engine method.
557 void CallEngineMethod(const GPU::MethodCall& method_call) {
558 const EngineID engine = bound_engines[method_call.subchannel];
559
560 switch (engine) {
561 case EngineID::FERMI_TWOD_A:
562 fermi_2d->CallMethod(method_call.method, method_call.argument,
563 method_call.IsLastCall());
564 break;
565 case EngineID::MAXWELL_B:
566 maxwell_3d->CallMethod(method_call.method, method_call.argument,
567 method_call.IsLastCall());
568 break;
569 case EngineID::KEPLER_COMPUTE_B:
570 kepler_compute->CallMethod(method_call.method, method_call.argument,
571 method_call.IsLastCall());
572 break;
573 case EngineID::MAXWELL_DMA_COPY_A:
574 maxwell_dma->CallMethod(method_call.method, method_call.argument,
575 method_call.IsLastCall());
576 break;
577 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
578 kepler_memory->CallMethod(method_call.method, method_call.argument,
579 method_call.IsLastCall());
580 break;
581 default:
582 UNIMPLEMENTED_MSG("Unimplemented engine");
583 }
584 }
585
586 /// Calls a GPU engine multivalue method.
587 void CallEngineMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
588 u32 methods_pending) {
589 const EngineID engine = bound_engines[subchannel];
590
591 switch (engine) {
592 case EngineID::FERMI_TWOD_A:
593 fermi_2d->CallMultiMethod(method, base_start, amount, methods_pending);
594 break;
595 case EngineID::MAXWELL_B:
596 maxwell_3d->CallMultiMethod(method, base_start, amount, methods_pending);
597 break;
598 case EngineID::KEPLER_COMPUTE_B:
599 kepler_compute->CallMultiMethod(method, base_start, amount, methods_pending);
600 break;
601 case EngineID::MAXWELL_DMA_COPY_A:
602 maxwell_dma->CallMultiMethod(method, base_start, amount, methods_pending);
603 break;
604 case EngineID::KEPLER_INLINE_TO_MEMORY_B:
605 kepler_memory->CallMultiMethod(method, base_start, amount, methods_pending);
606 break;
607 default:
608 UNIMPLEMENTED_MSG("Unimplemented engine");
609 }
610 }
611
612 /// Determines where the method should be executed.
613 [[nodiscard]] bool ExecuteMethodOnEngine(u32 method) {
614 const auto buffer_method = static_cast<BufferMethods>(method);
615 return buffer_method >= BufferMethods::NonPullerMethods;
616 }
617
618 struct Regs {
619 static constexpr size_t NUM_REGS = 0x40;
620
621 union {
622 struct {
623 INSERT_PADDING_WORDS_NOINIT(0x4);
624 struct {
625 u32 address_high;
626 u32 address_low;
627
628 [[nodiscard]] GPUVAddr SemaphoreAddress() const {
629 return static_cast<GPUVAddr>((static_cast<GPUVAddr>(address_high) << 32) |
630 address_low);
631 } 323 }
632 } semaphore_address; 324 renderer->SwapBuffers(&framebuffer_copy);
633 325 };
634 u32 semaphore_sequence; 326 for (size_t i = 0; i < num_fences; i++) {
635 u32 semaphore_trigger; 327 syncpoint_manager.RegisterGuestAction(fences[i].id, fences[i].value, executer);
636 INSERT_PADDING_WORDS_NOINIT(0xC); 328 }
637 329 });
638 // The pusher and the puller share the reference counter, the pusher only has read 330 gpu_thread.TickGPU();
639 // access 331 WaitForSyncOperation(wait_fence);
640 u32 reference_count; 332 }
641 INSERT_PADDING_WORDS_NOINIT(0x5);
642
643 u32 semaphore_acquire;
644 u32 semaphore_release;
645 u32 fence_value;
646 GPU::FenceAction fence_action;
647 INSERT_PADDING_WORDS_NOINIT(0xE2);
648
649 // Puller state
650 u32 acquire_mode;
651 u32 acquire_source;
652 u32 acquire_active;
653 u32 acquire_timeout;
654 u32 acquire_value;
655 };
656 std::array<u32, NUM_REGS> reg_array;
657 };
658 } regs{};
659 333
660 GPU& gpu; 334 GPU& gpu;
661 Core::System& system; 335 Core::System& system;
662 std::unique_ptr<Tegra::MemoryManager> memory_manager; 336 Host1x::Host1x& host1x;
663 std::unique_ptr<Tegra::DmaPusher> dma_pusher; 337
664 std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers; 338 std::map<u32, std::unique_ptr<Tegra::CDmaPusher>> cdma_pushers;
665 std::unique_ptr<VideoCore::RendererBase> renderer; 339 std::unique_ptr<VideoCore::RendererBase> renderer;
666 VideoCore::RasterizerInterface* rasterizer = nullptr; 340 VideoCore::RasterizerInterface* rasterizer = nullptr;
667 const bool use_nvdec; 341 const bool use_nvdec;
668 342
669 /// Mapping of command subchannels to their bound engine ids 343 s32 new_channel_id{1};
670 std::array<EngineID, 8> bound_engines{};
671 /// 3D engine
672 std::unique_ptr<Engines::Maxwell3D> maxwell_3d;
673 /// 2D engine
674 std::unique_ptr<Engines::Fermi2D> fermi_2d;
675 /// Compute engine
676 std::unique_ptr<Engines::KeplerCompute> kepler_compute;
677 /// DMA engine
678 std::unique_ptr<Engines::MaxwellDMA> maxwell_dma;
679 /// Inline memory engine
680 std::unique_ptr<Engines::KeplerMemory> kepler_memory;
681 /// Shader build notifier 344 /// Shader build notifier
682 std::unique_ptr<VideoCore::ShaderNotify> shader_notify; 345 std::unique_ptr<VideoCore::ShaderNotify> shader_notify;
683 /// When true, we are about to shut down emulation session, so terminate outstanding tasks 346 /// When true, we are about to shut down emulation session, so terminate outstanding tasks
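RequestSwapBuffers, added in this hunk, gates presentation on the frame's guest fences: a counter slot starts at num_fences, every fence signal decrements it, and only the final decrement performs the swap. A distilled sketch, with syncpoint registration modeled as directly invoking the returned callback:

```cpp
// Sketch: last-fence-wins swap gating, simplified from RequestSwapBuffers.
#include <cstddef>
#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

class SwapGate {
public:
    // Returns the action to register once per fence guarding this frame.
    std::function<void()> Arm(std::size_t num_fences, std::function<void()> swap) {
        std::lock_guard lk{mutex};
        counters.push_back(num_fences);
        const std::size_t slot = counters.size() - 1;
        return [this, slot, swap = std::move(swap)] {
            {
                std::lock_guard inner{mutex};
                if (--counters[slot] != 0) {
                    return; // other fences for this frame still pending
                }
            }
            swap(); // last fence signaled: present the frame
        };
    }

private:
    std::mutex mutex;
    std::vector<std::size_t> counters;
};

int main() {
    SwapGate gate;
    auto on_fence = gate.Arm(2, [] { std::puts("SwapBuffers"); });
    on_fence(); // fence 1 of 2: nothing happens
    on_fence(); // fence 2 of 2: swap fires
}
```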
@@ -692,51 +355,25 @@ struct GPU::Impl {
692 355
693 std::condition_variable sync_cv; 356 std::condition_variable sync_cv;
694 357
695 struct FlushRequest { 358 std::list<std::function<void()>> sync_requests;
696 explicit FlushRequest(u64 fence_, VAddr addr_, std::size_t size_) 359 std::atomic<u64> current_sync_fence{};
697 : fence{fence_}, addr{addr_}, size{size_} {} 360 u64 last_sync_fence{};
698 u64 fence; 361 std::mutex sync_request_mutex;
699 VAddr addr; 362 std::condition_variable sync_request_cv;
700 std::size_t size;
701 };
702
703 std::list<FlushRequest> flush_requests;
704 std::atomic<u64> current_flush_fence{};
705 u64 last_flush_fence{};
706 std::mutex flush_request_mutex;
707 363
708 const bool is_async; 364 const bool is_async;
709 365
710 VideoCommon::GPUThread::ThreadManager gpu_thread; 366 VideoCommon::GPUThread::ThreadManager gpu_thread;
711 std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context; 367 std::unique_ptr<Core::Frontend::GraphicsContext> cpu_context;
712 368
713#define ASSERT_REG_POSITION(field_name, position) \ 369 std::unique_ptr<Tegra::Control::Scheduler> scheduler;
714 static_assert(offsetof(Regs, field_name) == position * 4, \ 370 std::unordered_map<s32, std::shared_ptr<Tegra::Control::ChannelState>> channels;
715 "Field " #field_name " has invalid position") 371 Tegra::Control::ChannelState* current_channel;
716 372 s32 bound_channel{-1};
717 ASSERT_REG_POSITION(semaphore_address, 0x4); 373
718 ASSERT_REG_POSITION(semaphore_sequence, 0x6); 374 std::deque<size_t> free_swap_counters;
719 ASSERT_REG_POSITION(semaphore_trigger, 0x7); 375 std::deque<size_t> request_swap_counters;
720 ASSERT_REG_POSITION(reference_count, 0x14); 376 std::mutex request_swap_mutex;
721 ASSERT_REG_POSITION(semaphore_acquire, 0x1A);
722 ASSERT_REG_POSITION(semaphore_release, 0x1B);
723 ASSERT_REG_POSITION(fence_value, 0x1C);
724 ASSERT_REG_POSITION(fence_action, 0x1D);
725
726 ASSERT_REG_POSITION(acquire_mode, 0x100);
727 ASSERT_REG_POSITION(acquire_source, 0x101);
728 ASSERT_REG_POSITION(acquire_active, 0x102);
729 ASSERT_REG_POSITION(acquire_timeout, 0x103);
730 ASSERT_REG_POSITION(acquire_value, 0x104);
731
732#undef ASSERT_REG_POSITION
733
734 enum class GpuSemaphoreOperation {
735 AcquireEqual = 0x1,
736 WriteLong = 0x2,
737 AcquireGequal = 0x4,
738 AcquireMask = 0x8,
739 };
740}; 377};
741 378
742GPU::GPU(Core::System& system, bool is_async, bool use_nvdec) 379GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
@@ -744,25 +381,36 @@ GPU::GPU(Core::System& system, bool is_async, bool use_nvdec)
744 381
745GPU::~GPU() = default; 382GPU::~GPU() = default;
746 383
747void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) { 384std::shared_ptr<Control::ChannelState> GPU::AllocateChannel() {
748 impl->BindRenderer(std::move(renderer)); 385 return impl->AllocateChannel();
386}
387
388void GPU::InitChannel(Control::ChannelState& to_init) {
389 impl->InitChannel(to_init);
390}
391
392void GPU::BindChannel(s32 channel_id) {
393 impl->BindChannel(channel_id);
749} 394}
750 395
751void GPU::CallMethod(const MethodCall& method_call) { 396void GPU::ReleaseChannel(Control::ChannelState& to_release) {
752 impl->CallMethod(method_call); 397 impl->ReleaseChannel(to_release);
753} 398}
754 399
755void GPU::CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount, 400void GPU::InitAddressSpace(Tegra::MemoryManager& memory_manager) {
756 u32 methods_pending) { 401 impl->InitAddressSpace(memory_manager);
757 impl->CallMultiMethod(method, subchannel, base_start, amount, methods_pending); 402}
403
404void GPU::BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer) {
405 impl->BindRenderer(std::move(renderer));
758} 406}
759 407
760void GPU::FlushCommands() { 408void GPU::FlushCommands() {
761 impl->FlushCommands(); 409 impl->FlushCommands();
762} 410}
763 411
764void GPU::SyncGuestHost() { 412void GPU::InvalidateGPUCache() {
765 impl->SyncGuestHost(); 413 impl->InvalidateGPUCache();
766} 414}
767 415
768void GPU::OnCommandListEnd() { 416void GPU::OnCommandListEnd() {
@@ -770,17 +418,32 @@ void GPU::OnCommandListEnd() {
770} 418}
771 419
772u64 GPU::RequestFlush(VAddr addr, std::size_t size) { 420u64 GPU::RequestFlush(VAddr addr, std::size_t size) {
773 return impl->RequestFlush(addr, size); 421 return impl->RequestSyncOperation(
422 [this, addr, size]() { impl->rasterizer->FlushRegion(addr, size); });
774} 423}
775 424
776u64 GPU::CurrentFlushRequestFence() const { 425u64 GPU::CurrentSyncRequestFence() const {
777 return impl->CurrentFlushRequestFence(); 426 return impl->CurrentSyncRequestFence();
427}
428
429void GPU::WaitForSyncOperation(u64 fence) {
430 return impl->WaitForSyncOperation(fence);
778} 431}
779 432
780void GPU::TickWork() { 433void GPU::TickWork() {
781 impl->TickWork(); 434 impl->TickWork();
782} 435}
783 436
437/// Gets a mutable reference to the Host1x interface.
438Host1x::Host1x& GPU::Host1x() {
439 return impl->host1x;
440}
441
442/// Gets an immutable reference to the Host1x interface.
443const Host1x::Host1x& GPU::Host1x() const {
444 return impl->host1x;
445}
446
784Engines::Maxwell3D& GPU::Maxwell3D() { 447Engines::Maxwell3D& GPU::Maxwell3D() {
785 return impl->Maxwell3D(); 448 return impl->Maxwell3D();
786} 449}
@@ -797,14 +460,6 @@ const Engines::KeplerCompute& GPU::KeplerCompute() const {
797 return impl->KeplerCompute(); 460 return impl->KeplerCompute();
798} 461}
799 462
800Tegra::MemoryManager& GPU::MemoryManager() {
801 return impl->MemoryManager();
802}
803
804const Tegra::MemoryManager& GPU::MemoryManager() const {
805 return impl->MemoryManager();
806}
807
808Tegra::DmaPusher& GPU::DmaPusher() { 463Tegra::DmaPusher& GPU::DmaPusher() {
809 return impl->DmaPusher(); 464 return impl->DmaPusher();
810} 465}
@@ -829,24 +484,9 @@ const VideoCore::ShaderNotify& GPU::ShaderNotify() const {
829 return impl->ShaderNotify(); 484 return impl->ShaderNotify();
830} 485}
831 486
832void GPU::WaitFence(u32 syncpoint_id, u32 value) { 487void GPU::RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
833 impl->WaitFence(syncpoint_id, value); 488 std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences) {
834} 489 impl->RequestSwapBuffers(framebuffer, fences, num_fences);
835
836void GPU::IncrementSyncPoint(u32 syncpoint_id) {
837 impl->IncrementSyncPoint(syncpoint_id);
838}
839
840u32 GPU::GetSyncpointValue(u32 syncpoint_id) const {
841 return impl->GetSyncpointValue(syncpoint_id);
842}
843
844void GPU::RegisterSyncptInterrupt(u32 syncpoint_id, u32 value) {
845 impl->RegisterSyncptInterrupt(syncpoint_id, value);
846}
847
848bool GPU::CancelSyncptInterrupt(u32 syncpoint_id, u32 value) {
849 return impl->CancelSyncptInterrupt(syncpoint_id, value);
850} 490}
851 491
852u64 GPU::GetTicks() const { 492u64 GPU::GetTicks() const {
@@ -881,8 +521,8 @@ void GPU::ReleaseContext() {
881 impl->ReleaseContext(); 521 impl->ReleaseContext();
882} 522}
883 523
884void GPU::PushGPUEntries(Tegra::CommandList&& entries) { 524void GPU::PushGPUEntries(s32 channel, Tegra::CommandList&& entries) {
885 impl->PushGPUEntries(std::move(entries)); 525 impl->PushGPUEntries(channel, std::move(entries));
886} 526}
887 527
888void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) { 528void GPU::PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries) {
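With the MemoryManager() and syncpoint entry points removed from GPU, callers now reach GPU virtual memory and syncpoints through the Host1x hub. A minimal caller-side sketch under that assumption (the actual call sites are outside this diff):

#include <span>
#include "video_core/gpu.h"
#include "video_core/host1x/host1x.h"

// Hypothetical call site: both services now hang off gpu.Host1x().
void ReadAndWait(Tegra::GPU& gpu, u64 gpu_addr, std::span<u8> out, u32 syncpt, u32 expected) {
    gpu.Host1x().MemoryManager().ReadBlock(gpu_addr, out.data(), out.size()); // was gpu.MemoryManager()
    gpu.Host1x().GetSyncpointManager().WaitHost(syncpt, expected);            // was gpu.WaitFence()
}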
diff --git a/src/video_core/gpu.h b/src/video_core/gpu.h
index b939ba315..0a4a8b14f 100644
--- a/src/video_core/gpu.h
+++ b/src/video_core/gpu.h
@@ -89,73 +89,58 @@ class Maxwell3D;
 class KeplerCompute;
 } // namespace Engines
 
-enum class EngineID {
-    FERMI_TWOD_A = 0x902D, // 2D Engine
-    MAXWELL_B = 0xB197,    // 3D Engine
-    KEPLER_COMPUTE_B = 0xB1C0,
-    KEPLER_INLINE_TO_MEMORY_B = 0xA140,
-    MAXWELL_DMA_COPY_A = 0xB0B5,
-};
+namespace Control {
+struct ChannelState;
+}
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
 
 class MemoryManager;
 
 class GPU final {
 public:
-    struct MethodCall {
-        u32 method{};
-        u32 argument{};
-        u32 subchannel{};
-        u32 method_count{};
-
-        explicit MethodCall(u32 method_, u32 argument_, u32 subchannel_ = 0, u32 method_count_ = 0)
-            : method(method_), argument(argument_), subchannel(subchannel_),
-              method_count(method_count_) {}
-
-        [[nodiscard]] bool IsLastCall() const {
-            return method_count <= 1;
-        }
-    };
-
-    enum class FenceOperation : u32 {
-        Acquire = 0,
-        Increment = 1,
-    };
-
-    union FenceAction {
-        u32 raw;
-        BitField<0, 1, FenceOperation> op;
-        BitField<8, 24, u32> syncpoint_id;
-    };
-
     explicit GPU(Core::System& system, bool is_async, bool use_nvdec);
     ~GPU();
 
     /// Binds a renderer to the GPU.
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer);
 
-    /// Calls a GPU method.
-    void CallMethod(const MethodCall& method_call);
-
-    /// Calls a GPU multivalue method.
-    void CallMultiMethod(u32 method, u32 subchannel, const u32* base_start, u32 amount,
-                         u32 methods_pending);
-
     /// Flush all current written commands into the host GPU for execution.
     void FlushCommands();
     /// Synchronizes CPU writes with Host GPU memory.
-    void SyncGuestHost();
+    void InvalidateGPUCache();
     /// Signal the ending of command list.
     void OnCommandListEnd();
 
+    std::shared_ptr<Control::ChannelState> AllocateChannel();
+
+    void InitChannel(Control::ChannelState& to_init);
+
+    void BindChannel(s32 channel_id);
+
+    void ReleaseChannel(Control::ChannelState& to_release);
+
+    void InitAddressSpace(Tegra::MemoryManager& memory_manager);
+
     /// Request a host GPU memory flush from the CPU.
     [[nodiscard]] u64 RequestFlush(VAddr addr, std::size_t size);
 
     /// Obtains current flush request fence id.
-    [[nodiscard]] u64 CurrentFlushRequestFence() const;
+    [[nodiscard]] u64 CurrentSyncRequestFence() const;
+
+    void WaitForSyncOperation(u64 fence);
 
     /// Tick pending requests within the GPU.
     void TickWork();
 
+    /// Gets a mutable reference to the Host1x interface
+    [[nodiscard]] Host1x::Host1x& Host1x();
+
+    /// Gets an immutable reference to the Host1x interface.
+    [[nodiscard]] const Host1x::Host1x& Host1x() const;
+
     /// Returns a reference to the Maxwell3D GPU engine.
     [[nodiscard]] Engines::Maxwell3D& Maxwell3D();
 
@@ -168,12 +153,6 @@ public:
     /// Returns a reference to the KeplerCompute GPU engine.
     [[nodiscard]] const Engines::KeplerCompute& KeplerCompute() const;
 
-    /// Returns a reference to the GPU memory manager.
-    [[nodiscard]] Tegra::MemoryManager& MemoryManager();
-
-    /// Returns a const reference to the GPU memory manager.
-    [[nodiscard]] const Tegra::MemoryManager& MemoryManager() const;
-
     /// Returns a reference to the GPU DMA pusher.
     [[nodiscard]] Tegra::DmaPusher& DmaPusher();
 
@@ -192,17 +171,6 @@ public:
     /// Returns a const reference to the shader notifier.
     [[nodiscard]] const VideoCore::ShaderNotify& ShaderNotify() const;
 
-    /// Allows the CPU/NvFlinger to wait on the GPU before presenting a frame.
-    void WaitFence(u32 syncpoint_id, u32 value);
-
-    void IncrementSyncPoint(u32 syncpoint_id);
-
-    [[nodiscard]] u32 GetSyncpointValue(u32 syncpoint_id) const;
-
-    void RegisterSyncptInterrupt(u32 syncpoint_id, u32 value);
-
-    [[nodiscard]] bool CancelSyncptInterrupt(u32 syncpoint_id, u32 value);
-
     [[nodiscard]] u64 GetTicks() const;
 
     [[nodiscard]] bool IsAsync() const;
@@ -211,6 +179,9 @@ public:
 
     void RendererFrameEndNotify();
 
+    void RequestSwapBuffers(const Tegra::FramebufferConfig* framebuffer,
+                            std::array<Service::Nvidia::NvFence, 4>& fences, size_t num_fences);
+
     /// Performs any additional setup necessary in order to begin GPU emulation.
     /// This can be used to launch any necessary threads and register any necessary
     /// core timing events.
@@ -226,7 +197,7 @@ public:
     void ReleaseContext();
 
     /// Push GPU command entries to be processed
-    void PushGPUEntries(Tegra::CommandList&& entries);
+    void PushGPUEntries(s32 channel, Tegra::CommandList&& entries);
 
     /// Push GPU command buffer entries to be processed
     void PushCommandBuffer(u32 id, Tegra::ChCommandHeaderList& entries);
@@ -248,7 +219,7 @@ public:
 
 private:
     struct Impl;
-    std::unique_ptr<Impl> impl;
+    mutable std::unique_ptr<Impl> impl;
 };
 
 } // namespace Tegra
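The new channel API replaces the single implicit channel: a client allocates a channel, initializes it, then tags every submission with its id. A sketch of the expected lifecycle from a driver-side caller (none of this caller code appears in the diff; the header path and the bind_id field name are assumptions):

#include "video_core/control/channel_state.h" // assumed header location
#include "video_core/gpu.h"

// Hypothetical driver-side flow built from the declarations above.
void SubmitOnNewChannel(Tegra::GPU& gpu, Tegra::CommandList&& cmds) {
    std::shared_ptr<Tegra::Control::ChannelState> channel = gpu.AllocateChannel();
    gpu.InitChannel(*channel);                              // set up engines for this channel
    gpu.PushGPUEntries(channel->bind_id, std::move(cmds));  // bind_id: assumed field name
    // ... later, when the channel owner goes away:
    gpu.ReleaseChannel(*channel);
}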
diff --git a/src/video_core/gpu_thread.cpp b/src/video_core/gpu_thread.cpp
index d43f7175a..1bd477011 100644
--- a/src/video_core/gpu_thread.cpp
+++ b/src/video_core/gpu_thread.cpp
@@ -8,6 +8,7 @@
 #include "common/thread.h"
 #include "core/core.h"
 #include "core/frontend/emu_window.h"
+#include "video_core/control/scheduler.h"
 #include "video_core/dma_pusher.h"
 #include "video_core/gpu.h"
 #include "video_core/gpu_thread.h"
@@ -18,8 +19,8 @@ namespace VideoCommon::GPUThread {
 /// Runs the GPU thread
 static void RunThread(std::stop_token stop_token, Core::System& system,
                       VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
-                      Tegra::DmaPusher& dma_pusher, SynchState& state) {
-    std::string name = "yuzu:GPU";
+                      Tegra::Control::Scheduler& scheduler, SynchState& state) {
+    std::string name = "GPU";
     MicroProfileOnThreadCreate(name.c_str());
     SCOPE_EXIT({ MicroProfileOnThreadExit(); });
 
@@ -36,8 +37,7 @@ static void RunThread(std::stop_token stop_token, Core::System& system,
             break;
         }
         if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
-            dma_pusher.Push(std::move(submit_list->entries));
-            dma_pusher.DispatchCalls();
+            scheduler.Push(submit_list->channel, std::move(submit_list->entries));
         } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
             renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
         } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
@@ -68,14 +68,14 @@ ThreadManager::~ThreadManager() = default;
 
 void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
                                 Core::Frontend::GraphicsContext& context,
-                                Tegra::DmaPusher& dma_pusher) {
+                                Tegra::Control::Scheduler& scheduler) {
     rasterizer = renderer.ReadRasterizer();
     thread = std::jthread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
-                          std::ref(dma_pusher), std::ref(state));
+                          std::ref(scheduler), std::ref(state));
 }
 
-void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
-    PushCommand(SubmitListCommand(std::move(entries)));
+void ThreadManager::SubmitList(s32 channel, Tegra::CommandList&& entries) {
+    PushCommand(SubmitListCommand(channel, std::move(entries)));
 }
 
 void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -93,8 +93,12 @@ void ThreadManager::FlushRegion(VAddr addr, u64 size) {
     }
     auto& gpu = system.GPU();
     u64 fence = gpu.RequestFlush(addr, size);
-    PushCommand(GPUTickCommand(), true);
-    ASSERT(fence <= gpu.CurrentFlushRequestFence());
+    TickGPU();
+    gpu.WaitForSyncOperation(fence);
+}
+
+void ThreadManager::TickGPU() {
+    PushCommand(GPUTickCommand());
 }
 
 void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
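The rewritten FlushRegion shows the new synchronization contract: RequestFlush registers a sync operation and returns a fence, TickGPU wakes the GPU thread without blocking, and WaitForSyncOperation blocks until the flush callback has actually run. A condensed sketch of that contract (the wrapper function itself is illustrative, not part of the diff):

#include "core/core.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"

// Illustrative wrapper demonstrating the blocking-flush contract above.
void BlockingFlush(Core::System& system, VideoCommon::GPUThread::ThreadManager& manager,
                   VAddr addr, u64 size) {
    auto& gpu = system.GPU();
    const u64 fence = gpu.RequestFlush(addr, size); // queue rasterizer FlushRegion as a sync op
    manager.TickGPU();                              // non-blocking: wake the GPU thread
    gpu.WaitForSyncOperation(fence);                // block until that sync op has executed
}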
diff --git a/src/video_core/gpu_thread.h b/src/video_core/gpu_thread.h
index 2f8210cb9..64628d3e3 100644
--- a/src/video_core/gpu_thread.h
+++ b/src/video_core/gpu_thread.h
@@ -15,7 +15,9 @@
 
 namespace Tegra {
 struct FramebufferConfig;
-class DmaPusher;
+namespace Control {
+class Scheduler;
+}
 } // namespace Tegra
 
 namespace Core {
@@ -34,8 +36,10 @@ namespace VideoCommon::GPUThread {
 
 /// Command to signal to the GPU thread that a command list is ready for processing
 struct SubmitListCommand final {
-    explicit SubmitListCommand(Tegra::CommandList&& entries_) : entries{std::move(entries_)} {}
+    explicit SubmitListCommand(s32 channel_, Tegra::CommandList&& entries_)
+        : channel{channel_}, entries{std::move(entries_)} {}
 
+    s32 channel;
     Tegra::CommandList entries;
 };
 
@@ -112,10 +116,10 @@ public:
 
     /// Creates and starts the GPU thread.
     void StartThread(VideoCore::RendererBase& renderer, Core::Frontend::GraphicsContext& context,
-                     Tegra::DmaPusher& dma_pusher);
+                     Tegra::Control::Scheduler& scheduler);
 
     /// Push GPU command entries to be processed
-    void SubmitList(Tegra::CommandList&& entries);
+    void SubmitList(s32 channel, Tegra::CommandList&& entries);
 
     /// Swap buffers (render frame)
     void SwapBuffers(const Tegra::FramebufferConfig* framebuffer);
@@ -131,6 +135,8 @@ public:
 
     void OnCommandListEnd();
 
+    void TickGPU();
+
 private:
     /// Pushes a command to be executed by the GPU thread
     u64 PushCommand(CommandData&& command_data, bool block = false);
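On the producer side, the channel id now travels with every command list so the scheduler can route it on the GPU thread. A one-function sketch of a call site (the surrounding nvhost code is assumed, not shown here):

#include "video_core/gpu_thread.h"

// Hypothetical producer-side call: submissions are tagged with the channel
// that owns them, and Control::Scheduler dispatches per channel.
void Submit(VideoCommon::GPUThread::ThreadManager& manager, s32 channel_id,
            Tegra::CommandList&& entries) {
    manager.SubmitList(channel_id, std::move(entries));
}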
diff --git a/src/video_core/command_classes/codecs/codec.cpp b/src/video_core/host1x/codecs/codec.cpp
index a5eb97b7f..42e7d6e4f 100644
--- a/src/video_core/command_classes/codecs/codec.cpp
+++ b/src/video_core/host1x/codecs/codec.cpp
@@ -6,11 +6,11 @@
 #include <vector>
 #include "common/assert.h"
 #include "common/settings.h"
-#include "video_core/command_classes/codecs/codec.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/codec.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 
 extern "C" {
@@ -73,10 +73,10 @@ void AVFrameDeleter(AVFrame* ptr) {
     av_frame_free(&ptr);
 }
 
-Codec::Codec(GPU& gpu_, const NvdecCommon::NvdecRegisters& regs)
-    : gpu(gpu_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(gpu)),
-      vp8_decoder(std::make_unique<Decoder::VP8>(gpu)),
-      vp9_decoder(std::make_unique<Decoder::VP9>(gpu)) {}
+Codec::Codec(Host1x::Host1x& host1x_, const Host1x::NvdecCommon::NvdecRegisters& regs)
+    : host1x(host1x_), state{regs}, h264_decoder(std::make_unique<Decoder::H264>(host1x)),
+      vp8_decoder(std::make_unique<Decoder::VP8>(host1x)),
+      vp9_decoder(std::make_unique<Decoder::VP9>(host1x)) {}
 
 Codec::~Codec() {
     if (!initialized) {
@@ -168,11 +168,11 @@ void Codec::InitializeGpuDecoder() {
 void Codec::Initialize() {
     const AVCodecID codec = [&] {
         switch (current_codec) {
-        case NvdecCommon::VideoCodec::H264:
+        case Host1x::NvdecCommon::VideoCodec::H264:
             return AV_CODEC_ID_H264;
-        case NvdecCommon::VideoCodec::VP8:
+        case Host1x::NvdecCommon::VideoCodec::VP8:
             return AV_CODEC_ID_VP8;
-        case NvdecCommon::VideoCodec::VP9:
+        case Host1x::NvdecCommon::VideoCodec::VP9:
             return AV_CODEC_ID_VP9;
         default:
             UNIMPLEMENTED_MSG("Unknown codec {}", current_codec);
@@ -197,7 +197,7 @@ void Codec::Initialize() {
     initialized = true;
 }
 
-void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
+void Codec::SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec) {
     if (current_codec != codec) {
         current_codec = codec;
         LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", GetCurrentCodecName());
@@ -215,11 +215,11 @@ void Codec::Decode() {
     bool vp9_hidden_frame = false;
     const auto& frame_data = [&]() {
         switch (current_codec) {
-        case Tegra::NvdecCommon::VideoCodec::H264:
+        case Tegra::Host1x::NvdecCommon::VideoCodec::H264:
             return h264_decoder->ComposeFrame(state, is_first_frame);
-        case Tegra::NvdecCommon::VideoCodec::VP8:
+        case Tegra::Host1x::NvdecCommon::VideoCodec::VP8:
             return vp8_decoder->ComposeFrame(state);
-        case Tegra::NvdecCommon::VideoCodec::VP9:
+        case Tegra::Host1x::NvdecCommon::VideoCodec::VP9:
             vp9_decoder->ComposeFrame(state);
             vp9_hidden_frame = vp9_decoder->WasFrameHidden();
             return vp9_decoder->GetFrameBytes();
@@ -287,21 +287,21 @@ AVFramePtr Codec::GetCurrentFrame() {
     return frame;
 }
 
-NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
+Host1x::NvdecCommon::VideoCodec Codec::GetCurrentCodec() const {
     return current_codec;
 }
 
 std::string_view Codec::GetCurrentCodecName() const {
     switch (current_codec) {
-    case NvdecCommon::VideoCodec::None:
+    case Host1x::NvdecCommon::VideoCodec::None:
         return "None";
-    case NvdecCommon::VideoCodec::H264:
+    case Host1x::NvdecCommon::VideoCodec::H264:
         return "H264";
-    case NvdecCommon::VideoCodec::VP8:
+    case Host1x::NvdecCommon::VideoCodec::VP8:
        return "VP8";
-    case NvdecCommon::VideoCodec::H265:
+    case Host1x::NvdecCommon::VideoCodec::H265:
        return "H265";
-    case NvdecCommon::VideoCodec::VP9:
+    case Host1x::NvdecCommon::VideoCodec::VP9:
        return "VP9";
    default:
        return "Unknown";
diff --git a/src/video_core/command_classes/codecs/codec.h b/src/video_core/host1x/codecs/codec.h
index 0c2405465..0d45fb7fe 100644
--- a/src/video_core/command_classes/codecs/codec.h
+++ b/src/video_core/host1x/codecs/codec.h
@@ -6,8 +6,8 @@
 #include <memory>
 #include <string_view>
 #include <queue>
-
-#include "video_core/command_classes/nvdec_common.h"
+#include "common/common_types.h"
+#include "video_core/host1x/nvdec_common.h"
 
 extern "C" {
 #if defined(__GNUC__) || defined(__clang__)
@@ -21,7 +21,6 @@ extern "C" {
 }
 
 namespace Tegra {
-class GPU;
 
 void AVFrameDeleter(AVFrame* ptr);
 using AVFramePtr = std::unique_ptr<AVFrame, decltype(&AVFrameDeleter)>;
@@ -32,16 +31,20 @@ class VP8;
 class VP9;
 } // namespace Decoder
 
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
 class Codec {
 public:
-    explicit Codec(GPU& gpu, const NvdecCommon::NvdecRegisters& regs);
+    explicit Codec(Host1x::Host1x& host1x, const Host1x::NvdecCommon::NvdecRegisters& regs);
     ~Codec();
 
     /// Initialize the codec, returning success or failure
     void Initialize();
 
     /// Sets NVDEC video stream codec
-    void SetTargetCodec(NvdecCommon::VideoCodec codec);
+    void SetTargetCodec(Host1x::NvdecCommon::VideoCodec codec);
 
     /// Call decoders to construct headers, decode AVFrame with ffmpeg
     void Decode();
@@ -50,7 +53,7 @@ public:
     [[nodiscard]] AVFramePtr GetCurrentFrame();
 
     /// Returns the value of current_codec
-    [[nodiscard]] NvdecCommon::VideoCodec GetCurrentCodec() const;
+    [[nodiscard]] Host1x::NvdecCommon::VideoCodec GetCurrentCodec() const;
 
     /// Return name of the current codec
     [[nodiscard]] std::string_view GetCurrentCodecName() const;
@@ -63,14 +66,14 @@ private:
     bool CreateGpuAvDevice();
 
     bool initialized{};
-    NvdecCommon::VideoCodec current_codec{NvdecCommon::VideoCodec::None};
+    Host1x::NvdecCommon::VideoCodec current_codec{Host1x::NvdecCommon::VideoCodec::None};
 
     const AVCodec* av_codec{nullptr};
     AVCodecContext* av_codec_ctx{nullptr};
     AVBufferRef* av_gpu_decoder{nullptr};
 
-    GPU& gpu;
-    const NvdecCommon::NvdecRegisters& state;
+    Host1x::Host1x& host1x;
+    const Host1x::NvdecCommon::NvdecRegisters& state;
     std::unique_ptr<Decoder::H264> h264_decoder;
     std::unique_ptr<Decoder::VP8> vp8_decoder;
     std::unique_ptr<Decoder::VP9> vp9_decoder;
diff --git a/src/video_core/command_classes/codecs/h264.cpp b/src/video_core/host1x/codecs/h264.cpp
index e2acd54d4..e87bd65fa 100644
--- a/src/video_core/command_classes/codecs/h264.cpp
+++ b/src/video_core/host1x/codecs/h264.cpp
@@ -5,8 +5,8 @@
 #include <bit>
 
 #include "common/settings.h"
-#include "video_core/command_classes/codecs/h264.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/h264.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 
 namespace Tegra::Decoder {
@@ -24,19 +24,20 @@ constexpr std::array<u8, 16> zig_zag_scan{
 };
 } // Anonymous namespace
 
-H264::H264(GPU& gpu_) : gpu(gpu_) {}
+H264::H264(Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 H264::~H264() = default;
 
-const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& state,
+const std::vector<u8>& H264::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state,
                                           bool is_first_frame) {
     H264DecoderContext context;
-    gpu.MemoryManager().ReadBlock(state.picture_info_offset, &context, sizeof(H264DecoderContext));
+    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &context,
+                                     sizeof(H264DecoderContext));
 
     const s64 frame_number = context.h264_parameter_set.frame_number.Value();
     if (!is_first_frame && frame_number != 0) {
         frame.resize(context.stream_len);
-        gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
+        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset, frame.data(), frame.size());
         return frame;
     }
 
@@ -155,8 +156,8 @@ const std::vector<u8>& H264::ComposeFrame(const NvdecCommon::NvdecRegisters& sta
     frame.resize(encoded_header.size() + context.stream_len);
     std::memcpy(frame.data(), encoded_header.data(), encoded_header.size());
 
-    gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset,
-                                  frame.data() + encoded_header.size(), context.stream_len);
+    host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+                                     frame.data() + encoded_header.size(), context.stream_len);
 
     return frame;
 }
diff --git a/src/video_core/command_classes/codecs/h264.h b/src/video_core/host1x/codecs/h264.h
index 261574364..5cc86454e 100644
--- a/src/video_core/command_classes/codecs/h264.h
+++ b/src/video_core/host1x/codecs/h264.h
@@ -8,10 +8,14 @@
 #include "common/bit_field.h"
 #include "common/common_funcs.h"
 #include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
 namespace Decoder {
 
 class H264BitWriter {
@@ -55,16 +59,16 @@ private:
 
 class H264 {
 public:
-    explicit H264(GPU& gpu);
+    explicit H264(Host1x::Host1x& host1x);
     ~H264();
 
     /// Compose the H264 frame for FFmpeg decoding
-    [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state,
-                                                      bool is_first_frame = false);
+    [[nodiscard]] const std::vector<u8>& ComposeFrame(
+        const Host1x::NvdecCommon::NvdecRegisters& state, bool is_first_frame = false);
 
 private:
     std::vector<u8> frame;
-    GPU& gpu;
+    Host1x::Host1x& host1x;
 
     struct H264ParameterSet {
         s32 log2_max_pic_order_cnt_lsb_minus4; ///< 0x00
diff --git a/src/video_core/command_classes/codecs/vp8.cpp b/src/video_core/host1x/codecs/vp8.cpp
index c83b9bbc2..28fb12cb8 100644
--- a/src/video_core/command_classes/codecs/vp8.cpp
+++ b/src/video_core/host1x/codecs/vp8.cpp
@@ -3,18 +3,18 @@
 
 #include <vector>
 
-#include "video_core/command_classes/codecs/vp8.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp8.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 
 namespace Tegra::Decoder {
-VP8::VP8(GPU& gpu_) : gpu(gpu_) {}
+VP8::VP8(Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 VP8::~VP8() = default;
 
-const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+const std::vector<u8>& VP8::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
     VP8PictureInfo info;
-    gpu.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
+    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &info, sizeof(VP8PictureInfo));
 
     const bool is_key_frame = info.key_frame == 1u;
     const auto bitstream_size = static_cast<size_t>(info.vld_buffer_size);
@@ -45,7 +45,7 @@ const std::vector<u8>& VP8::ComposeFrame(const NvdecCommon::NvdecRegisters& stat
         frame[9] = static_cast<u8>(((info.frame_height >> 8) & 0x3f));
     }
     const u64 bitstream_offset = state.frame_bitstream_offset;
-    gpu.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
+    host1x.MemoryManager().ReadBlock(bitstream_offset, frame.data() + header_size, bitstream_size);
 
     return frame;
 }
diff --git a/src/video_core/command_classes/codecs/vp8.h b/src/video_core/host1x/codecs/vp8.h
index 3357667b0..5bf07ecab 100644
--- a/src/video_core/command_classes/codecs/vp8.h
+++ b/src/video_core/host1x/codecs/vp8.h
@@ -8,23 +8,28 @@
 
 #include "common/common_funcs.h"
 #include "common/common_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/nvdec_common.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
 namespace Decoder {
 
 class VP8 {
 public:
-    explicit VP8(GPU& gpu);
+    explicit VP8(Host1x::Host1x& host1x);
     ~VP8();
 
     /// Compose the VP8 frame for FFmpeg decoding
-    [[nodiscard]] const std::vector<u8>& ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+    [[nodiscard]] const std::vector<u8>& ComposeFrame(
+        const Host1x::NvdecCommon::NvdecRegisters& state);
 
 private:
     std::vector<u8> frame;
-    GPU& gpu;
+    Host1x::Host1x& host1x;
 
     struct VP8PictureInfo {
         INSERT_PADDING_WORDS_NOINIT(14);
diff --git a/src/video_core/command_classes/codecs/vp9.cpp b/src/video_core/host1x/codecs/vp9.cpp
index c01431441..cf40c9012 100644
--- a/src/video_core/command_classes/codecs/vp9.cpp
+++ b/src/video_core/host1x/codecs/vp9.cpp
@@ -4,8 +4,8 @@
 #include <algorithm> // for std::copy
 #include <numeric>
 #include "common/assert.h"
-#include "video_core/command_classes/codecs/vp9.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/codecs/vp9.h"
+#include "video_core/host1x/host1x.h"
 #include "video_core/memory_manager.h"
 
 namespace Tegra::Decoder {
@@ -236,7 +236,7 @@ constexpr std::array<u8, 254> map_lut{
 }
 } // Anonymous namespace
 
-VP9::VP9(GPU& gpu_) : gpu{gpu_} {}
+VP9::VP9(Host1x::Host1x& host1x_) : host1x{host1x_} {}
 
 VP9::~VP9() = default;
 
@@ -355,9 +355,9 @@ void VP9::WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_
     }
 }
 
-Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state) {
+Vp9PictureInfo VP9::GetVp9PictureInfo(const Host1x::NvdecCommon::NvdecRegisters& state) {
     PictureInfo picture_info;
-    gpu.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
+    host1x.MemoryManager().ReadBlock(state.picture_info_offset, &picture_info, sizeof(PictureInfo));
     Vp9PictureInfo vp9_info = picture_info.Convert();
 
     InsertEntropy(state.vp9_entropy_probs_offset, vp9_info.entropy);
@@ -372,18 +372,19 @@ Vp9PictureInfo VP9::GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state)
 
 void VP9::InsertEntropy(u64 offset, Vp9EntropyProbs& dst) {
     EntropyProbs entropy;
-    gpu.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
+    host1x.MemoryManager().ReadBlock(offset, &entropy, sizeof(EntropyProbs));
     entropy.Convert(dst);
 }
 
-Vp9FrameContainer VP9::GetCurrentFrame(const NvdecCommon::NvdecRegisters& state) {
+Vp9FrameContainer VP9::GetCurrentFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
     Vp9FrameContainer current_frame{};
     {
-        gpu.SyncGuestHost();
+        // gpu.SyncGuestHost(); epic, why?
         current_frame.info = GetVp9PictureInfo(state);
         current_frame.bit_stream.resize(current_frame.info.bitstream_size);
-        gpu.MemoryManager().ReadBlock(state.frame_bitstream_offset, current_frame.bit_stream.data(),
-                                      current_frame.info.bitstream_size);
+        host1x.MemoryManager().ReadBlock(state.frame_bitstream_offset,
+                                         current_frame.bit_stream.data(),
+                                         current_frame.info.bitstream_size);
     }
     if (!next_frame.bit_stream.empty()) {
         Vp9FrameContainer temp{
@@ -769,7 +770,7 @@ VpxBitStreamWriter VP9::ComposeUncompressedHeader() {
     return uncomp_writer;
 }
 
-void VP9::ComposeFrame(const NvdecCommon::NvdecRegisters& state) {
+void VP9::ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state) {
     std::vector<u8> bitstream;
     {
         Vp9FrameContainer curr_frame = GetCurrentFrame(state);
diff --git a/src/video_core/command_classes/codecs/vp9.h b/src/video_core/host1x/codecs/vp9.h
index ecc40e8b1..d4083e8d3 100644
--- a/src/video_core/command_classes/codecs/vp9.h
+++ b/src/video_core/host1x/codecs/vp9.h
@@ -8,11 +8,15 @@
 
 #include "common/common_types.h"
 #include "common/stream.h"
-#include "video_core/command_classes/codecs/vp9_types.h"
-#include "video_core/command_classes/nvdec_common.h"
+#include "video_core/host1x/codecs/vp9_types.h"
+#include "video_core/host1x/nvdec_common.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+class Host1x;
+} // namespace Host1x
+
 namespace Decoder {
 
 /// The VpxRangeEncoder, and VpxBitStreamWriter classes are used to compose the
@@ -106,7 +110,7 @@ private:
 
 class VP9 {
 public:
-    explicit VP9(GPU& gpu_);
+    explicit VP9(Host1x::Host1x& host1x);
     ~VP9();
 
     VP9(const VP9&) = delete;
@@ -117,7 +121,7 @@ public:
 
     /// Composes the VP9 frame from the GPU state information.
     /// Based on the official VP9 spec documentation
-    void ComposeFrame(const NvdecCommon::NvdecRegisters& state);
+    void ComposeFrame(const Host1x::NvdecCommon::NvdecRegisters& state);
 
     /// Returns true if the most recent frame was a hidden frame.
     [[nodiscard]] bool WasFrameHidden() const {
@@ -162,19 +166,21 @@ private:
     void WriteMvProbabilityUpdate(VpxRangeEncoder& writer, u8 new_prob, u8 old_prob);
 
     /// Returns VP9 information from NVDEC provided offset and size
-    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(const NvdecCommon::NvdecRegisters& state);
+    [[nodiscard]] Vp9PictureInfo GetVp9PictureInfo(
+        const Host1x::NvdecCommon::NvdecRegisters& state);
 
     /// Read and convert NVDEC provided entropy probs to Vp9EntropyProbs struct
     void InsertEntropy(u64 offset, Vp9EntropyProbs& dst);
 
     /// Returns frame to be decoded after buffering
-    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(const NvdecCommon::NvdecRegisters& state);
+    [[nodiscard]] Vp9FrameContainer GetCurrentFrame(
+        const Host1x::NvdecCommon::NvdecRegisters& state);
 
     /// Use NVDEC providied information to compose the headers for the current frame
     [[nodiscard]] std::vector<u8> ComposeCompressedHeader();
     [[nodiscard]] VpxBitStreamWriter ComposeUncompressedHeader();
 
-    GPU& gpu;
+    Host1x::Host1x& host1x;
     std::vector<u8> frame;
 
     std::array<s8, 4> loop_filter_ref_deltas{};
diff --git a/src/video_core/command_classes/codecs/vp9_types.h b/src/video_core/host1x/codecs/vp9_types.h
index bb3d8df6e..adad8ed7e 100644
--- a/src/video_core/command_classes/codecs/vp9_types.h
+++ b/src/video_core/host1x/codecs/vp9_types.h
@@ -9,7 +9,6 @@
 #include "common/common_types.h"
 
 namespace Tegra {
-class GPU;
 
 namespace Decoder {
 struct Vp9FrameDimensions {
diff --git a/src/video_core/host1x/control.cpp b/src/video_core/host1x/control.cpp
new file mode 100644
index 000000000..dceefdb7f
--- /dev/null
+++ b/src/video_core/host1x/control.cpp
@@ -0,0 +1,33 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/assert.h"
+#include "video_core/host1x/control.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra::Host1x {
+
+Control::Control(Host1x& host1x_) : host1x(host1x_) {}
+
+Control::~Control() = default;
+
+void Control::ProcessMethod(Method method, u32 argument) {
+    switch (method) {
+    case Method::LoadSyncptPayload32:
+        syncpoint_value = argument;
+        break;
+    case Method::WaitSyncpt:
+    case Method::WaitSyncpt32:
+        Execute(argument);
+        break;
+    default:
+        UNIMPLEMENTED_MSG("Control method 0x{:X}", static_cast<u32>(method));
+        break;
+    }
+}
+
+void Control::Execute(u32 data) {
+    host1x.GetSyncpointManager().WaitHost(data, syncpoint_value);
+}
+
+} // namespace Tegra::Host1x
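The class models the host1x wait sequence: a LoadSyncptPayload32 write latches the threshold, and a following WaitSyncpt/WaitSyncpt32 blocks on it. A sketch of that two-method sequence as a client would issue it (in practice the command FIFO drives this, not a direct call):

#include "video_core/host1x/control.h"

// Illustrative wait: latch the 32-bit payload first, then wait for
// syncpoint `id` to reach it on the host side.
void WaitExample(Tegra::Host1x::Control& control, u32 id, u32 target) {
    control.ProcessMethod(Tegra::Host1x::Control::Method::LoadSyncptPayload32, target);
    control.ProcessMethod(Tegra::Host1x::Control::Method::WaitSyncpt32, id);
}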
diff --git a/src/video_core/command_classes/host1x.h b/src/video_core/host1x/control.h
index bb48a4381..e117888a3 100644
--- a/src/video_core/command_classes/host1x.h
+++ b/src/video_core/host1x/control.h
@@ -1,15 +1,19 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-FileCopyrightText: 2021 Skyline Team and Contributors
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
 #include "common/common_types.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
 class Nvdec;
 
-class Host1x {
+class Control {
 public:
     enum class Method : u32 {
         WaitSyncpt = 0x8,
@@ -17,8 +21,8 @@ public:
         WaitSyncpt32 = 0x50,
     };
 
-    explicit Host1x(GPU& gpu);
-    ~Host1x();
+    explicit Control(Host1x& host1x);
+    ~Control();
 
     /// Writes the method into the state, Invoke Execute() if encountered
     void ProcessMethod(Method method, u32 argument);
@@ -28,7 +32,9 @@ private:
     void Execute(u32 data);
 
     u32 syncpoint_value{};
-    GPU& gpu;
+    Host1x& host1x;
 };
 
+} // namespace Host1x
+
 } // namespace Tegra
diff --git a/src/video_core/host1x/host1x.cpp b/src/video_core/host1x/host1x.cpp
new file mode 100644
index 000000000..7c317a85d
--- /dev/null
+++ b/src/video_core/host1x/host1x.cpp
@@ -0,0 +1,17 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "core/core.h"
+#include "video_core/host1x/host1x.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+Host1x::Host1x(Core::System& system_)
+    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
+      allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
+
+} // namespace Host1x
+
+} // namespace Tegra
diff --git a/src/video_core/host1x/host1x.h b/src/video_core/host1x/host1x.h
new file mode 100644
index 000000000..57082ae54
--- /dev/null
+++ b/src/video_core/host1x/host1x.h
@@ -0,0 +1,57 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include "common/common_types.h"
+
+#include "common/address_space.h"
+#include "video_core/host1x/syncpoint_manager.h"
+#include "video_core/memory_manager.h"
+
+namespace Core {
+class System;
+} // namespace Core
+
+namespace Tegra {
+
+namespace Host1x {
+
+class Host1x {
+public:
+    explicit Host1x(Core::System& system);
+
+    SyncpointManager& GetSyncpointManager() {
+        return syncpoint_manager;
+    }
+
+    const SyncpointManager& GetSyncpointManager() const {
+        return syncpoint_manager;
+    }
+
+    Tegra::MemoryManager& MemoryManager() {
+        return memory_manager;
+    }
+
+    const Tegra::MemoryManager& MemoryManager() const {
+        return memory_manager;
+    }
+
+    Common::FlatAllocator<u32, 0, 32>& Allocator() {
+        return *allocator;
+    }
+
+    const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
+        return *allocator;
+    }
+
+private:
+    Core::System& system;
+    SyncpointManager syncpoint_manager;
+    Tegra::MemoryManager memory_manager;
+    std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
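Host1x thus becomes the shared hub for everything that used to be reached through GPU: syncpoints, a host1x-side memory manager (32-bit address space with 12-bit pages, per the constructor), and a flat handle allocator. A minimal construction-and-use sketch, assuming a Core::System instance is available (in the emulator the GPU owns the Host1x; a standalone instance here is purely illustrative):

#include "core/core.h"
#include "video_core/host1x/host1x.h"

// Minimal sketch: one Host1x owns the services that nvdec/vic/nvmap consume.
void Example(Core::System& system) {
    Tegra::Host1x::Host1x host1x{system};
    auto& syncpoints = host1x.GetSyncpointManager();
    syncpoints.IncrementHost(0);                   // bump the host view of syncpoint 0
    syncpoints.WaitHost(0, 1);                     // returns immediately: value already >= 1
    const u32 now = syncpoints.GetHostSyncpointValue(0);
    (void)now; // 1 after the increment above
}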
diff --git a/src/video_core/command_classes/nvdec.cpp b/src/video_core/host1x/nvdec.cpp
index 4fbbe3da6..a4bd5b79f 100644
--- a/src/video_core/command_classes/nvdec.cpp
+++ b/src/video_core/host1x/nvdec.cpp
@@ -2,15 +2,16 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include "common/assert.h"
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
 
-namespace Tegra {
+namespace Tegra::Host1x {
 
 #define NVDEC_REG_INDEX(field_name) \
     (offsetof(NvdecCommon::NvdecRegisters, field_name) / sizeof(u64))
 
-Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), state{}, codec(std::make_unique<Codec>(gpu, state)) {}
+Nvdec::Nvdec(Host1x& host1x_)
+    : host1x(host1x_), state{}, codec(std::make_unique<Codec>(host1x, state)) {}
 
 Nvdec::~Nvdec() = default;
 
@@ -44,4 +45,4 @@ void Nvdec::Execute() {
     }
 }
 
-} // namespace Tegra
+} // namespace Tegra::Host1x
diff --git a/src/video_core/command_classes/nvdec.h b/src/video_core/host1x/nvdec.h
index 488531fc6..3949d5181 100644
--- a/src/video_core/command_classes/nvdec.h
+++ b/src/video_core/host1x/nvdec.h
@@ -6,14 +6,17 @@
 #include <memory>
 #include <vector>
 #include "common/common_types.h"
-#include "video_core/command_classes/codecs/codec.h"
+#include "video_core/host1x/codecs/codec.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
 
 class Nvdec {
 public:
-    explicit Nvdec(GPU& gpu);
+    explicit Nvdec(Host1x& host1x);
     ~Nvdec();
 
     /// Writes the method into the state, Invoke Execute() if encountered
@@ -26,8 +29,11 @@ public:
     /// Invoke codec to decode a frame
     void Execute();
 
-    GPU& gpu;
+    Host1x& host1x;
     NvdecCommon::NvdecRegisters state;
     std::unique_ptr<Codec> codec;
 };
+
+} // namespace Host1x
+
 } // namespace Tegra
diff --git a/src/video_core/command_classes/nvdec_common.h b/src/video_core/host1x/nvdec_common.h
index 521e5b52b..49d67ebbe 100644
--- a/src/video_core/command_classes/nvdec_common.h
+++ b/src/video_core/host1x/nvdec_common.h
@@ -7,7 +7,7 @@
 #include "common/common_funcs.h"
 #include "common/common_types.h"
 
-namespace Tegra::NvdecCommon {
+namespace Tegra::Host1x::NvdecCommon {
 
 enum class VideoCodec : u64 {
     None = 0x0,
@@ -94,4 +94,4 @@ ASSERT_REG_POSITION(vp9_curr_frame_mvs_offset, 0x176);
 
 #undef ASSERT_REG_POSITION
 
-} // namespace Tegra::NvdecCommon
+} // namespace Tegra::Host1x::NvdecCommon
diff --git a/src/video_core/command_classes/sync_manager.cpp b/src/video_core/host1x/sync_manager.cpp
index 67e58046f..5ef9ea217 100644
--- a/src/video_core/command_classes/sync_manager.cpp
+++ b/src/video_core/host1x/sync_manager.cpp
@@ -3,10 +3,13 @@
 
 #include <algorithm>
 #include "sync_manager.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/syncpoint_manager.h"
 
 namespace Tegra {
-SyncptIncrManager::SyncptIncrManager(GPU& gpu_) : gpu(gpu_) {}
+namespace Host1x {
+
+SyncptIncrManager::SyncptIncrManager(Host1x& host1x_) : host1x(host1x_) {}
 SyncptIncrManager::~SyncptIncrManager() = default;
 
 void SyncptIncrManager::Increment(u32 id) {
@@ -36,8 +39,12 @@ void SyncptIncrManager::IncrementAllDone() {
         if (!increments[done_count].complete) {
            break;
        }
-        gpu.IncrementSyncPoint(increments[done_count].syncpt_id);
+        auto& syncpoint_manager = host1x.GetSyncpointManager();
+        syncpoint_manager.IncrementGuest(increments[done_count].syncpt_id);
+        syncpoint_manager.IncrementHost(increments[done_count].syncpt_id);
     }
     increments.erase(increments.begin(), increments.begin() + done_count);
 }
+
+} // namespace Host1x
 } // namespace Tegra
diff --git a/src/video_core/command_classes/sync_manager.h b/src/video_core/host1x/sync_manager.h
index 6dfaae080..7bb77fa27 100644
--- a/src/video_core/command_classes/sync_manager.h
+++ b/src/video_core/host1x/sync_manager.h
@@ -8,7 +8,11 @@
 #include "common/common_types.h"
 
 namespace Tegra {
-class GPU;
+
+namespace Host1x {
+
+class Host1x;
+
 struct SyncptIncr {
     u32 id;
     u32 class_id;
@@ -21,7 +25,7 @@ struct SyncptIncr {
 
 class SyncptIncrManager {
 public:
-    explicit SyncptIncrManager(GPU& gpu);
+    explicit SyncptIncrManager(Host1x& host1x);
     ~SyncptIncrManager();
 
     /// Add syncpoint id and increment all
@@ -41,7 +45,9 @@ private:
     std::mutex increment_lock;
     u32 current_id{};
 
-    GPU& gpu;
+    Host1x& host1x;
 };
 
+} // namespace Host1x
+
 } // namespace Tegra
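SyncptIncrManager keeps its deferred-increment bookkeeping, but a completed increment now bumps both the guest and host views of a syncpoint instead of calling back into GPU. A usage sketch; the deferred entry points (IncrementWhenDone/SignalDone) are assumed from the parts of this header the hunks above do not show:

// Deferred increment as the VIC/NVDEC command processors would use it
// (entry-point names assumed, not visible in the hunks above).
const u32 handle = incr_manager.IncrementWhenDone(class_id, syncpt_id);
// ... hardware work completes ...
incr_manager.SignalDone(handle);    // mark the pending entry complete
incr_manager.IncrementAllDone();    // per the diff: runs IncrementGuest and
                                    // IncrementHost for each finished entry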
diff --git a/src/video_core/host1x/syncpoint_manager.cpp b/src/video_core/host1x/syncpoint_manager.cpp
new file mode 100644
index 000000000..326e8355a
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.cpp
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "common/microprofile.h"
+#include "video_core/host1x/syncpoint_manager.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
+
+SyncpointManager::ActionHandle SyncpointManager::RegisterAction(
+    std::atomic<u32>& syncpoint, std::list<RegisteredAction>& action_storage, u32 expected_value,
+    std::function<void()>&& action) {
+    if (syncpoint.load(std::memory_order_acquire) >= expected_value) {
+        action();
+        return {};
+    }
+
+    std::unique_lock lk(guard);
+    if (syncpoint.load(std::memory_order_relaxed) >= expected_value) {
+        action();
+        return {};
+    }
+    auto it = action_storage.begin();
+    while (it != action_storage.end()) {
+        if (it->expected_value >= expected_value) {
+            break;
+        }
+        ++it;
+    }
+    return action_storage.emplace(it, expected_value, std::move(action));
+}
+
+void SyncpointManager::DeregisterAction(std::list<RegisteredAction>& action_storage,
+                                        ActionHandle& handle) {
+    std::unique_lock lk(guard);
+    action_storage.erase(handle);
+}
+
+void SyncpointManager::DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle) {
+    DeregisterAction(guest_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle) {
+    DeregisterAction(host_action_storage[syncpoint_id], handle);
+}
+
+void SyncpointManager::IncrementGuest(u32 syncpoint_id) {
+    Increment(syncpoints_guest[syncpoint_id], wait_guest_cv, guest_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::IncrementHost(u32 syncpoint_id) {
+    Increment(syncpoints_host[syncpoint_id], wait_host_cv, host_action_storage[syncpoint_id]);
+}
+
+void SyncpointManager::WaitGuest(u32 syncpoint_id, u32 expected_value) {
+    Wait(syncpoints_guest[syncpoint_id], wait_guest_cv, expected_value);
+}
+
+void SyncpointManager::WaitHost(u32 syncpoint_id, u32 expected_value) {
+    MICROPROFILE_SCOPE(GPU_wait);
+    Wait(syncpoints_host[syncpoint_id], wait_host_cv, expected_value);
+}
+
+void SyncpointManager::Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+                                 std::list<RegisteredAction>& action_storage) {
+    auto new_value{syncpoint.fetch_add(1, std::memory_order_acq_rel) + 1};
+
+    std::unique_lock lk(guard);
+    auto it = action_storage.begin();
+    while (it != action_storage.end()) {
+        if (it->expected_value > new_value) {
+            break;
+        }
+        it->action();
+        it = action_storage.erase(it);
+    }
+    wait_cv.notify_all();
+}
+
+void SyncpointManager::Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+                            u32 expected_value) {
+    const auto pred = [&]() { return syncpoint.load(std::memory_order_acquire) >= expected_value; };
+    if (pred()) {
+        return;
+    }
+
+    std::unique_lock lk(guard);
+    wait_cv.wait(lk, pred);
+}
+
+} // namespace Host1x
+
+} // namespace Tegra
diff --git a/src/video_core/host1x/syncpoint_manager.h b/src/video_core/host1x/syncpoint_manager.h
new file mode 100644
index 000000000..50a264e23
--- /dev/null
+++ b/src/video_core/host1x/syncpoint_manager.h
@@ -0,0 +1,98 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#pragma once
+
+#include <array>
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <list>
+#include <mutex>
+
+#include "common/common_types.h"
+
+namespace Tegra {
+
+namespace Host1x {
+
+class SyncpointManager {
+public:
+    u32 GetGuestSyncpointValue(u32 id) const {
+        return syncpoints_guest[id].load(std::memory_order_acquire);
+    }
+
+    u32 GetHostSyncpointValue(u32 id) const {
+        return syncpoints_host[id].load(std::memory_order_acquire);
+    }
+
+    struct RegisteredAction {
+        explicit RegisteredAction(u32 expected_value_, std::function<void()>&& action_)
+            : expected_value{expected_value_}, action{std::move(action_)} {}
+        u32 expected_value;
+        std::function<void()> action;
+    };
+    using ActionHandle = std::list<RegisteredAction>::iterator;
+
+    template <typename Func>
+    ActionHandle RegisterGuestAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+        std::function<void()> func(action);
+        return RegisterAction(syncpoints_guest[syncpoint_id], guest_action_storage[syncpoint_id],
+                              expected_value, std::move(func));
+    }
+
+    template <typename Func>
+    ActionHandle RegisterHostAction(u32 syncpoint_id, u32 expected_value, Func&& action) {
+        std::function<void()> func(action);
+        return RegisterAction(syncpoints_host[syncpoint_id], host_action_storage[syncpoint_id],
+                              expected_value, std::move(func));
+    }
+
+    void DeregisterGuestAction(u32 syncpoint_id, ActionHandle& handle);
+
+    void DeregisterHostAction(u32 syncpoint_id, ActionHandle& handle);
+
+    void IncrementGuest(u32 syncpoint_id);
+
+    void IncrementHost(u32 syncpoint_id);
+
+    void WaitGuest(u32 syncpoint_id, u32 expected_value);
+
+    void WaitHost(u32 syncpoint_id, u32 expected_value);
+
+    bool IsReadyGuest(u32 syncpoint_id, u32 expected_value) const {
+        return syncpoints_guest[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+    }
+
+    bool IsReadyHost(u32 syncpoint_id, u32 expected_value) const {
+        return syncpoints_host[syncpoint_id].load(std::memory_order_acquire) >= expected_value;
+    }
+
+private:
+    void Increment(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv,
+                   std::list<RegisteredAction>& action_storage);
+
+    ActionHandle RegisterAction(std::atomic<u32>& syncpoint,
+                                std::list<RegisteredAction>& action_storage, u32 expected_value,
+                                std::function<void()>&& action);
+
+    void DeregisterAction(std::list<RegisteredAction>& action_storage, ActionHandle& handle);
+
+    void Wait(std::atomic<u32>& syncpoint, std::condition_variable& wait_cv, u32 expected_value);
+
+    static constexpr size_t NUM_MAX_SYNCPOINTS = 192;
+
+    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_guest{};
+    std::array<std::atomic<u32>, NUM_MAX_SYNCPOINTS> syncpoints_host{};
+
+    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> guest_action_storage;
+    std::array<std::list<RegisteredAction>, NUM_MAX_SYNCPOINTS> host_action_storage;
+
+    std::mutex guard;
+    std::condition_variable wait_guest_cv;
+    std::condition_variable wait_host_cv;
+};
+
+} // namespace Host1x
+
+} // namespace Tegra
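The manager tracks guest and host counters separately and lets clients either block or register a callback that fires once a threshold is crossed. A small sketch of both patterns, using only the public interface above (the ordering check is illustrative; real callers must handle the registration/increment race themselves):

#include "video_core/host1x/syncpoint_manager.h"

void Example(Tegra::Host1x::SyncpointManager& manager) {
    // Callback pattern: runs immediately if syncpoint 3 already reached 10,
    // otherwise fires inside the increment that crosses the threshold.
    auto handle = manager.RegisterHostAction(3, 10, [] { /* e.g. signal a fence */ });

    // A still-pending action can be cancelled via its handle.
    if (!manager.IsReadyHost(3, 10)) {
        manager.DeregisterHostAction(3, handle);
    }

    // Blocking pattern: sleeps on the condition variable until the host
    // counter for syncpoint 3 reaches 10.
    manager.WaitHost(3, 10);
}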
diff --git a/src/video_core/command_classes/vic.cpp b/src/video_core/host1x/vic.cpp
index 7c17df353..ac0b7d20e 100644
--- a/src/video_core/command_classes/vic.cpp
+++ b/src/video_core/host1x/vic.cpp
@@ -18,14 +18,17 @@ extern "C" {
 #include "common/bit_field.h"
 #include "common/logging/log.h"
 
-#include "video_core/command_classes/nvdec.h"
-#include "video_core/command_classes/vic.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
+#include "video_core/host1x/host1x.h"
+#include "video_core/host1x/nvdec.h"
+#include "video_core/host1x/vic.h"
 #include "video_core/memory_manager.h"
 #include "video_core/textures/decoders.h"
 
 namespace Tegra {
+
+namespace Host1x {
+
 namespace {
 enum class VideoPixelFormat : u64_le {
     RGBA8 = 0x1f,
@@ -46,8 +49,8 @@ union VicConfig {
     BitField<46, 14, u64_le> surface_height_minus1;
 };
 
-Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
-    : gpu(gpu_),
+Vic::Vic(Host1x& host1x_, std::shared_ptr<Nvdec> nvdec_processor_)
+    : host1x(host1x_),
       nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
 
 Vic::~Vic() = default;
@@ -78,7 +81,7 @@ void Vic::Execute() {
         LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
         return;
     }
-    const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
+    const VicConfig config{host1x.MemoryManager().Read<u64>(config_struct_address + 0x20)};
     const AVFramePtr frame_ptr = nvdec_processor->GetFrame();
     const auto* frame = frame_ptr.get();
     if (!frame) {
@@ -153,15 +156,16 @@ void Vic::WriteRGBFrame(const AVFrame* frame, const VicConfig& config) {
         const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
         const auto size = Texture::CalculateSize(true, 4, width, height, 1, block_height, 0);
         luma_buffer.resize(size);
-        Texture::SwizzleSubrect(width, height, width * 4, width, 4, luma_buffer.data(),
-                                converted_frame_buf_addr, block_height, 0, 0);
+        std::span<const u8> frame_buff(converted_frame_buf_addr, 4 * width * height);
+        Texture::SwizzleSubrect(luma_buffer, frame_buff, 4, width, height, 1, 0, 0, width, height,
+                                block_height, 0, width * 4);
 
-        gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
+        host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
     } else {
         // send pitch linear frame
         const size_t linear_size = width * height * 4;
-        gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
-                                       linear_size);
+        host1x.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
+                                          linear_size);
     }
 }
 
@@ -189,8 +193,8 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
             luma_buffer[dst + x] = luma_src[src + x];
         }
     }
-    gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
-                                   luma_buffer.size());
+    host1x.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(),
+                                      luma_buffer.size());
 
     // Chroma
     const std::size_t half_height = frame_height / 2;
@@ -231,8 +235,10 @@ void Vic::WriteYUVFrame(const AVFrame* frame, const VicConfig& config) {
             ASSERT(false);
             break;
        }
-        gpu.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
+        host1x.MemoryManager().WriteBlock(output_surface_chroma_address, chroma_buffer.data(),
235 chroma_buffer.size()); 239 chroma_buffer.size());
236} 240}
237 241
242} // namespace Host1x
243
238} // namespace Tegra 244} // namespace Tegra
diff --git a/src/video_core/command_classes/vic.h b/src/video_core/host1x/vic.h
index 010daa6b6..2b78786e8 100644
--- a/src/video_core/command_classes/vic.h
+++ b/src/video_core/host1x/vic.h
@@ -10,7 +10,10 @@
10struct SwsContext; 10struct SwsContext;
11 11
12namespace Tegra { 12namespace Tegra {
13class GPU; 13
14namespace Host1x {
15
16class Host1x;
14class Nvdec; 17class Nvdec;
15union VicConfig; 18union VicConfig;
16 19
@@ -25,7 +28,7 @@ public:
25 SetOutputSurfaceChromaUnusedOffset = 0x1ca 28 SetOutputSurfaceChromaUnusedOffset = 0x1ca
26 }; 29 };
27 30
28 explicit Vic(GPU& gpu, std::shared_ptr<Nvdec> nvdec_processor); 31 explicit Vic(Host1x& host1x, std::shared_ptr<Nvdec> nvdec_processor);
29 32
30 ~Vic(); 33 ~Vic();
31 34
@@ -39,8 +42,8 @@ private:
39 42
40 void WriteYUVFrame(const AVFrame* frame, const VicConfig& config); 43 void WriteYUVFrame(const AVFrame* frame, const VicConfig& config);
41 44
42 GPU& gpu; 45 Host1x& host1x;
43 std::shared_ptr<Tegra::Nvdec> nvdec_processor; 46 std::shared_ptr<Tegra::Host1x::Nvdec> nvdec_processor;
44 47
45 /// Avoid reallocation of the following buffers every frame, as their 48 /// Avoid reallocation of the following buffers every frame, as their
46 /// size does not change during a stream 49 /// size does not change during a stream
@@ -58,4 +61,6 @@ private:
58 s32 scaler_height{}; 61 s32 scaler_height{};
59}; 62};
60 63
64} // namespace Host1x
65
61} // namespace Tegra 66} // namespace Tegra
diff --git a/src/video_core/host_shaders/astc_decoder.comp b/src/video_core/host_shaders/astc_decoder.comp
index 3441a5fe5..d608678a3 100644
--- a/src/video_core/host_shaders/astc_decoder.comp
+++ b/src/video_core/host_shaders/astc_decoder.comp
@@ -1065,7 +1065,7 @@ TexelWeightParams DecodeBlockInfo() {
1065void FillError(ivec3 coord) { 1065void FillError(ivec3 coord) {
1066 for (uint j = 0; j < block_dims.y; j++) { 1066 for (uint j = 0; j < block_dims.y; j++) {
1067 for (uint i = 0; i < block_dims.x; i++) { 1067 for (uint i = 0; i < block_dims.x; i++) {
1068 imageStore(dest_image, coord + ivec3(i, j, 0), vec4(1.0, 1.0, 0.0, 1.0)); 1068 imageStore(dest_image, coord + ivec3(i, j, 0), vec4(0.0, 0.0, 0.0, 0.0));
1069 } 1069 }
1070 } 1070 }
1071} 1071}
diff --git a/src/video_core/macro/macro.cpp b/src/video_core/macro/macro.cpp
index 43f8b5904..f61d5998e 100644
--- a/src/video_core/macro/macro.cpp
+++ b/src/video_core/macro/macro.cpp
@@ -8,6 +8,7 @@
8 8
9#include <boost/container_hash/hash.hpp> 9#include <boost/container_hash/hash.hpp>
10 10
11#include <fstream>
11#include "common/assert.h" 12#include "common/assert.h"
12#include "common/fs/fs.h" 13#include "common/fs/fs.h"
13#include "common/fs/path_util.h" 14#include "common/fs/path_util.h"
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp
index 58382755b..cabe8dcbf 100644
--- a/src/video_core/macro/macro_hle.cpp
+++ b/src/video_core/macro/macro_hle.cpp
@@ -3,6 +3,8 @@
3 3
4#include <array> 4#include <array>
5#include <vector> 5#include <vector>
6#include "common/scope_exit.h"
7#include "video_core/dirty_flags.h"
6#include "video_core/engines/maxwell_3d.h" 8#include "video_core/engines/maxwell_3d.h"
7#include "video_core/macro/macro.h" 9#include "video_core/macro/macro.h"
8#include "video_core/macro/macro_hle.h" 10#include "video_core/macro/macro_hle.h"
@@ -58,6 +60,7 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
58 maxwell3d.regs.index_array.first = parameters[3]; 60 maxwell3d.regs.index_array.first = parameters[3];
59 maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base? 61 maxwell3d.regs.reg_array[0x446] = element_base; // vertex id base?
60 maxwell3d.regs.index_array.count = parameters[1]; 62 maxwell3d.regs.index_array.count = parameters[1];
63 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
61 maxwell3d.regs.vb_element_base = element_base; 64 maxwell3d.regs.vb_element_base = element_base;
62 maxwell3d.regs.vb_base_instance = base_instance; 65 maxwell3d.regs.vb_base_instance = base_instance;
63 maxwell3d.mme_draw.instance_count = instance_count; 66 maxwell3d.mme_draw.instance_count = instance_count;
@@ -80,10 +83,67 @@ void HLE_0217920100488FF7(Engines::Maxwell3D& maxwell3d, const std::vector<u32>&
80 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined; 83 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
81} 84}
82 85
83constexpr std::array<std::pair<u64, HLEFunction>, 3> hle_funcs{{ 86// Multidraw Indirect
87void HLE_3F5E74B9C9A50164(Engines::Maxwell3D& maxwell3d, const std::vector<u32>& parameters) {
88 SCOPE_EXIT({
89 // Clean everything.
90 maxwell3d.regs.reg_array[0x446] = 0x0; // vertex id base?
91 maxwell3d.regs.index_array.count = 0;
92 maxwell3d.regs.vb_element_base = 0x0;
93 maxwell3d.regs.vb_base_instance = 0x0;
94 maxwell3d.mme_draw.instance_count = 0;
95 maxwell3d.CallMethodFromMME(0x8e3, 0x640);
96 maxwell3d.CallMethodFromMME(0x8e4, 0x0);
97 maxwell3d.CallMethodFromMME(0x8e5, 0x0);
98 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
99 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
100 });
101 const u32 start_indirect = parameters[0];
102 const u32 end_indirect = parameters[1];
103 if (start_indirect >= end_indirect) {
104 // Nothing to do.
105 return;
106 }
107 const auto topology =
108 static_cast<Tegra::Engines::Maxwell3D::Regs::PrimitiveTopology>(parameters[2]);
109 maxwell3d.regs.draw.topology.Assign(topology);
110 const u32 padding = parameters[3];
111 const std::size_t max_draws = parameters[4];
112
113 const u32 indirect_words = 5 + padding;
114 const std::size_t first_draw = start_indirect;
115 const std::size_t effective_draws = end_indirect - start_indirect;
116 const std::size_t last_draw = start_indirect + std::min(effective_draws, max_draws);
117
118 for (std::size_t index = first_draw; index < last_draw; index++) {
119 const std::size_t base = index * indirect_words + 5;
120 const u32 num_vertices = parameters[base];
121 const u32 instance_count = parameters[base + 1];
122 const u32 first_index = parameters[base + 2];
123 const u32 base_vertex = parameters[base + 3];
124 const u32 base_instance = parameters[base + 4];
125 maxwell3d.regs.index_array.first = first_index;
126 maxwell3d.regs.reg_array[0x446] = base_vertex;
127 maxwell3d.regs.index_array.count = num_vertices;
128 maxwell3d.regs.vb_element_base = base_vertex;
129 maxwell3d.regs.vb_base_instance = base_instance;
130 maxwell3d.mme_draw.instance_count = instance_count;
131 maxwell3d.CallMethodFromMME(0x8e3, 0x640);
132 maxwell3d.CallMethodFromMME(0x8e4, base_vertex);
133 maxwell3d.CallMethodFromMME(0x8e5, base_instance);
134 maxwell3d.dirty.flags[VideoCommon::Dirty::IndexBuffer] = true;
135 if (maxwell3d.ShouldExecute()) {
136 maxwell3d.Rasterizer().Draw(true, true);
137 }
138 maxwell3d.mme_draw.current_mode = Engines::Maxwell3D::MMEDrawMode::Undefined;
139 }
140}
141
142constexpr std::array<std::pair<u64, HLEFunction>, 4> hle_funcs{{
84 {0x771BB18C62444DA0, &HLE_771BB18C62444DA0}, 143 {0x771BB18C62444DA0, &HLE_771BB18C62444DA0},
85 {0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD}, 144 {0x0D61FC9FAAC9FCAD, &HLE_0D61FC9FAAC9FCAD},
86 {0x0217920100488FF7, &HLE_0217920100488FF7}, 145 {0x0217920100488FF7, &HLE_0217920100488FF7},
146 {0x3F5E74B9C9A50164, &HLE_3F5E74B9C9A50164},
87}}; 147}};
88 148
89class HLEMacroImpl final : public CachedMacro { 149class HLEMacroImpl final : public CachedMacro {
@@ -99,6 +159,7 @@ private:
99 Engines::Maxwell3D& maxwell3d; 159 Engines::Maxwell3D& maxwell3d;
100 HLEFunction func; 160 HLEFunction func;
101}; 161};
162
102} // Anonymous namespace 163} // Anonymous namespace
103 164
104HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {} 165HLEMacro::HLEMacro(Engines::Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {}
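
For reference, the parameter stream consumed by HLE_3F5E74B9C9A50164 can be read off the indexing above (base = index * (5 + padding) + 5); this layout is inferred from the code, not documented (u32 is from common/common_types.h):

    // Inferred layout of the macro's u32 parameter stream:
    //   parameters[0]  start_indirect  first draw record to execute
    //   parameters[1]  end_indirect    one past the last draw record
    //   parameters[2]  topology        Maxwell3D primitive topology
    //   parameters[3]  padding         extra words trailing each record
    //   parameters[4]  max_draws       cap on the number of executed draws
    // Draw records start at word 5, with a stride of (5 + padding) words:
    struct IndirectDrawRecord {
        u32 num_vertices;   // parameters[base + 0]
        u32 instance_count; // parameters[base + 1]
        u32 first_index;    // parameters[base + 2]
        u32 base_vertex;    // parameters[base + 3]
        u32 base_instance;  // parameters[base + 4]
        // ...followed by `padding` unread words
    };

Note the indexing is absolute: record i sits at word i * (5 + padding) + 5, so start_indirect skips whole records rather than offsetting into the stream.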
diff --git a/src/video_core/macro/macro_jit_x64.cpp b/src/video_core/macro/macro_jit_x64.cpp
index aca25d902..a302a9603 100644
--- a/src/video_core/macro/macro_jit_x64.cpp
+++ b/src/video_core/macro/macro_jit_x64.cpp
@@ -279,28 +279,13 @@ void MacroJITx64Impl::Compile_ExtractInsert(Macro::Opcode opcode) {
279 auto dst = Compile_GetRegister(opcode.src_a, RESULT); 279 auto dst = Compile_GetRegister(opcode.src_a, RESULT);
280 auto src = Compile_GetRegister(opcode.src_b, eax); 280 auto src = Compile_GetRegister(opcode.src_b, eax);
281 281
282 if (opcode.bf_src_bit != 0 && opcode.bf_src_bit != 31) {
283 shr(src, opcode.bf_src_bit);
284 } else if (opcode.bf_src_bit == 31) {
285 xor_(src, src);
286 }
287 // Don't bother masking the whole register since we're using a 32 bit register
288 if (opcode.bf_size != 31 && opcode.bf_size != 0) {
289 and_(src, opcode.GetBitfieldMask());
290 } else if (opcode.bf_size == 0) {
291 xor_(src, src);
292 }
293 if (opcode.bf_dst_bit != 31 && opcode.bf_dst_bit != 0) {
294 shl(src, opcode.bf_dst_bit);
295 } else if (opcode.bf_dst_bit == 31) {
296 xor_(src, src);
297 }
298
299 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit); 282 const u32 mask = ~(opcode.GetBitfieldMask() << opcode.bf_dst_bit);
300 if (mask != 0xffffffff) { 283 and_(dst, mask);
301 and_(dst, mask); 284 shr(src, opcode.bf_src_bit);
302 } 285 and_(src, opcode.GetBitfieldMask());
286 shl(src, opcode.bf_dst_bit);
303 or_(dst, src); 287 or_(dst, src);
288
304 Compile_ProcessResult(opcode.result_operation, opcode.dst); 289 Compile_ProcessResult(opcode.result_operation, opcode.dst);
305} 290}
306 291
@@ -309,17 +294,9 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftImmediate(Macro::Opcode opcode) {
309 const auto src = Compile_GetRegister(opcode.src_b, RESULT); 294 const auto src = Compile_GetRegister(opcode.src_b, RESULT);
310 295
311 shr(src, dst.cvt8()); 296 shr(src, dst.cvt8());
312 if (opcode.bf_size != 0 && opcode.bf_size != 31) { 297 and_(src, opcode.GetBitfieldMask());
313 and_(src, opcode.GetBitfieldMask()); 298 shl(src, opcode.bf_dst_bit);
314 } else if (opcode.bf_size == 0) {
315 xor_(src, src);
316 }
317 299
318 if (opcode.bf_dst_bit != 0 && opcode.bf_dst_bit != 31) {
319 shl(src, opcode.bf_dst_bit);
320 } else if (opcode.bf_dst_bit == 31) {
321 xor_(src, src);
322 }
323 Compile_ProcessResult(opcode.result_operation, opcode.dst); 300 Compile_ProcessResult(opcode.result_operation, opcode.dst);
324} 301}
325 302
@@ -327,13 +304,8 @@ void MacroJITx64Impl::Compile_ExtractShiftLeftRegister(Macro::Opcode opcode) {
327 const auto dst = Compile_GetRegister(opcode.src_a, ecx); 304 const auto dst = Compile_GetRegister(opcode.src_a, ecx);
328 const auto src = Compile_GetRegister(opcode.src_b, RESULT); 305 const auto src = Compile_GetRegister(opcode.src_b, RESULT);
329 306
330 if (opcode.bf_src_bit != 0) { 307 shr(src, opcode.bf_src_bit);
331 shr(src, opcode.bf_src_bit); 308 and_(src, opcode.GetBitfieldMask());
332 }
333
334 if (opcode.bf_size != 31) {
335 and_(src, opcode.GetBitfieldMask());
336 }
337 shl(src, dst.cvt8()); 309 shl(src, dst.cvt8());
338 310
339 Compile_ProcessResult(opcode.result_operation, opcode.dst); 311 Compile_ProcessResult(opcode.result_operation, opcode.dst);
@@ -429,17 +401,11 @@ void MacroJITx64Impl::Compile_Branch(Macro::Opcode opcode) {
429 Xbyak::Label handle_post_exit{}; 401 Xbyak::Label handle_post_exit{};
430 Xbyak::Label skip{}; 402 Xbyak::Label skip{};
431 jmp(skip, T_NEAR); 403 jmp(skip, T_NEAR);
432 if (opcode.is_exit) { 404
433 L(handle_post_exit); 405 L(handle_post_exit);
434 // Execute 1 instruction 406 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
435 mov(BRANCH_HOLDER, end_of_code); 407 jmp(labels[jump_address], T_NEAR);
436 // Jump to next instruction to skip delay slot check 408
437 jmp(labels[jump_address], T_NEAR);
438 } else {
439 L(handle_post_exit);
440 xor_(BRANCH_HOLDER, BRANCH_HOLDER);
441 jmp(labels[jump_address], T_NEAR);
442 }
443 L(skip); 409 L(skip);
444 mov(BRANCH_HOLDER, handle_post_exit); 410 mov(BRANCH_HOLDER, handle_post_exit);
445 jmp(delay_skip[pc], T_NEAR); 411 jmp(delay_skip[pc], T_NEAR);
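
The deleted branches special-cased shift counts and field sizes of 0 and 31; the replacement emits the full shift/mask/shift sequence unconditionally. Since bf_src_bit, bf_dst_bit, and the field size are 5-bit opcode fields, every shift count stays in [0, 31], so the straight-line form is both shorter and, unlike the old bf_src_bit == 31 path that zeroed src outright, preserves the extracted top bit. Reference semantics, as a sketch:

    // What Compile_ExtractInsert now computes, with mask = GetBitfieldMask():
    u32 ExtractInsert(u32 dst, u32 src, u32 src_bit, u32 dst_bit, u32 mask) {
        dst &= ~(mask << dst_bit);      // clear the destination field
        src = (src >> src_bit) & mask;  // extract the source field
        return dst | (src << dst_bit);  // merge it in at dst_bit
    }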
diff --git a/src/video_core/memory_manager.cpp b/src/video_core/memory_manager.cpp
index bf9eb735d..cca401c74 100644
--- a/src/video_core/memory_manager.cpp
+++ b/src/video_core/memory_manager.cpp
@@ -7,6 +7,7 @@
7#include "common/assert.h" 7#include "common/assert.h"
8#include "common/logging/log.h" 8#include "common/logging/log.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/device_memory.h"
10#include "core/hle/kernel/k_page_table.h" 11#include "core/hle/kernel/k_page_table.h"
11#include "core/hle/kernel/k_process.h" 12#include "core/hle/kernel/k_process.h"
12#include "core/memory.h" 13#include "core/memory.h"
@@ -16,172 +17,198 @@
16 17
17namespace Tegra { 18namespace Tegra {
18 19
19MemoryManager::MemoryManager(Core::System& system_) 20std::atomic<size_t> MemoryManager::unique_identifier_generator{};
20 : system{system_}, page_table(page_table_size) {} 21
22MemoryManager::MemoryManager(Core::System& system_, u64 address_space_bits_, u64 big_page_bits_,
23 u64 page_bits_)
24 : system{system_}, memory{system.Memory()}, device_memory{system.DeviceMemory()},
25 address_space_bits{address_space_bits_}, page_bits{page_bits_}, big_page_bits{big_page_bits_},
26 entries{}, big_entries{}, page_table{address_space_bits, address_space_bits + page_bits - 38,
27 page_bits != big_page_bits ? page_bits : 0},
28 unique_identifier{unique_identifier_generator.fetch_add(1, std::memory_order_acq_rel)} {
29 address_space_size = 1ULL << address_space_bits;
30 page_size = 1ULL << page_bits;
31 page_mask = page_size - 1ULL;
32 big_page_size = 1ULL << big_page_bits;
33 big_page_mask = big_page_size - 1ULL;
34 const u64 page_table_bits = address_space_bits - page_bits;
35 const u64 big_page_table_bits = address_space_bits - big_page_bits;
36 const u64 page_table_size = 1ULL << page_table_bits;
37 const u64 big_page_table_size = 1ULL << big_page_table_bits;
38 page_table_mask = page_table_size - 1;
39 big_page_table_mask = big_page_table_size - 1;
40
41 big_entries.resize(big_page_table_size / 32, 0);
42 big_page_table_cpu.resize(big_page_table_size);
43 big_page_continous.resize(big_page_table_size / continous_bits, 0);
44 entries.resize(page_table_size / 32, 0);
45}
21 46
22MemoryManager::~MemoryManager() = default; 47MemoryManager::~MemoryManager() = default;
23 48
24void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) { 49template <bool is_big_page>
25 rasterizer = rasterizer_; 50MemoryManager::EntryType MemoryManager::GetEntry(size_t position) const {
26} 51 if constexpr (is_big_page) {
27 52 position = position >> big_page_bits;
28GPUVAddr MemoryManager::UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { 53 const u64 entry_mask = big_entries[position / 32];
29 u64 remaining_size{size}; 54 const size_t sub_index = position % 32;
30 for (u64 offset{}; offset < size; offset += page_size) { 55 return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
31 if (remaining_size < page_size) { 56 } else {
32 SetPageEntry(gpu_addr + offset, page_entry + offset, remaining_size); 57 position = position >> page_bits;
33 } else { 58 const u64 entry_mask = entries[position / 32];
34 SetPageEntry(gpu_addr + offset, page_entry + offset); 59 const size_t sub_index = position % 32;
35 } 60 return static_cast<EntryType>((entry_mask >> (2 * sub_index)) & 0x03ULL);
36 remaining_size -= page_size;
37 } 61 }
38 return gpu_addr;
39} 62}
40 63
41GPUVAddr MemoryManager::Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size) { 64template <bool is_big_page>
42 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); 65void MemoryManager::SetEntry(size_t position, MemoryManager::EntryType entry) {
43 if (it != map_ranges.end() && it->first == gpu_addr) { 66 if constexpr (is_big_page) {
44 it->second = size; 67 position = position >> big_page_bits;
68 const u64 entry_mask = big_entries[position / 32];
69 const size_t sub_index = position % 32;
70 big_entries[position / 32] =
71 (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
45 } else { 72 } else {
46 map_ranges.insert(it, MapRange{gpu_addr, size}); 73 position = position >> page_bits;
74 const u64 entry_mask = entries[position / 32];
75 const size_t sub_index = position % 32;
76 entries[position / 32] =
77 (~(3ULL << sub_index * 2) & entry_mask) | (static_cast<u64>(entry) << sub_index * 2);
47 } 78 }
48 return UpdateRange(gpu_addr, cpu_addr, size);
49} 79}
50 80
51GPUVAddr MemoryManager::MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align) { 81inline bool MemoryManager::IsBigPageContinous(size_t big_page_index) const {
52 return Map(cpu_addr, *FindFreeRange(size, align), size); 82 const u64 entry_mask = big_page_continous[big_page_index / continous_bits];
83 const size_t sub_index = big_page_index % continous_bits;
84 return ((entry_mask >> sub_index) & 0x1ULL) != 0;
53} 85}
54 86
55GPUVAddr MemoryManager::MapAllocate32(VAddr cpu_addr, std::size_t size) { 87inline void MemoryManager::SetBigPageContinous(size_t big_page_index, bool value) {
56 const std::optional<GPUVAddr> gpu_addr = FindFreeRange(size, 1, true); 88 const u64 continous_mask = big_page_continous[big_page_index / continous_bits];
57 ASSERT(gpu_addr); 89 const size_t sub_index = big_page_index % continous_bits;
58 return Map(cpu_addr, *gpu_addr, size); 90 big_page_continous[big_page_index / continous_bits] =
91 (~(1ULL << sub_index) & continous_mask) | (value ? 1ULL << sub_index : 0);
59} 92}
60 93
61void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) { 94template <MemoryManager::EntryType entry_type>
62 if (size == 0) { 95GPUVAddr MemoryManager::PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
63 return; 96 size_t size) {
64 } 97 u64 remaining_size{size};
65 const auto it = std::ranges::lower_bound(map_ranges, gpu_addr, {}, &MapRange::first); 98 if constexpr (entry_type == EntryType::Mapped) {
66 if (it != map_ranges.end()) { 99 page_table.ReserveRange(gpu_addr, size);
67 ASSERT(it->first == gpu_addr);
68 map_ranges.erase(it);
69 } else {
70 ASSERT_MSG(false, "Unmapping non-existent GPU address=0x{:x}", gpu_addr);
71 }
72 const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
73
74 for (const auto& [map_addr, map_size] : submapped_ranges) {
75 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
76 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
77 ASSERT(cpu_addr);
78
79 rasterizer->UnmapMemory(*cpu_addr, map_size);
80 } 100 }
81
82 UpdateRange(gpu_addr, PageEntry::State::Unmapped, size);
83}
84
85std::optional<GPUVAddr> MemoryManager::AllocateFixed(GPUVAddr gpu_addr, std::size_t size) {
86 for (u64 offset{}; offset < size; offset += page_size) { 101 for (u64 offset{}; offset < size; offset += page_size) {
87 if (!GetPageEntry(gpu_addr + offset).IsUnmapped()) { 102 const GPUVAddr current_gpu_addr = gpu_addr + offset;
88 return std::nullopt; 103 [[maybe_unused]] const auto current_entry_type = GetEntry<false>(current_gpu_addr);
104 SetEntry<false>(current_gpu_addr, entry_type);
105 if (current_entry_type != entry_type) {
106 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, page_size);
107 }
108 if constexpr (entry_type == EntryType::Mapped) {
109 const VAddr current_cpu_addr = cpu_addr + offset;
110 const auto index = PageEntryIndex<false>(current_gpu_addr);
111 const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
112 page_table[index] = sub_value;
89 } 113 }
114 remaining_size -= page_size;
90 } 115 }
91 116 return gpu_addr;
92 return UpdateRange(gpu_addr, PageEntry::State::Allocated, size);
93}
94
95GPUVAddr MemoryManager::Allocate(std::size_t size, std::size_t align) {
96 return *AllocateFixed(*FindFreeRange(size, align), size);
97} 117}
98 118
99void MemoryManager::TryLockPage(PageEntry page_entry, std::size_t size) { 119template <MemoryManager::EntryType entry_type>
100 if (!page_entry.IsValid()) { 120GPUVAddr MemoryManager::BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr,
101 return; 121 size_t size) {
122 u64 remaining_size{size};
123 for (u64 offset{}; offset < size; offset += big_page_size) {
124 const GPUVAddr current_gpu_addr = gpu_addr + offset;
125 [[maybe_unused]] const auto current_entry_type = GetEntry<true>(current_gpu_addr);
126 SetEntry<true>(current_gpu_addr, entry_type);
127 if (current_entry_type != entry_type) {
128 rasterizer->ModifyGPUMemory(unique_identifier, gpu_addr, big_page_size);
129 }
130 if constexpr (entry_type == EntryType::Mapped) {
131 const VAddr current_cpu_addr = cpu_addr + offset;
132 const auto index = PageEntryIndex<true>(current_gpu_addr);
133 const u32 sub_value = static_cast<u32>(current_cpu_addr >> cpu_page_bits);
134 big_page_table_cpu[index] = sub_value;
135 const bool is_continous = ([&] {
136 uintptr_t base_ptr{
137 reinterpret_cast<uintptr_t>(memory.GetPointerSilent(current_cpu_addr))};
138 if (base_ptr == 0) {
139 return false;
140 }
141 for (VAddr start_cpu = current_cpu_addr + page_size;
142 start_cpu < current_cpu_addr + big_page_size; start_cpu += page_size) {
143 base_ptr += page_size;
144 auto next_ptr = reinterpret_cast<uintptr_t>(memory.GetPointerSilent(start_cpu));
145 if (next_ptr == 0 || base_ptr != next_ptr) {
146 return false;
147 }
148 }
149 return true;
150 })();
151 SetBigPageContinous(index, is_continous);
152 }
153 remaining_size -= big_page_size;
102 } 154 }
103 155 return gpu_addr;
104 ASSERT(system.CurrentProcess()
105 ->PageTable()
106 .LockForDeviceAddressSpace(page_entry.ToAddress(), size)
107 .IsSuccess());
108} 156}
109 157
110void MemoryManager::TryUnlockPage(PageEntry page_entry, std::size_t size) { 158void MemoryManager::BindRasterizer(VideoCore::RasterizerInterface* rasterizer_) {
111 if (!page_entry.IsValid()) { 159 rasterizer = rasterizer_;
112 return;
113 }
114
115 ASSERT(system.CurrentProcess()
116 ->PageTable()
117 .UnlockForDeviceAddressSpace(page_entry.ToAddress(), size)
118 .IsSuccess());
119} 160}
120 161
121PageEntry MemoryManager::GetPageEntry(GPUVAddr gpu_addr) const { 162GPUVAddr MemoryManager::Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size,
122 return page_table[PageEntryIndex(gpu_addr)]; 163 bool is_big_pages) {
164 if (is_big_pages) [[likely]] {
165 return BigPageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
166 }
167 return PageTableOp<EntryType::Mapped>(gpu_addr, cpu_addr, size);
123} 168}
124 169
125void MemoryManager::SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size) { 170GPUVAddr MemoryManager::MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages) {
126 // TODO(bunnei): We should lock/unlock device regions. This currently causes issues due to 171 if (is_big_pages) [[likely]] {
127 // improper tracking, but should be fixed in the future. 172 return BigPageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
128
129 //// Unlock the old page
130 // TryUnlockPage(page_table[PageEntryIndex(gpu_addr)], size);
131
132 //// Lock the new page
133 // TryLockPage(page_entry, size);
134 auto& current_page = page_table[PageEntryIndex(gpu_addr)];
135
136 if ((!current_page.IsValid() && page_entry.IsValid()) ||
137 current_page.ToAddress() != page_entry.ToAddress()) {
138 rasterizer->ModifyGPUMemory(gpu_addr, size);
139 } 173 }
140 174 return PageTableOp<EntryType::Reserved>(gpu_addr, 0, size);
141 current_page = page_entry;
142} 175}
143 176
144std::optional<GPUVAddr> MemoryManager::FindFreeRange(std::size_t size, std::size_t align, 177void MemoryManager::Unmap(GPUVAddr gpu_addr, std::size_t size) {
145 bool start_32bit_address) const { 178 if (size == 0) {
146 if (!align) { 179 return;
147 align = page_size;
148 } else {
149 align = Common::AlignUp(align, page_size);
150 } 180 }
181 const auto submapped_ranges = GetSubmappedRange(gpu_addr, size);
151 182
152 u64 available_size{}; 183 for (const auto& [map_addr, map_size] : submapped_ranges) {
153 GPUVAddr gpu_addr{start_32bit_address ? address_space_start_low : address_space_start}; 184 // Flush and invalidate through the GPU interface, to be asynchronous if possible.
154 while (gpu_addr + available_size < address_space_size) { 185 const std::optional<VAddr> cpu_addr = GpuToCpuAddress(map_addr);
155 if (GetPageEntry(gpu_addr + available_size).IsUnmapped()) { 186 ASSERT(cpu_addr);
156 available_size += page_size;
157
158 if (available_size >= size) {
159 return gpu_addr;
160 }
161 } else {
162 gpu_addr += available_size + page_size;
163 available_size = 0;
164 187
165 const auto remainder{gpu_addr % align}; 188 rasterizer->UnmapMemory(*cpu_addr, map_size);
166 if (remainder) {
167 gpu_addr = (gpu_addr - remainder) + align;
168 }
169 }
170 } 189 }
171 190
172 return std::nullopt; 191 BigPageTableOp<EntryType::Free>(gpu_addr, 0, size);
192 PageTableOp<EntryType::Free>(gpu_addr, 0, size);
173} 193}
174 194
175std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const { 195std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr gpu_addr) const {
176 if (gpu_addr == 0) { 196 if (!IsWithinGPUAddressRange(gpu_addr)) [[unlikely]] {
177 return std::nullopt; 197 return std::nullopt;
178 } 198 }
179 const auto page_entry{GetPageEntry(gpu_addr)}; 199 if (GetEntry<true>(gpu_addr) != EntryType::Mapped) [[unlikely]] {
180 if (!page_entry.IsValid()) { 200 if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
181 return std::nullopt; 201 return std::nullopt;
202 }
203
204 const VAddr cpu_addr_base = static_cast<VAddr>(page_table[PageEntryIndex<false>(gpu_addr)])
205 << cpu_page_bits;
206 return cpu_addr_base + (gpu_addr & page_mask);
182 } 207 }
183 208
184 return page_entry.ToAddress() + (gpu_addr & page_mask); 209 const VAddr cpu_addr_base =
210 static_cast<VAddr>(big_page_table_cpu[PageEntryIndex<true>(gpu_addr)]) << cpu_page_bits;
211 return cpu_addr_base + (gpu_addr & big_page_mask);
185} 212}
186 213
187std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const { 214std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t size) const {
@@ -189,7 +216,7 @@ std::optional<VAddr> MemoryManager::GpuToCpuAddress(GPUVAddr addr, std::size_t s
189 const size_t page_last{(addr + size + page_size - 1) >> page_bits}; 216 const size_t page_last{(addr + size + page_size - 1) >> page_bits};
190 while (page_index < page_last) { 217 while (page_index < page_last) {
191 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 218 const auto page_addr{GpuToCpuAddress(page_index << page_bits)};
192 if (page_addr && *page_addr != 0) { 219 if (page_addr) {
193 return page_addr; 220 return page_addr;
194 } 221 }
195 ++page_index; 222 ++page_index;
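
Both lookups above recover the CPU address the same way: the table stores a 32-bit CPU page number, which is shifted back up by cpu_page_bits and combined with the offset inside the (big) page. A sketch, assuming cpu_page_bits == 12 as used for the sub_value packing earlier in this diff (VAddr/GPUVAddr/u32/u64 are yuzu's common types):

    // With 4 KiB CPU pages and 32-bit entries, this covers a 2^44-byte
    // CPU address space; cpu_page_bits == 12 is an assumption here.
    VAddr TranslateBig(u32 entry, GPUVAddr gpu_addr, u64 big_page_mask) {
        const VAddr cpu_addr_base = static_cast<VAddr>(entry) << 12;
        return cpu_addr_base + (gpu_addr & big_page_mask);
    }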
@@ -232,126 +259,298 @@ template void MemoryManager::Write<u32>(GPUVAddr addr, u32 data);
232template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data); 259template void MemoryManager::Write<u64>(GPUVAddr addr, u64 data);
233 260
234u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) { 261u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) {
235 if (!GetPageEntry(gpu_addr).IsValid()) {
236 return {};
237 }
238
239 const auto address{GpuToCpuAddress(gpu_addr)}; 262 const auto address{GpuToCpuAddress(gpu_addr)};
240 if (!address) { 263 if (!address) {
241 return {}; 264 return {};
242 } 265 }
243 266
244 return system.Memory().GetPointer(*address); 267 return memory.GetPointer(*address);
245} 268}
246 269
247const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const { 270const u8* MemoryManager::GetPointer(GPUVAddr gpu_addr) const {
248 if (!GetPageEntry(gpu_addr).IsValid()) {
249 return {};
250 }
251
252 const auto address{GpuToCpuAddress(gpu_addr)}; 271 const auto address{GpuToCpuAddress(gpu_addr)};
253 if (!address) { 272 if (!address) {
254 return {}; 273 return {};
255 } 274 }
256 275
257 return system.Memory().GetPointer(*address); 276 return memory.GetPointer(*address);
258} 277}
259 278
260size_t MemoryManager::BytesToMapEnd(GPUVAddr gpu_addr) const noexcept { 279#ifdef _MSC_VER // Not needed for GCC/Clang, but MSVC is more conservative with inlining.
261 auto it = std::ranges::upper_bound(map_ranges, gpu_addr, {}, &MapRange::first); 280#pragma inline_recursion(on)
262 --it; 281#endif
263 return it->second - (gpu_addr - it->first); 282
264} 283template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
265 284inline void MemoryManager::MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size,
266void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, 285 FuncMapped&& func_mapped, FuncReserved&& func_reserved,
267 bool is_safe) const { 286 FuncUnmapped&& func_unmapped) const {
287 static constexpr bool BOOL_BREAK_MAPPED = std::is_same_v<std::invoke_result_t<FuncMapped, std::size_t, std::size_t, std::size_t>, bool>;
288 static constexpr bool BOOL_BREAK_RESERVED = std::is_same_v<std::invoke_result_t<FuncReserved, std::size_t, std::size_t, std::size_t>, bool>;
289 static constexpr bool BOOL_BREAK_UNMAPPED = std::is_same_v<std::invoke_result_t<FuncUnmapped, std::size_t, std::size_t, std::size_t>, bool>;
290 u64 used_page_size;
291 u64 used_page_mask;
292 u64 used_page_bits;
293 if constexpr (is_big_pages) {
294 used_page_size = big_page_size;
295 used_page_mask = big_page_mask;
296 used_page_bits = big_page_bits;
297 } else {
298 used_page_size = page_size;
299 used_page_mask = page_mask;
300 used_page_bits = page_bits;
301 }
268 std::size_t remaining_size{size}; 302 std::size_t remaining_size{size};
269 std::size_t page_index{gpu_src_addr >> page_bits}; 303 std::size_t page_index{gpu_src_addr >> used_page_bits};
270 std::size_t page_offset{gpu_src_addr & page_mask}; 304 std::size_t page_offset{gpu_src_addr & used_page_mask};
305 GPUVAddr current_address = gpu_src_addr;
271 306
272 while (remaining_size > 0) { 307 while (remaining_size > 0) {
273 const std::size_t copy_amount{ 308 const std::size_t copy_amount{
274 std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; 309 std::min(static_cast<std::size_t>(used_page_size) - page_offset, remaining_size)};
275 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 310 auto entry = GetEntry<is_big_pages>(current_address);
276 if (page_addr && *page_addr != 0) { 311 if (entry == EntryType::Mapped) [[likely]] {
277 const auto src_addr{*page_addr + page_offset}; 312 if constexpr (BOOL_BREAK_MAPPED) {
278 if (is_safe) { 313 if (func_mapped(page_index, page_offset, copy_amount)) {
279 // Flush must happen on the rasterizer interface, such that memory is always 314 return;
280 // synchronous when it is read (even when in asynchronous GPU mode). 315 }
281 // Fixes Dead Cells title menu. 316 } else {
282 rasterizer->FlushRegion(src_addr, copy_amount); 317 func_mapped(page_index, page_offset, copy_amount);
283 } 318 }
284 system.Memory().ReadBlockUnsafe(src_addr, dest_buffer, copy_amount);
285 } else {
286 std::memset(dest_buffer, 0, copy_amount);
287 }
288 319
320 } else if (entry == EntryType::Reserved) {
321 if constexpr (BOOL_BREAK_RESERVED) {
322 if (func_reserved(page_index, page_offset, copy_amount)) {
323 return;
324 }
325 } else {
326 func_reserved(page_index, page_offset, copy_amount);
327 }
328
329 } else [[unlikely]] {
330 if constexpr (BOOL_BREAK_UNMAPPED) {
331 if (func_unmapped(page_index, page_offset, copy_amount)) {
332 return;
333 }
334 } else {
335 func_unmapped(page_index, page_offset, copy_amount);
336 }
337 }
289 page_index++; 338 page_index++;
290 page_offset = 0; 339 page_offset = 0;
291 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
292 remaining_size -= copy_amount; 340 remaining_size -= copy_amount;
341 current_address += copy_amount;
293 } 342 }
294} 343}
295 344
345template <bool is_safe>
346void MemoryManager::ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer,
347 std::size_t size) const {
348 auto set_to_zero = [&]([[maybe_unused]] std::size_t page_index,
349 [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
350 std::memset(dest_buffer, 0, copy_amount);
351 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
352 };
353 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
354 const VAddr cpu_addr_base =
355 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
356 if constexpr (is_safe) {
357 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
358 }
359 u8* physical = memory.GetPointer(cpu_addr_base);
360 std::memcpy(dest_buffer, physical, copy_amount);
361 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
362 };
363 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
364 const VAddr cpu_addr_base =
365 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
366 if constexpr (is_safe) {
367 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
368 }
369 if (!IsBigPageContinous(page_index)) [[unlikely]] {
370 memory.ReadBlockUnsafe(cpu_addr_base, dest_buffer, copy_amount);
371 } else {
372 u8* physical = memory.GetPointer(cpu_addr_base);
373 std::memcpy(dest_buffer, physical, copy_amount);
374 }
375 dest_buffer = static_cast<u8*>(dest_buffer) + copy_amount;
376 };
377 auto read_short_pages = [&](std::size_t page_index, std::size_t offset,
378 std::size_t copy_amount) {
379 GPUVAddr base = (page_index << big_page_bits) + offset;
380 MemoryOperation<false>(base, copy_amount, mapped_normal, set_to_zero, set_to_zero);
381 };
382 MemoryOperation<true>(gpu_src_addr, size, mapped_big, set_to_zero, read_short_pages);
383}
384
296void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const { 385void MemoryManager::ReadBlock(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const {
297 ReadBlockImpl(gpu_src_addr, dest_buffer, size, true); 386 ReadBlockImpl<true>(gpu_src_addr, dest_buffer, size);
298} 387}
299 388
300void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer, 389void MemoryManager::ReadBlockUnsafe(GPUVAddr gpu_src_addr, void* dest_buffer,
301 const std::size_t size) const { 390 const std::size_t size) const {
302 ReadBlockImpl(gpu_src_addr, dest_buffer, size, false); 391 ReadBlockImpl<false>(gpu_src_addr, dest_buffer, size);
303} 392}
304 393
305void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, 394template <bool is_safe>
306 bool is_safe) { 395void MemoryManager::WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer,
307 std::size_t remaining_size{size}; 396 std::size_t size) {
308 std::size_t page_index{gpu_dest_addr >> page_bits}; 397 auto just_advance = [&]([[maybe_unused]] std::size_t page_index,
309 std::size_t page_offset{gpu_dest_addr & page_mask}; 398 [[maybe_unused]] std::size_t offset, std::size_t copy_amount) {
310 399 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
311 while (remaining_size > 0) { 400 };
312 const std::size_t copy_amount{ 401 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
313 std::min(static_cast<std::size_t>(page_size) - page_offset, remaining_size)}; 402 const VAddr cpu_addr_base =
314 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 403 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
315 if (page_addr && *page_addr != 0) { 404 if constexpr (is_safe) {
316 const auto dest_addr{*page_addr + page_offset}; 405 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
317
318 if (is_safe) {
319 // Invalidate must happen on the rasterizer interface, such that memory is always
320 // synchronous when it is written (even when in asynchronous GPU mode).
321 rasterizer->InvalidateRegion(dest_addr, copy_amount);
322 }
323 system.Memory().WriteBlockUnsafe(dest_addr, src_buffer, copy_amount);
324 } 406 }
325 407 u8* physical = memory.GetPointer(cpu_addr_base);
326 page_index++; 408 std::memcpy(physical, src_buffer, copy_amount);
327 page_offset = 0;
328 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount; 409 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
329 remaining_size -= copy_amount; 410 };
330 } 411 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
412 const VAddr cpu_addr_base =
413 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
414 if constexpr (is_safe) {
415 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
416 }
417 if (!IsBigPageContinous(page_index)) [[unlikely]] {
418 memory.WriteBlockUnsafe(cpu_addr_base, src_buffer, copy_amount);
419 } else {
420 u8* physical = memory.GetPointer(cpu_addr_base);
421 std::memcpy(physical, src_buffer, copy_amount);
422 }
423 src_buffer = static_cast<const u8*>(src_buffer) + copy_amount;
424 };
425 auto write_short_pages = [&](std::size_t page_index, std::size_t offset,
426 std::size_t copy_amount) {
427 GPUVAddr base = (page_index << big_page_bits) + offset;
428 MemoryOperation<false>(base, copy_amount, mapped_normal, just_advance, just_advance);
429 };
430 MemoryOperation<true>(gpu_dest_addr, size, mapped_big, just_advance, write_short_pages);
331} 431}
332 432
333void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) { 433void MemoryManager::WriteBlock(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size) {
334 WriteBlockImpl(gpu_dest_addr, src_buffer, size, true); 434 WriteBlockImpl<true>(gpu_dest_addr, src_buffer, size);
335} 435}
336 436
337void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer, 437void MemoryManager::WriteBlockUnsafe(GPUVAddr gpu_dest_addr, const void* src_buffer,
338 std::size_t size) { 438 std::size_t size) {
339 WriteBlockImpl(gpu_dest_addr, src_buffer, size, false); 439 WriteBlockImpl<false>(gpu_dest_addr, src_buffer, size);
340} 440}
341 441
342void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const { 442void MemoryManager::FlushRegion(GPUVAddr gpu_addr, size_t size) const {
343 size_t remaining_size{size}; 443 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
344 size_t page_index{gpu_addr >> page_bits}; 444 [[maybe_unused]] std::size_t offset,
345 size_t page_offset{gpu_addr & page_mask}; 445 [[maybe_unused]] std::size_t copy_amount) {};
346 while (remaining_size > 0) { 446
347 const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; 447 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
348 if (const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; page_addr) { 448 const VAddr cpu_addr_base =
349 rasterizer->FlushRegion(*page_addr + page_offset, num_bytes); 449 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
450 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
451 };
452 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
453 const VAddr cpu_addr_base =
454 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
455 rasterizer->FlushRegion(cpu_addr_base, copy_amount);
456 };
457 auto flush_short_pages = [&](std::size_t page_index, std::size_t offset,
458 std::size_t copy_amount) {
459 GPUVAddr base = (page_index << big_page_bits) + offset;
460 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
461 };
462 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, flush_short_pages);
463}
464
465bool MemoryManager::IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const {
466 bool result = false;
467 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
468 [[maybe_unused]] std::size_t offset,
469 [[maybe_unused]] std::size_t copy_amount) { return false; };
470
471 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
472 const VAddr cpu_addr_base =
473 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
474 result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
475 return result;
476 };
477 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
478 const VAddr cpu_addr_base =
479 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
480 result |= rasterizer->MustFlushRegion(cpu_addr_base, copy_amount);
481 return result;
482 };
483 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
484 std::size_t copy_amount) {
485 GPUVAddr base = (page_index << big_page_bits) + offset;
486 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
487 return result;
488 };
489 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, check_short_pages);
490 return result;
491}
492
493size_t MemoryManager::MaxContinousRange(GPUVAddr gpu_addr, size_t size) const {
494 std::optional<VAddr> old_page_addr{};
495 size_t range_so_far = 0;
496 bool result{false};
497 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
498 std::size_t copy_amount) {
499 result = true;
500 return true;
501 };
502 auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
503 const VAddr cpu_addr_base =
504 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
505 if (old_page_addr && *old_page_addr != cpu_addr_base) {
506 result = true;
507 return true;
350 } 508 }
351 ++page_index; 509 range_so_far += copy_amount;
352 page_offset = 0; 510 old_page_addr = {cpu_addr_base + copy_amount};
353 remaining_size -= num_bytes; 511 return false;
354 } 512 };
513 auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
514 const VAddr cpu_addr_base =
515 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
516 if (old_page_addr && *old_page_addr != cpu_addr_base) {
517 return true;
518 }
519 range_so_far += copy_amount;
520 old_page_addr = {cpu_addr_base + copy_amount};
521 return false;
522 };
523 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
524 std::size_t copy_amount) {
525 GPUVAddr base = (page_index << big_page_bits) + offset;
526 MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
527 return result;
528 };
529 MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
530 return range_so_far;
531}
532
533void MemoryManager::InvalidateRegion(GPUVAddr gpu_addr, size_t size) const {
534 auto do_nothing = [&]([[maybe_unused]] std::size_t page_index,
535 [[maybe_unused]] std::size_t offset,
536 [[maybe_unused]] std::size_t copy_amount) {};
537
538 auto mapped_normal = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
539 const VAddr cpu_addr_base =
540 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
541 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
542 };
543 auto mapped_big = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
544 const VAddr cpu_addr_base =
545 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
546 rasterizer->InvalidateRegion(cpu_addr_base, copy_amount);
547 };
548 auto invalidate_short_pages = [&](std::size_t page_index, std::size_t offset,
549 std::size_t copy_amount) {
550 GPUVAddr base = (page_index << big_page_bits) + offset;
551 MemoryOperation<false>(base, copy_amount, mapped_normal, do_nothing, do_nothing);
552 };
553 MemoryOperation<true>(gpu_addr, size, mapped_big, do_nothing, invalidate_short_pages);
355} 554}
356 555
357void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) { 556void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std::size_t size) {
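
All of the block operations above funnel through MemoryOperation: a chunked walk over the GPU range that dispatches one of three callbacks per page depending on its entry state, where a callback returning bool ends the walk early (the BOOL_BREAK_* selection). A standalone analogue of the walk itself, assuming fixed 4 KiB pages and a single callback:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <type_traits>

    template <typename Func>
    void WalkPages(std::uint64_t addr, std::size_t size, Func&& per_chunk) {
        constexpr std::uint64_t page_bits = 12;
        constexpr std::uint64_t page_size = 1ULL << page_bits;
        std::size_t page_index = addr >> page_bits;
        std::size_t page_offset = addr & (page_size - 1);
        while (size > 0) {
            const std::size_t chunk =
                std::min<std::size_t>(page_size - page_offset, size);
            if constexpr (std::is_invocable_r_v<bool, Func, std::size_t,
                                                std::size_t, std::size_t>) {
                if (per_chunk(page_index, page_offset, chunk)) {
                    return; // the callback asked to stop early
                }
            } else {
                per_chunk(page_index, page_offset, chunk);
            }
            ++page_index;
            page_offset = 0;
            size -= chunk;
        }
    }

The big-page walk handles the common case directly; ranges backed by small pages recurse into a second, small-page walk (the *_short_pages lambdas), so one block operation can mix 64 KiB and 4 KiB mappings.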
@@ -365,87 +564,134 @@ void MemoryManager::CopyBlock(GPUVAddr gpu_dest_addr, GPUVAddr gpu_src_addr, std
365} 564}
366 565
367bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const { 566bool MemoryManager::IsGranularRange(GPUVAddr gpu_addr, std::size_t size) const {
368 const auto cpu_addr{GpuToCpuAddress(gpu_addr)}; 567 if (GetEntry<true>(gpu_addr) == EntryType::Mapped) [[likely]] {
369 if (!cpu_addr) { 568 size_t page_index = gpu_addr >> big_page_bits;
569 if (IsBigPageContinous(page_index)) [[likely]] {
570 const std::size_t page{(gpu_addr & big_page_mask) + size};
571 return page <= big_page_size;
572 }
573 const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
574 return page <= Core::Memory::YUZU_PAGESIZE;
575 }
576 if (GetEntry<false>(gpu_addr) != EntryType::Mapped) {
370 return false; 577 return false;
371 } 578 }
372 const std::size_t page{(*cpu_addr & Core::Memory::YUZU_PAGEMASK) + size}; 579 const std::size_t page{(gpu_addr & Core::Memory::YUZU_PAGEMASK) + size};
373 return page <= Core::Memory::YUZU_PAGESIZE; 580 return page <= Core::Memory::YUZU_PAGESIZE;
374} 581}
375 582
376bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const { 583bool MemoryManager::IsContinousRange(GPUVAddr gpu_addr, std::size_t size) const {
377 size_t page_index{gpu_addr >> page_bits};
378 const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits};
379 std::optional<VAddr> old_page_addr{}; 584 std::optional<VAddr> old_page_addr{};
380 while (page_index != page_last) { 585 bool result{true};
381 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 586 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
382 if (!page_addr || *page_addr == 0) { 587 std::size_t copy_amount) {
383 return false; 588 result = false;
589 return true;
590 };
591 auto short_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
592 const VAddr cpu_addr_base =
593 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
594 if (old_page_addr && *old_page_addr != cpu_addr_base) {
595 result = false;
596 return true;
384 } 597 }
385 if (old_page_addr) { 598 old_page_addr = {cpu_addr_base + copy_amount};
386 if (*old_page_addr + page_size != *page_addr) { 599 return false;
387 return false; 600 };
388 } 601 auto big_check = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
602 const VAddr cpu_addr_base =
603 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
604 if (old_page_addr && *old_page_addr != cpu_addr_base) {
605 result = false;
606 return true;
389 } 607 }
390 old_page_addr = page_addr; 608 old_page_addr = {cpu_addr_base + copy_amount};
391 ++page_index; 609 return false;
392 } 610 };
393 return true; 611 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
612 std::size_t copy_amount) {
613 GPUVAddr base = (page_index << big_page_bits) + offset;
614 MemoryOperation<false>(base, copy_amount, short_check, fail, fail);
615 return !result;
616 };
617 MemoryOperation<true>(gpu_addr, size, big_check, fail, check_short_pages);
618 return result;
394} 619}
395 620
396bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const { 621bool MemoryManager::IsFullyMappedRange(GPUVAddr gpu_addr, std::size_t size) const {
397 size_t page_index{gpu_addr >> page_bits}; 622 bool result{true};
398 const size_t page_last{(gpu_addr + size + page_size - 1) >> page_bits}; 623 auto fail = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
399 while (page_index < page_last) { 624 [[maybe_unused]] std::size_t copy_amount) {
400 if (!page_table[page_index].IsValid() || page_table[page_index].ToAddress() == 0) { 625 result = false;
401 return false; 626 return true;
402 } 627 };
403 ++page_index; 628 auto pass = [&]([[maybe_unused]] std::size_t page_index, [[maybe_unused]] std::size_t offset,
404 } 629 [[maybe_unused]] std::size_t copy_amount) { return false; };
405 return true; 630 auto check_short_pages = [&](std::size_t page_index, std::size_t offset,
631 std::size_t copy_amount) {
632 GPUVAddr base = (page_index << big_page_bits) + offset;
633 MemoryOperation<false>(base, copy_amount, pass, pass, fail);
634 return !result;
635 };
636 MemoryOperation<true>(gpu_addr, size, pass, fail, check_short_pages);
637 return result;
406} 638}
407 639
408std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange( 640std::vector<std::pair<GPUVAddr, std::size_t>> MemoryManager::GetSubmappedRange(
409 GPUVAddr gpu_addr, std::size_t size) const { 641 GPUVAddr gpu_addr, std::size_t size) const {
410 std::vector<std::pair<GPUVAddr, std::size_t>> result{}; 642 std::vector<std::pair<GPUVAddr, std::size_t>> result{};
411 size_t page_index{gpu_addr >> page_bits};
412 size_t remaining_size{size};
413 size_t page_offset{gpu_addr & page_mask};
414 std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{}; 643 std::optional<std::pair<GPUVAddr, std::size_t>> last_segment{};
415 std::optional<VAddr> old_page_addr{}; 644 std::optional<VAddr> old_page_addr{};
416 const auto extend_size = [&last_segment, &page_index, &page_offset](std::size_t bytes) { 645 const auto split = [&last_segment, &result]([[maybe_unused]] std::size_t page_index,
417 if (!last_segment) { 646 [[maybe_unused]] std::size_t offset,
418 const GPUVAddr new_base_addr = (page_index << page_bits) + page_offset; 647 [[maybe_unused]] std::size_t copy_amount) {
419 last_segment = {new_base_addr, bytes};
420 } else {
421 last_segment->second += bytes;
422 }
423 };
424 const auto split = [&last_segment, &result] {
425 if (last_segment) { 648 if (last_segment) {
426 result.push_back(*last_segment); 649 result.push_back(*last_segment);
427 last_segment = std::nullopt; 650 last_segment = std::nullopt;
428 } 651 }
429 }; 652 };
430 while (remaining_size > 0) { 653 const auto extend_size_big = [this, &split, &old_page_addr,
431 const size_t num_bytes{std::min(page_size - page_offset, remaining_size)}; 654 &last_segment](std::size_t page_index, std::size_t offset,
432 const auto page_addr{GpuToCpuAddress(page_index << page_bits)}; 655 std::size_t copy_amount) {
433 if (!page_addr || *page_addr == 0) { 656 const VAddr cpu_addr_base =
434 split(); 657 (static_cast<VAddr>(big_page_table_cpu[page_index]) << cpu_page_bits) + offset;
435 } else if (old_page_addr) { 658 if (old_page_addr) {
436 if (*old_page_addr + page_size != *page_addr) { 659 if (*old_page_addr != cpu_addr_base) {
437 split(); 660 split(0, 0, 0);
661 }
662 }
663 old_page_addr = {cpu_addr_base + copy_amount};
664 if (!last_segment) {
665 const GPUVAddr new_base_addr = (page_index << big_page_bits) + offset;
666 last_segment = {new_base_addr, copy_amount};
667 } else {
668 last_segment->second += copy_amount;
669 }
670 };
671 const auto extend_size_short = [this, &split, &old_page_addr,
672 &last_segment](std::size_t page_index, std::size_t offset,
673 std::size_t copy_amount) {
674 const VAddr cpu_addr_base =
675 (static_cast<VAddr>(page_table[page_index]) << cpu_page_bits) + offset;
676 if (old_page_addr) {
677 if (*old_page_addr != cpu_addr_base) {
678 split(0, 0, 0);
438 } 679 }
439 extend_size(num_bytes); 680 }
681 old_page_addr = {cpu_addr_base + copy_amount};
682 if (!last_segment) {
683 const GPUVAddr new_base_addr = (page_index << page_bits) + offset;
684 last_segment = {new_base_addr, copy_amount};
440 } else { 685 } else {
441 extend_size(num_bytes); 686 last_segment->second += copy_amount;
442 } 687 }
443 ++page_index; 688 };
444 page_offset = 0; 689 auto do_short_pages = [&](std::size_t page_index, std::size_t offset, std::size_t copy_amount) {
445 remaining_size -= num_bytes; 690 GPUVAddr base = (page_index << big_page_bits) + offset;
446 old_page_addr = page_addr; 691 MemoryOperation<false>(base, copy_amount, extend_size_short, split, split);
447 } 692 };
448 split(); 693 MemoryOperation<true>(gpu_addr, size, extend_size_big, split, do_short_pages);
694 split(0, 0, 0);
449 return result; 695 return result;
450} 696}
451 697
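
The entry bitmaps driving these walks pack one two-bit EntryType per page, 32 entries per u64 word (hence the / 32 sizing in the constructor). A standalone sketch of that packing; the enumerator values are an assumption, chosen so that the zero-initialized bitmaps mean Free:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class EntryType : std::uint64_t { Free = 0, Reserved = 1, Mapped = 2 };

    EntryType GetPacked(const std::vector<std::uint64_t>& words, std::size_t i) {
        const std::size_t sub = i % 32; // entry slot within its 64-bit word
        return static_cast<EntryType>((words[i / 32] >> (2 * sub)) & 0x3ULL);
    }

    void SetPacked(std::vector<std::uint64_t>& words, std::size_t i, EntryType e) {
        const std::size_t sub = i % 32;
        std::uint64_t& word = words[i / 32];
        word = (word & ~(3ULL << (2 * sub))) |
               (static_cast<std::uint64_t>(e) << (2 * sub));
    }

Compared to the old PageEntry scheme, the state bitmap no longer stores any address bits; the actual CPU page numbers live in the separate page_table and big_page_table_cpu arrays.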
diff --git a/src/video_core/memory_manager.h b/src/video_core/memory_manager.h
index 74f9ce175..f992e29f3 100644
--- a/src/video_core/memory_manager.h
+++ b/src/video_core/memory_manager.h
@@ -3,73 +3,39 @@
3 3
4#pragma once 4#pragma once
5 5
6#include <atomic>
6#include <map> 7#include <map>
7#include <optional> 8#include <optional>
8#include <vector> 9#include <vector>
9 10
10#include "common/common_types.h" 11#include "common/common_types.h"
12#include "common/multi_level_page_table.h"
13#include "common/virtual_buffer.h"
11 14
12namespace VideoCore { 15namespace VideoCore {
13class RasterizerInterface; 16class RasterizerInterface;
14} 17}
15 18
16namespace Core { 19namespace Core {
20class DeviceMemory;
21namespace Memory {
22class Memory;
23} // namespace Memory
17class System; 24class System;
18} 25} // namespace Core
19 26
20namespace Tegra { 27namespace Tegra {
21 28
22class PageEntry final {
23public:
24 enum class State : u32 {
25 Unmapped = static_cast<u32>(-1),
26 Allocated = static_cast<u32>(-2),
27 };
28
29 constexpr PageEntry() = default;
30 constexpr PageEntry(State state_) : state{state_} {}
31 constexpr PageEntry(VAddr addr) : state{static_cast<State>(addr >> ShiftBits)} {}
32
33 [[nodiscard]] constexpr bool IsUnmapped() const {
34 return state == State::Unmapped;
35 }
36
37 [[nodiscard]] constexpr bool IsAllocated() const {
38 return state == State::Allocated;
39 }
40
41 [[nodiscard]] constexpr bool IsValid() const {
42 return !IsUnmapped() && !IsAllocated();
43 }
44
45 [[nodiscard]] constexpr VAddr ToAddress() const {
46 if (!IsValid()) {
47 return {};
48 }
49
50 return static_cast<VAddr>(state) << ShiftBits;
51 }
52
53 [[nodiscard]] constexpr PageEntry operator+(u64 offset) const {
54 // If this is a reserved value, offsets do not apply
55 if (!IsValid()) {
56 return *this;
57 }
58 return PageEntry{(static_cast<VAddr>(state) << ShiftBits) + offset};
59 }
60
61private:
62 static constexpr std::size_t ShiftBits{12};
63
64 State state{State::Unmapped};
65};
66static_assert(sizeof(PageEntry) == 4, "PageEntry is too large");
67
68class MemoryManager final { 29class MemoryManager final {
69public: 30public:
70 explicit MemoryManager(Core::System& system_); 31 explicit MemoryManager(Core::System& system_, u64 address_space_bits_ = 40,
32 u64 big_page_bits_ = 16, u64 page_bits_ = 12);
71 ~MemoryManager(); 33 ~MemoryManager();
72 34
35 size_t GetID() const {
36 return unique_identifier;
37 }
38
73 /// Binds a renderer to the memory manager. 39 /// Binds a renderer to the memory manager.
74 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer); 40 void BindRasterizer(VideoCore::RasterizerInterface* rasterizer);
75 41
@@ -86,9 +52,6 @@ public:
86 [[nodiscard]] u8* GetPointer(GPUVAddr addr); 52 [[nodiscard]] u8* GetPointer(GPUVAddr addr);
87 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const; 53 [[nodiscard]] const u8* GetPointer(GPUVAddr addr) const;
88 54
89 /// Returns the number of bytes until the end of the memory map containing the given GPU address
90 [[nodiscard]] size_t BytesToMapEnd(GPUVAddr gpu_addr) const noexcept;
91
92 /** 55 /**
93 * ReadBlock and WriteBlock are full read and write operations over virtual 56 * ReadBlock and WriteBlock are full read and write operations over virtual
94 * GPU Memory. It's important to use these when GPU memory may not be continuous 57 * GPU Memory. It's important to use these when GPU memory may not be continuous
@@ -135,54 +98,95 @@ public:
135 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr, 98 std::vector<std::pair<GPUVAddr, std::size_t>> GetSubmappedRange(GPUVAddr gpu_addr,
136 std::size_t size) const; 99 std::size_t size) const;
137 100
138 [[nodiscard]] GPUVAddr Map(VAddr cpu_addr, GPUVAddr gpu_addr, std::size_t size); 101 GPUVAddr Map(GPUVAddr gpu_addr, VAddr cpu_addr, std::size_t size, bool is_big_pages = true);
139 [[nodiscard]] GPUVAddr MapAllocate(VAddr cpu_addr, std::size_t size, std::size_t align); 102 GPUVAddr MapSparse(GPUVAddr gpu_addr, std::size_t size, bool is_big_pages = true);
140 [[nodiscard]] GPUVAddr MapAllocate32(VAddr cpu_addr, std::size_t size);
141 [[nodiscard]] std::optional<GPUVAddr> AllocateFixed(GPUVAddr gpu_addr, std::size_t size);
142 [[nodiscard]] GPUVAddr Allocate(std::size_t size, std::size_t align);
143 void Unmap(GPUVAddr gpu_addr, std::size_t size); 103 void Unmap(GPUVAddr gpu_addr, std::size_t size);
144 104
145 void FlushRegion(GPUVAddr gpu_addr, size_t size) const; 105 void FlushRegion(GPUVAddr gpu_addr, size_t size) const;
146 106
107 void InvalidateRegion(GPUVAddr gpu_addr, size_t size) const;
108
109 bool IsMemoryDirty(GPUVAddr gpu_addr, size_t size) const;
110
111 size_t MaxContinousRange(GPUVAddr gpu_addr, size_t size) const;
112
113 bool IsWithinGPUAddressRange(GPUVAddr gpu_addr) const {
114 return gpu_addr < address_space_size;
115 }
116
147private: 117private:
148 [[nodiscard]] PageEntry GetPageEntry(GPUVAddr gpu_addr) const; 118 template <bool is_big_pages, typename FuncMapped, typename FuncReserved, typename FuncUnmapped>
149 void SetPageEntry(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size = page_size); 119 inline void MemoryOperation(GPUVAddr gpu_src_addr, std::size_t size, FuncMapped&& func_mapped,
150 GPUVAddr UpdateRange(GPUVAddr gpu_addr, PageEntry page_entry, std::size_t size); 120 FuncReserved&& func_reserved, FuncUnmapped&& func_unmapped) const;
151 [[nodiscard]] std::optional<GPUVAddr> FindFreeRange(std::size_t size, std::size_t align, 121
152 bool start_32bit_address = false) const; 122 template <bool is_safe>
153 123 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size) const;
154 void TryLockPage(PageEntry page_entry, std::size_t size); 124
155 void TryUnlockPage(PageEntry page_entry, std::size_t size); 125 template <bool is_safe>
156 126 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size);
157 void ReadBlockImpl(GPUVAddr gpu_src_addr, void* dest_buffer, std::size_t size, 127
158 bool is_safe) const; 128 template <bool is_big_page>
159 void WriteBlockImpl(GPUVAddr gpu_dest_addr, const void* src_buffer, std::size_t size, 129 [[nodiscard]] std::size_t PageEntryIndex(GPUVAddr gpu_addr) const {
160 bool is_safe); 130 if constexpr (is_big_page) {
161 131 return (gpu_addr >> big_page_bits) & big_page_table_mask;
162 [[nodiscard]] static constexpr std::size_t PageEntryIndex(GPUVAddr gpu_addr) { 132 } else {
163 return (gpu_addr >> page_bits) & page_table_mask; 133 return (gpu_addr >> page_bits) & page_table_mask;
134 }
164 } 135 }
165 136
166 static constexpr u64 address_space_size = 1ULL << 40; 137 inline bool IsBigPageContinous(size_t big_page_index) const;
167 static constexpr u64 address_space_start = 1ULL << 32; 138 inline void SetBigPageContinous(size_t big_page_index, bool value);
168 static constexpr u64 address_space_start_low = 1ULL << 16;
169 static constexpr u64 page_bits{16};
170 static constexpr u64 page_size{1 << page_bits};
171 static constexpr u64 page_mask{page_size - 1};
172 static constexpr u64 page_table_bits{24};
173 static constexpr u64 page_table_size{1 << page_table_bits};
174 static constexpr u64 page_table_mask{page_table_size - 1};
175 139
176 Core::System& system; 140 Core::System& system;
141 Core::Memory::Memory& memory;
142 Core::DeviceMemory& device_memory;
143
144 const u64 address_space_bits;
145 const u64 page_bits;
146 u64 address_space_size;
147 u64 page_size;
148 u64 page_mask;
149 u64 page_table_mask;
150 static constexpr u64 cpu_page_bits{12};
151
152 const u64 big_page_bits;
153 u64 big_page_size;
154 u64 big_page_mask;
155 u64 big_page_table_mask;
177 156
178 VideoCore::RasterizerInterface* rasterizer = nullptr; 157 VideoCore::RasterizerInterface* rasterizer = nullptr;
179 158
180 std::vector<PageEntry> page_table; 159 enum class EntryType : u64 {
160 Free = 0,
161 Reserved = 1,
162 Mapped = 2,
163 };
164
165 std::vector<u64> entries;
166 std::vector<u64> big_entries;
167
168 template <EntryType entry_type>
169 GPUVAddr PageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
170
171 template <EntryType entry_type>
172 GPUVAddr BigPageTableOp(GPUVAddr gpu_addr, [[maybe_unused]] VAddr cpu_addr, size_t size);
173
174 template <bool is_big_page>
175 inline EntryType GetEntry(size_t position) const;
176
177 template <bool is_big_page>
178 inline void SetEntry(size_t position, EntryType entry);
179
180 Common::MultiLevelPageTable<u32> page_table;
181 Common::VirtualBuffer<u32> big_page_table_cpu;
182
183 std::vector<u64> big_page_continous;
184
185 constexpr static size_t continous_bits = 64;
181 186
182 using MapRange = std::pair<GPUVAddr, size_t>; 187 const size_t unique_identifier;
183 std::vector<MapRange> map_ranges;
184 188
185 std::vector<std::pair<VAddr, std::size_t>> cache_invalidate_queue; 189 static std::atomic<size_t> unique_identifier_generator;
186}; 190};
187 191
188} // namespace Tegra 192} // namespace Tegra
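
The header now keeps two page tables at different granularities, and PageEntryIndex<is_big_page> selects the matching shift and mask. A toy illustration of that index arithmetic, using the new constructor defaults (40-bit address space, 64 KiB big pages, 4 KiB small pages); the mask derivation below is an assumption made for the sketch, not code copied from the class.

    #include <cstdint>

    constexpr std::uint64_t address_space_bits = 40;
    constexpr std::uint64_t big_page_bits = 16; // 64 KiB big pages
    constexpr std::uint64_t page_bits = 12;     // 4 KiB small pages
    constexpr std::uint64_t big_page_table_mask =
        (1ULL << (address_space_bits - big_page_bits)) - 1;
    constexpr std::uint64_t page_table_mask =
        (1ULL << (address_space_bits - page_bits)) - 1;

    template <bool is_big_page>
    constexpr std::uint64_t PageEntryIndex(std::uint64_t gpu_addr) {
        if constexpr (is_big_page) {
            return (gpu_addr >> big_page_bits) & big_page_table_mask;
        } else {
            return (gpu_addr >> page_bits) & page_table_mask;
        }
    }

    static_assert(PageEntryIndex<true>(0x12345678) == 0x1234);
    static_assert(PageEntryIndex<false>(0x12345678) == 0x12345);
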
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 889b606b3..b0ebe71b7 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -17,6 +17,7 @@
17 17
18#include "common/assert.h" 18#include "common/assert.h"
19#include "common/settings.h" 19#include "common/settings.h"
20#include "video_core/control/channel_state_cache.h"
20#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
21#include "video_core/memory_manager.h" 22#include "video_core/memory_manager.h"
22#include "video_core/rasterizer_interface.h" 23#include "video_core/rasterizer_interface.h"
@@ -90,13 +91,10 @@ private:
90}; 91};
91 92
92template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter> 93template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
93class QueryCacheBase { 94class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
94public: 95public:
95 explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_, 96 explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_)
96 Tegra::Engines::Maxwell3D& maxwell3d_, 97 : rasterizer{rasterizer_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
97 Tegra::MemoryManager& gpu_memory_)
98 : rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
99 gpu_memory{gpu_memory_}, streams{{CounterStream{static_cast<QueryCache&>(*this),
100 VideoCore::QueryType::SamplesPassed}}} {} 98 VideoCore::QueryType::SamplesPassed}}} {}
101 99
102 void InvalidateRegion(VAddr addr, std::size_t size) { 100 void InvalidateRegion(VAddr addr, std::size_t size) {
@@ -117,13 +115,13 @@ public:
117 */ 115 */
118 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) { 116 void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) {
119 std::unique_lock lock{mutex}; 117 std::unique_lock lock{mutex};
120 const std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr); 118 const std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
121 ASSERT(cpu_addr); 119 ASSERT(cpu_addr);
122 120
123 CachedQuery* query = TryGet(*cpu_addr); 121 CachedQuery* query = TryGet(*cpu_addr);
124 if (!query) { 122 if (!query) {
125 ASSERT_OR_EXECUTE(cpu_addr, return;); 123 ASSERT_OR_EXECUTE(cpu_addr, return;);
126 u8* const host_ptr = gpu_memory.GetPointer(gpu_addr); 124 u8* const host_ptr = gpu_memory->GetPointer(gpu_addr);
127 125
128 query = Register(type, *cpu_addr, host_ptr, timestamp.has_value()); 126 query = Register(type, *cpu_addr, host_ptr, timestamp.has_value());
129 } 127 }
@@ -137,8 +135,10 @@ public:
137 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch. 135 /// Updates counters from GPU state. Expected to be called once per draw, clear or dispatch.
138 void UpdateCounters() { 136 void UpdateCounters() {
139 std::unique_lock lock{mutex}; 137 std::unique_lock lock{mutex};
140 const auto& regs = maxwell3d.regs; 138 if (maxwell3d) {
141 Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable); 139 const auto& regs = maxwell3d->regs;
140 Stream(VideoCore::QueryType::SamplesPassed).Update(regs.samplecnt_enable);
141 }
142 } 142 }
143 143
144 /// Resets a counter to zero. It doesn't disable the query after resetting. 144 /// Resets a counter to zero. It doesn't disable the query after resetting.
@@ -264,8 +264,6 @@ private:
264 static constexpr unsigned YUZU_PAGEBITS = 12; 264 static constexpr unsigned YUZU_PAGEBITS = 12;
265 265
266 VideoCore::RasterizerInterface& rasterizer; 266 VideoCore::RasterizerInterface& rasterizer;
267 Tegra::Engines::Maxwell3D& maxwell3d;
268 Tegra::MemoryManager& gpu_memory;
269 267
270 std::recursive_mutex mutex; 268 std::recursive_mutex mutex;
271 269
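
With QueryCacheBase inheriting ChannelSetupCaches, maxwell3d and gpu_memory become per-channel pointers instead of construction-time references, which is why UpdateCounters now guards on maxwell3d. A small mock of that bind-then-use pattern; the names are illustrative, not the real ChannelSetupCaches interface.

    struct Maxwell3DRegs {
        bool samplecnt_enable = false;
    };
    struct Maxwell3D {
        Maxwell3DRegs regs;
    };

    class ChannelBoundCache {
    public:
        void BindChannel(Maxwell3D* engine) {
            maxwell3d = engine; // swapped whenever the active channel changes
        }
        void UpdateCounters() {
            if (maxwell3d) { // no channel may be bound during early init/teardown
                (void)maxwell3d->regs.samplecnt_enable;
            }
        }

    private:
        Maxwell3D* maxwell3d = nullptr; // owned by the channel state, not the cache
    };
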
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index a04a76481..d2d40884c 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -16,6 +16,9 @@ class MemoryManager;
16namespace Engines { 16namespace Engines {
17class AccelerateDMAInterface; 17class AccelerateDMAInterface;
18} 18}
19namespace Control {
20struct ChannelState;
21}
19} // namespace Tegra 22} // namespace Tegra
20 23
21namespace VideoCore { 24namespace VideoCore {
@@ -59,7 +62,10 @@ public:
59 virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0; 62 virtual void DisableGraphicsUniformBuffer(size_t stage, u32 index) = 0;
60 63
61 /// Signal a GPU based semaphore as a fence 64 /// Signal a GPU based semaphore as a fence
62 virtual void SignalSemaphore(GPUVAddr addr, u32 value) = 0; 65 virtual void SignalFence(std::function<void()>&& func) = 0;
66
67 /// Send an operation to be done after a certain amount of flushes.
68 virtual void SyncOperation(std::function<void()>&& func) = 0;
63 69
64 /// Signal a GPU based syncpoint as a fence 70 /// Signal a GPU based syncpoint as a fence
65 virtual void SignalSyncPoint(u32 value) = 0; 71 virtual void SignalSyncPoint(u32 value) = 0;
@@ -86,13 +92,13 @@ public:
86 virtual void OnCPUWrite(VAddr addr, u64 size) = 0; 92 virtual void OnCPUWrite(VAddr addr, u64 size) = 0;
87 93
88 /// Sync memory between guest and host. 94 /// Sync memory between guest and host.
89 virtual void SyncGuestHost() = 0; 95 virtual void InvalidateGPUCache() = 0;
90 96
91 /// Unmap memory range 97 /// Unmap memory range
92 virtual void UnmapMemory(VAddr addr, u64 size) = 0; 98 virtual void UnmapMemory(VAddr addr, u64 size) = 0;
93 99
94 /// Remap GPU memory range. This means underneath backing memory changed 100 /// Remap GPU memory range. This means underneath backing memory changed
95 virtual void ModifyGPUMemory(GPUVAddr addr, u64 size) = 0; 101 virtual void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) = 0;
96 102
97 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory 103 /// Notify rasterizer that any caches of the specified region should be flushed to Switch memory
98 /// and invalidated 104 /// and invalidated
@@ -123,7 +129,7 @@ public:
123 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0; 129 [[nodiscard]] virtual Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() = 0;
124 130
125 virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 131 virtual void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
126 std::span<u8> memory) = 0; 132 std::span<const u8> memory) = 0;
127 133
128 /// Attempt to use a faster method to display the framebuffer to screen 134 /// Attempt to use a faster method to display the framebuffer to screen
129 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config, 135 [[nodiscard]] virtual bool AccelerateDisplay(const Tegra::FramebufferConfig& config,
@@ -137,5 +143,11 @@ public:
137 /// Initialize disk cached resources for the game being emulated 143 /// Initialize disk cached resources for the game being emulated
138 virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 144 virtual void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
139 const DiskResourceLoadCallback& callback) {} 145 const DiskResourceLoadCallback& callback) {}
146
147 virtual void InitializeChannel(Tegra::Control::ChannelState& channel) {}
148
149 virtual void BindChannel(Tegra::Control::ChannelState& channel) {}
150
151 virtual void ReleaseChannel(s32 channel_id) {}
140}; 152};
141} // namespace VideoCore 153} // namespace VideoCore
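
SignalSemaphore(addr, value) gives way to the more general SignalFence(std::function<void()>&&): the caller packages whatever write the fence should perform into a closure that runs once the fence is known signaled, and SyncOperation defers arbitrary work behind pending flushes. A rough stand-in for that deferred-callback flow; FenceQueue is hypothetical, not the real FenceManager.

    #include <deque>
    #include <functional>

    class FenceQueue {
    public:
        void SignalFence(std::function<void()>&& func) {
            pending.push_back(std::move(func)); // one entry per queued fence
        }
        void PopSignaledFences() { // called when the host GPU reports completion
            while (!pending.empty()) {
                pending.front()(); // e.g. writes the semaphore payload to guest memory
                pending.pop_front();
            }
        }

    private:
        std::deque<std::function<void()>> pending;
    };
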
diff --git a/src/video_core/renderer_opengl/gl_buffer_cache.cpp b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
index 32450ee1d..08f4d69ab 100644
--- a/src/video_core/renderer_opengl/gl_buffer_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_buffer_cache.cpp
@@ -168,7 +168,7 @@ void BufferCacheRuntime::BindIndexBuffer(Buffer& buffer, u32 offset, u32 size) {
168 if (has_unified_vertex_buffers) { 168 if (has_unified_vertex_buffers) {
169 buffer.MakeResident(GL_READ_ONLY); 169 buffer.MakeResident(GL_READ_ONLY);
170 glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset, 170 glBufferAddressRangeNV(GL_ELEMENT_ARRAY_ADDRESS_NV, 0, buffer.HostGpuAddr() + offset,
171 static_cast<GLsizeiptr>(size)); 171 static_cast<GLsizeiptr>(Common::AlignUp(size, 4)));
172 } else { 172 } else {
173 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle()); 173 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer.Handle());
174 index_buffer_offset = offset; 174 index_buffer_offset = offset;
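
The index-buffer range bound through glBufferAddressRangeNV is now rounded up to a 4-byte multiple via Common::AlignUp. The arithmetic behind that rounding, shown standalone; the one-liner below is the usual formula, not necessarily yuzu's exact implementation.

    #include <cstdint>

    constexpr std::uint64_t AlignUp(std::uint64_t value, std::uint64_t align) {
        return (value + align - 1) / align * align; // round up to a multiple of align
    }

    static_assert(AlignUp(13, 4) == 16);
    static_assert(AlignUp(16, 4) == 16);
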
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
index 1f0f156ed..26d066004 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.cpp
@@ -28,12 +28,11 @@ bool ComputePipelineKey::operator==(const ComputePipelineKey& rhs) const noexcep
28} 28}
29 29
30ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_, 30ComputePipeline::ComputePipeline(const Device& device, TextureCache& texture_cache_,
31 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 31 BufferCache& buffer_cache_, ProgramManager& program_manager_,
32 Tegra::Engines::KeplerCompute& kepler_compute_, 32 const Shader::Info& info_, std::string code,
33 ProgramManager& program_manager_, const Shader::Info& info_, 33 std::vector<u32> code_v)
34 std::string code, std::vector<u32> code_v) 34 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_},
35 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, gpu_memory{gpu_memory_}, 35 program_manager{program_manager_}, info{info_} {
36 kepler_compute{kepler_compute_}, program_manager{program_manager_}, info{info_} {
37 switch (device.GetShaderBackend()) { 36 switch (device.GetShaderBackend()) {
38 case Settings::ShaderBackend::GLSL: 37 case Settings::ShaderBackend::GLSL:
39 source_program = CreateProgram(code, GL_COMPUTE_SHADER); 38 source_program = CreateProgram(code, GL_COMPUTE_SHADER);
@@ -86,7 +85,7 @@ void ComputePipeline::Configure() {
86 GLsizei texture_binding{}; 85 GLsizei texture_binding{};
87 GLsizei image_binding{}; 86 GLsizei image_binding{};
88 87
89 const auto& qmd{kepler_compute.launch_description}; 88 const auto& qmd{kepler_compute->launch_description};
90 const auto& cbufs{qmd.const_buffer_config}; 89 const auto& cbufs{qmd.const_buffer_config};
91 const bool via_header_index{qmd.linked_tsc != 0}; 90 const bool via_header_index{qmd.linked_tsc != 0};
92 const auto read_handle{[&](const auto& desc, u32 index) { 91 const auto read_handle{[&](const auto& desc, u32 index) {
@@ -101,12 +100,13 @@ void ComputePipeline::Configure() {
101 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset}; 100 const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
102 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() + 101 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
103 secondary_offset}; 102 secondary_offset};
104 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 103 const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
105 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 104 const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
105 << desc.secondary_shift_left};
106 return TexturePair(lhs_raw | rhs_raw, via_header_index); 106 return TexturePair(lhs_raw | rhs_raw, via_header_index);
107 } 107 }
108 } 108 }
109 return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); 109 return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
110 }}; 110 }};
111 const auto add_image{[&](const auto& desc, bool blacklist) { 111 const auto add_image{[&](const auto& desc, bool blacklist) {
112 for (u32 index = 0; index < desc.count; ++index) { 112 for (u32 index = 0; index < desc.count; ++index) {
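
The separate-sampler read above now shifts each 32-bit half by desc.shift_left / desc.secondary_shift_left before OR-ing them into a single texture handle. A sketch of that combine step; the descriptor struct is a stand-in, with field names mirroring the diff.

    #include <cstdint>

    struct SeparateCbufDesc {
        std::uint32_t shift_left;           // applied to the primary cbuf word
        std::uint32_t secondary_shift_left; // applied to the secondary cbuf word
    };

    constexpr std::uint32_t CombineHandles(std::uint32_t lhs_raw, std::uint32_t rhs_raw,
                                           const SeparateCbufDesc& desc) {
        return (lhs_raw << desc.shift_left) | (rhs_raw << desc.secondary_shift_left);
    }
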
diff --git a/src/video_core/renderer_opengl/gl_compute_pipeline.h b/src/video_core/renderer_opengl/gl_compute_pipeline.h
index 723f27f11..6534dec32 100644
--- a/src/video_core/renderer_opengl/gl_compute_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_compute_pipeline.h
@@ -49,10 +49,8 @@ static_assert(std::is_trivially_constructible_v<ComputePipelineKey>);
49class ComputePipeline { 49class ComputePipeline {
50public: 50public:
51 explicit ComputePipeline(const Device& device, TextureCache& texture_cache_, 51 explicit ComputePipeline(const Device& device, TextureCache& texture_cache_,
52 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 52 BufferCache& buffer_cache_, ProgramManager& program_manager_,
53 Tegra::Engines::KeplerCompute& kepler_compute_, 53 const Shader::Info& info_, std::string code, std::vector<u32> code_v);
54 ProgramManager& program_manager_, const Shader::Info& info_,
55 std::string code, std::vector<u32> code_v);
56 54
57 void Configure(); 55 void Configure();
58 56
@@ -60,11 +58,17 @@ public:
60 return writes_global_memory; 58 return writes_global_memory;
61 } 59 }
62 60
61 void SetEngine(Tegra::Engines::KeplerCompute* kepler_compute_,
62 Tegra::MemoryManager* gpu_memory_) {
63 kepler_compute = kepler_compute_;
64 gpu_memory = gpu_memory_;
65 }
66
63private: 67private:
64 TextureCache& texture_cache; 68 TextureCache& texture_cache;
65 BufferCache& buffer_cache; 69 BufferCache& buffer_cache;
66 Tegra::MemoryManager& gpu_memory; 70 Tegra::MemoryManager* gpu_memory;
67 Tegra::Engines::KeplerCompute& kepler_compute; 71 Tegra::Engines::KeplerCompute* kepler_compute;
68 ProgramManager& program_manager; 72 ProgramManager& program_manager;
69 73
70 Shader::Info info; 74 Shader::Info info;
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.cpp b/src/video_core/renderer_opengl/gl_fence_manager.cpp
index 6e82c2e28..91463f854 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.cpp
+++ b/src/video_core/renderer_opengl/gl_fence_manager.cpp
@@ -10,10 +10,7 @@
10 10
11namespace OpenGL { 11namespace OpenGL {
12 12
13GLInnerFence::GLInnerFence(u32 payload_, bool is_stubbed_) : FenceBase{payload_, is_stubbed_} {} 13GLInnerFence::GLInnerFence(bool is_stubbed_) : FenceBase{is_stubbed_} {}
14
15GLInnerFence::GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_)
16 : FenceBase{address_, payload_, is_stubbed_} {}
17 14
18GLInnerFence::~GLInnerFence() = default; 15GLInnerFence::~GLInnerFence() = default;
19 16
@@ -48,12 +45,8 @@ FenceManagerOpenGL::FenceManagerOpenGL(VideoCore::RasterizerInterface& rasterize
48 BufferCache& buffer_cache_, QueryCache& query_cache_) 45 BufferCache& buffer_cache_, QueryCache& query_cache_)
49 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {} 46 : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_} {}
50 47
51Fence FenceManagerOpenGL::CreateFence(u32 value, bool is_stubbed) { 48Fence FenceManagerOpenGL::CreateFence(bool is_stubbed) {
52 return std::make_shared<GLInnerFence>(value, is_stubbed); 49 return std::make_shared<GLInnerFence>(is_stubbed);
53}
54
55Fence FenceManagerOpenGL::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
56 return std::make_shared<GLInnerFence>(addr, value, is_stubbed);
57} 50}
58 51
59void FenceManagerOpenGL::QueueFence(Fence& fence) { 52void FenceManagerOpenGL::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_opengl/gl_fence_manager.h b/src/video_core/renderer_opengl/gl_fence_manager.h
index 14ff00db2..f1446e732 100644
--- a/src/video_core/renderer_opengl/gl_fence_manager.h
+++ b/src/video_core/renderer_opengl/gl_fence_manager.h
@@ -16,8 +16,7 @@ namespace OpenGL {
16 16
17class GLInnerFence : public VideoCommon::FenceBase { 17class GLInnerFence : public VideoCommon::FenceBase {
18public: 18public:
19 explicit GLInnerFence(u32 payload_, bool is_stubbed_); 19 explicit GLInnerFence(bool is_stubbed_);
20 explicit GLInnerFence(GPUVAddr address_, u32 payload_, bool is_stubbed_);
21 ~GLInnerFence(); 20 ~GLInnerFence();
22 21
23 void Queue(); 22 void Queue();
@@ -40,8 +39,7 @@ public:
40 QueryCache& query_cache); 39 QueryCache& query_cache);
41 40
42protected: 41protected:
43 Fence CreateFence(u32 value, bool is_stubbed) override; 42 Fence CreateFence(bool is_stubbed) override;
44 Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
45 void QueueFence(Fence& fence) override; 43 void QueueFence(Fence& fence) override;
46 bool IsFenceSignaled(Fence& fence) const override; 44 bool IsFenceSignaled(Fence& fence) const override;
47 void WaitFence(Fence& fence) override; 45 void WaitFence(Fence& fence) override;
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
index 67eae369d..41493a7da 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.cpp
@@ -169,15 +169,15 @@ ConfigureFuncPtr ConfigureFunc(const std::array<Shader::Info, 5>& infos, u32 ena
169} 169}
170} // Anonymous namespace 170} // Anonymous namespace
171 171
172GraphicsPipeline::GraphicsPipeline( 172GraphicsPipeline::GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
173 const Device& device, TextureCache& texture_cache_, BufferCache& buffer_cache_, 173 BufferCache& buffer_cache_, ProgramManager& program_manager_,
174 Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_, 174 StateTracker& state_tracker_, ShaderWorker* thread_worker,
175 ProgramManager& program_manager_, StateTracker& state_tracker_, ShaderWorker* thread_worker, 175 VideoCore::ShaderNotify* shader_notify,
176 VideoCore::ShaderNotify* shader_notify, std::array<std::string, 5> sources, 176 std::array<std::string, 5> sources,
177 std::array<std::vector<u32>, 5> sources_spirv, const std::array<const Shader::Info*, 5>& infos, 177 std::array<std::vector<u32>, 5> sources_spirv,
178 const GraphicsPipelineKey& key_) 178 const std::array<const Shader::Info*, 5>& infos,
179 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, 179 const GraphicsPipelineKey& key_)
180 gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, program_manager{program_manager_}, 180 : texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
181 state_tracker{state_tracker_}, key{key_} { 181 state_tracker{state_tracker_}, key{key_} {
182 if (shader_notify) { 182 if (shader_notify) {
183 shader_notify->MarkShaderBuilding(); 183 shader_notify->MarkShaderBuilding();
@@ -285,7 +285,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
285 buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings); 285 buffer_cache.runtime.SetBaseStorageBindings(base_storage_bindings);
286 buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers); 286 buffer_cache.runtime.SetEnableStorageBuffers(use_storage_buffers);
287 287
288 const auto& regs{maxwell3d.regs}; 288 const auto& regs{maxwell3d->regs};
289 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex}; 289 const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
290 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE { 290 const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
291 const Shader::Info& info{stage_infos[stage]}; 291 const Shader::Info& info{stage_infos[stage]};
@@ -299,7 +299,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
299 ++ssbo_index; 299 ++ssbo_index;
300 } 300 }
301 } 301 }
302 const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers}; 302 const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
303 const auto read_handle{[&](const auto& desc, u32 index) { 303 const auto read_handle{[&](const auto& desc, u32 index) {
304 ASSERT(cbufs[desc.cbuf_index].enabled); 304 ASSERT(cbufs[desc.cbuf_index].enabled);
305 const u32 index_offset{index << desc.size_shift}; 305 const u32 index_offset{index << desc.size_shift};
@@ -312,13 +312,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
312 const u32 second_offset{desc.secondary_cbuf_offset + index_offset}; 312 const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
313 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address + 313 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
314 second_offset}; 314 second_offset};
315 const u32 lhs_raw{gpu_memory.Read<u32>(addr)}; 315 const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
316 const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)}; 316 const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
317 << desc.secondary_shift_left};
317 const u32 raw{lhs_raw | rhs_raw}; 318 const u32 raw{lhs_raw | rhs_raw};
318 return TexturePair(raw, via_header_index); 319 return TexturePair(raw, via_header_index);
319 } 320 }
320 } 321 }
321 return TexturePair(gpu_memory.Read<u32>(addr), via_header_index); 322 return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
322 }}; 323 }};
323 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE { 324 const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
324 for (u32 index = 0; index < desc.count; ++index) { 325 for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_opengl/gl_graphics_pipeline.h b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
index 4ec15b966..a0f0e63cb 100644
--- a/src/video_core/renderer_opengl/gl_graphics_pipeline.h
+++ b/src/video_core/renderer_opengl/gl_graphics_pipeline.h
@@ -71,10 +71,9 @@ static_assert(std::is_trivially_constructible_v<GraphicsPipelineKey>);
71class GraphicsPipeline { 71class GraphicsPipeline {
72public: 72public:
73 explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_, 73 explicit GraphicsPipeline(const Device& device, TextureCache& texture_cache_,
74 BufferCache& buffer_cache_, Tegra::MemoryManager& gpu_memory_, 74 BufferCache& buffer_cache_, ProgramManager& program_manager_,
75 Tegra::Engines::Maxwell3D& maxwell3d_, 75 StateTracker& state_tracker_, ShaderWorker* thread_worker,
76 ProgramManager& program_manager_, StateTracker& state_tracker_, 76 VideoCore::ShaderNotify* shader_notify,
77 ShaderWorker* thread_worker, VideoCore::ShaderNotify* shader_notify,
78 std::array<std::string, 5> sources, 77 std::array<std::string, 5> sources,
79 std::array<std::vector<u32>, 5> sources_spirv, 78 std::array<std::vector<u32>, 5> sources_spirv,
80 const std::array<const Shader::Info*, 5>& infos, 79 const std::array<const Shader::Info*, 5>& infos,
@@ -107,6 +106,11 @@ public:
107 }; 106 };
108 } 107 }
109 108
109 void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
110 maxwell3d = maxwell3d_;
111 gpu_memory = gpu_memory_;
112 }
113
110private: 114private:
111 template <typename Spec> 115 template <typename Spec>
112 void ConfigureImpl(bool is_indexed); 116 void ConfigureImpl(bool is_indexed);
@@ -119,8 +123,8 @@ private:
119 123
120 TextureCache& texture_cache; 124 TextureCache& texture_cache;
121 BufferCache& buffer_cache; 125 BufferCache& buffer_cache;
122 Tegra::MemoryManager& gpu_memory; 126 Tegra::MemoryManager* gpu_memory;
123 Tegra::Engines::Maxwell3D& maxwell3d; 127 Tegra::Engines::Maxwell3D* maxwell3d;
124 ProgramManager& program_manager; 128 ProgramManager& program_manager;
125 StateTracker& state_tracker; 129 StateTracker& state_tracker;
126 const GraphicsPipelineKey key; 130 const GraphicsPipelineKey key;
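
Because a pipeline can now be shared across channels, engine state is injected per draw through SetEngine() instead of being captured at construction. A minimal mock of the call order the rasterizer follows; the types are placeholders, not the real pipeline classes.

    struct Maxwell3D {};
    struct MemoryManager {};

    class Pipeline {
    public:
        void SetEngine(Maxwell3D* maxwell3d_, MemoryManager* gpu_memory_) {
            maxwell3d = maxwell3d_;
            gpu_memory = gpu_memory_;
        }
        void Configure(bool /*is_indexed*/) {
            // reads regs and const buffers through maxwell3d/gpu_memory
        }

    private:
        Maxwell3D* maxwell3d = nullptr;
        MemoryManager* gpu_memory = nullptr;
    };

    void Draw(Pipeline& pipeline, Maxwell3D& current_3d, MemoryManager& current_mm) {
        pipeline.SetEngine(&current_3d, &current_mm); // rebind every draw: the channel may have switched
        pipeline.Configure(true);
    }
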
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index ed40f5791..5070db441 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -26,9 +26,8 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
26 26
27} // Anonymous namespace 27} // Anonymous namespace
28 28
29QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, 29QueryCache::QueryCache(RasterizerOpenGL& rasterizer_)
30 Tegra::MemoryManager& gpu_memory_) 30 : QueryCacheBase(rasterizer_), gl_rasterizer{rasterizer_} {}
31 : QueryCacheBase(rasterizer_, maxwell3d_, gpu_memory_), gl_rasterizer{rasterizer_} {}
32 31
33QueryCache::~QueryCache() = default; 32QueryCache::~QueryCache() = default;
34 33
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 8a49f1ef0..14ce59990 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -28,8 +28,7 @@ using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
28class QueryCache final 28class QueryCache final
29 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { 29 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
30public: 30public:
31 explicit QueryCache(RasterizerOpenGL& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_, 31 explicit QueryCache(RasterizerOpenGL& rasterizer_);
32 Tegra::MemoryManager& gpu_memory_);
33 ~QueryCache(); 32 ~QueryCache();
34 33
35 OGLQuery AllocateQuery(VideoCore::QueryType type); 34 OGLQuery AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index a0d048b0b..c2d80605d 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -16,7 +16,7 @@
16#include "common/microprofile.h" 16#include "common/microprofile.h"
17#include "common/scope_exit.h" 17#include "common/scope_exit.h"
18#include "common/settings.h" 18#include "common/settings.h"
19 19#include "video_core/control/channel_state.h"
20#include "video_core/engines/kepler_compute.h" 20#include "video_core/engines/kepler_compute.h"
21#include "video_core/engines/maxwell_3d.h" 21#include "video_core/engines/maxwell_3d.h"
22#include "video_core/memory_manager.h" 22#include "video_core/memory_manager.h"
@@ -56,22 +56,20 @@ RasterizerOpenGL::RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra
56 Core::Memory::Memory& cpu_memory_, const Device& device_, 56 Core::Memory::Memory& cpu_memory_, const Device& device_,
57 ScreenInfo& screen_info_, ProgramManager& program_manager_, 57 ScreenInfo& screen_info_, ProgramManager& program_manager_,
58 StateTracker& state_tracker_) 58 StateTracker& state_tracker_)
59 : RasterizerAccelerated(cpu_memory_), gpu(gpu_), maxwell3d(gpu.Maxwell3D()), 59 : RasterizerAccelerated(cpu_memory_), gpu(gpu_), device(device_), screen_info(screen_info_),
60 kepler_compute(gpu.KeplerCompute()), gpu_memory(gpu.MemoryManager()), device(device_), 60 program_manager(program_manager_), state_tracker(state_tracker_),
61 screen_info(screen_info_), program_manager(program_manager_), state_tracker(state_tracker_),
62 texture_cache_runtime(device, program_manager, state_tracker), 61 texture_cache_runtime(device, program_manager, state_tracker),
63 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), 62 texture_cache(texture_cache_runtime, *this), buffer_cache_runtime(device),
64 buffer_cache_runtime(device), 63 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
65 buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), 64 shader_cache(*this, emu_window_, device, texture_cache, buffer_cache, program_manager,
66 shader_cache(*this, emu_window_, maxwell3d, kepler_compute, gpu_memory, device, texture_cache, 65 state_tracker, gpu.ShaderNotify()),
67 buffer_cache, program_manager, state_tracker, gpu.ShaderNotify()), 66 query_cache(*this), accelerate_dma(buffer_cache),
68 query_cache(*this, maxwell3d, gpu_memory), accelerate_dma(buffer_cache),
69 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {} 67 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache) {}
70 68
71RasterizerOpenGL::~RasterizerOpenGL() = default; 69RasterizerOpenGL::~RasterizerOpenGL() = default;
72 70
73void RasterizerOpenGL::SyncVertexFormats() { 71void RasterizerOpenGL::SyncVertexFormats() {
74 auto& flags = maxwell3d.dirty.flags; 72 auto& flags = maxwell3d->dirty.flags;
75 if (!flags[Dirty::VertexFormats]) { 73 if (!flags[Dirty::VertexFormats]) {
76 return; 74 return;
77 } 75 }
@@ -89,7 +87,7 @@ void RasterizerOpenGL::SyncVertexFormats() {
89 } 87 }
90 flags[Dirty::VertexFormat0 + index] = false; 88 flags[Dirty::VertexFormat0 + index] = false;
91 89
92 const auto attrib = maxwell3d.regs.vertex_attrib_format[index]; 90 const auto attrib = maxwell3d->regs.vertex_attrib_format[index];
93 const auto gl_index = static_cast<GLuint>(index); 91 const auto gl_index = static_cast<GLuint>(index);
94 92
95 // Disable constant attributes. 93 // Disable constant attributes.
@@ -113,13 +111,13 @@ void RasterizerOpenGL::SyncVertexFormats() {
113} 111}
114 112
115void RasterizerOpenGL::SyncVertexInstances() { 113void RasterizerOpenGL::SyncVertexInstances() {
116 auto& flags = maxwell3d.dirty.flags; 114 auto& flags = maxwell3d->dirty.flags;
117 if (!flags[Dirty::VertexInstances]) { 115 if (!flags[Dirty::VertexInstances]) {
118 return; 116 return;
119 } 117 }
120 flags[Dirty::VertexInstances] = false; 118 flags[Dirty::VertexInstances] = false;
121 119
122 const auto& regs = maxwell3d.regs; 120 const auto& regs = maxwell3d->regs;
123 for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) { 121 for (std::size_t index = 0; index < NUM_SUPPORTED_VERTEX_ATTRIBUTES; ++index) {
124 if (!flags[Dirty::VertexInstance0 + index]) { 122 if (!flags[Dirty::VertexInstance0 + index]) {
125 continue; 123 continue;
@@ -140,11 +138,11 @@ void RasterizerOpenGL::LoadDiskResources(u64 title_id, std::stop_token stop_load
140 138
141void RasterizerOpenGL::Clear() { 139void RasterizerOpenGL::Clear() {
142 MICROPROFILE_SCOPE(OpenGL_Clears); 140 MICROPROFILE_SCOPE(OpenGL_Clears);
143 if (!maxwell3d.ShouldExecute()) { 141 if (!maxwell3d->ShouldExecute()) {
144 return; 142 return;
145 } 143 }
146 144
147 const auto& regs = maxwell3d.regs; 145 const auto& regs = maxwell3d->regs;
148 bool use_color{}; 146 bool use_color{};
149 bool use_depth{}; 147 bool use_depth{};
150 bool use_stencil{}; 148 bool use_stencil{};
@@ -217,22 +215,26 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
217 if (!pipeline) { 215 if (!pipeline) {
218 return; 216 return;
219 } 217 }
218
219 gpu.TickWork();
220
220 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 221 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
222 pipeline->SetEngine(maxwell3d, gpu_memory);
221 pipeline->Configure(is_indexed); 223 pipeline->Configure(is_indexed);
222 224
223 SyncState(); 225 SyncState();
224 226
225 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d.regs.draw.topology); 227 const GLenum primitive_mode = MaxwellToGL::PrimitiveTopology(maxwell3d->regs.draw.topology);
226 BeginTransformFeedback(pipeline, primitive_mode); 228 BeginTransformFeedback(pipeline, primitive_mode);
227 229
228 const GLuint base_instance = static_cast<GLuint>(maxwell3d.regs.vb_base_instance); 230 const GLuint base_instance = static_cast<GLuint>(maxwell3d->regs.vb_base_instance);
229 const GLsizei num_instances = 231 const GLsizei num_instances =
230 static_cast<GLsizei>(is_instanced ? maxwell3d.mme_draw.instance_count : 1); 232 static_cast<GLsizei>(is_instanced ? maxwell3d->mme_draw.instance_count : 1);
231 if (is_indexed) { 233 if (is_indexed) {
232 const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vb_element_base); 234 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vb_element_base);
233 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.index_array.count); 235 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.index_array.count);
234 const GLvoid* const offset = buffer_cache_runtime.IndexOffset(); 236 const GLvoid* const offset = buffer_cache_runtime.IndexOffset();
235 const GLenum format = MaxwellToGL::IndexFormat(maxwell3d.regs.index_array.format); 237 const GLenum format = MaxwellToGL::IndexFormat(maxwell3d->regs.index_array.format);
236 if (num_instances == 1 && base_instance == 0 && base_vertex == 0) { 238 if (num_instances == 1 && base_instance == 0 && base_vertex == 0) {
237 glDrawElements(primitive_mode, num_vertices, format, offset); 239 glDrawElements(primitive_mode, num_vertices, format, offset);
238 } else if (num_instances == 1 && base_instance == 0) { 240 } else if (num_instances == 1 && base_instance == 0) {
@@ -251,8 +253,8 @@ void RasterizerOpenGL::Draw(bool is_indexed, bool is_instanced) {
251 base_instance); 253 base_instance);
252 } 254 }
253 } else { 255 } else {
254 const GLint base_vertex = static_cast<GLint>(maxwell3d.regs.vertex_buffer.first); 256 const GLint base_vertex = static_cast<GLint>(maxwell3d->regs.vertex_buffer.first);
255 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d.regs.vertex_buffer.count); 257 const GLsizei num_vertices = static_cast<GLsizei>(maxwell3d->regs.vertex_buffer.count);
256 if (num_instances == 1 && base_instance == 0) { 258 if (num_instances == 1 && base_instance == 0) {
257 glDrawArrays(primitive_mode, base_vertex, num_vertices); 259 glDrawArrays(primitive_mode, base_vertex, num_vertices);
258 } else if (base_instance == 0) { 260 } else if (base_instance == 0) {
@@ -273,8 +275,9 @@ void RasterizerOpenGL::DispatchCompute() {
273 if (!pipeline) { 275 if (!pipeline) {
274 return; 276 return;
275 } 277 }
278 pipeline->SetEngine(kepler_compute, gpu_memory);
276 pipeline->Configure(); 279 pipeline->Configure();
277 const auto& qmd{kepler_compute.launch_description}; 280 const auto& qmd{kepler_compute->launch_description};
278 glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z); 281 glDispatchCompute(qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z);
279 ++num_queued_commands; 282 ++num_queued_commands;
280 has_written_global_memory |= pipeline->WritesGlobalMemory(); 283 has_written_global_memory |= pipeline->WritesGlobalMemory();
@@ -359,7 +362,7 @@ void RasterizerOpenGL::OnCPUWrite(VAddr addr, u64 size) {
359 } 362 }
360} 363}
361 364
362void RasterizerOpenGL::SyncGuestHost() { 365void RasterizerOpenGL::InvalidateGPUCache() {
363 MICROPROFILE_SCOPE(OpenGL_CacheManagement); 366 MICROPROFILE_SCOPE(OpenGL_CacheManagement);
364 shader_cache.SyncGuestHost(); 367 shader_cache.SyncGuestHost();
365 { 368 {
@@ -380,40 +383,30 @@ void RasterizerOpenGL::UnmapMemory(VAddr addr, u64 size) {
380 shader_cache.OnCPUWrite(addr, size); 383 shader_cache.OnCPUWrite(addr, size);
381} 384}
382 385
383void RasterizerOpenGL::ModifyGPUMemory(GPUVAddr addr, u64 size) { 386void RasterizerOpenGL::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
384 { 387 {
385 std::scoped_lock lock{texture_cache.mutex}; 388 std::scoped_lock lock{texture_cache.mutex};
386 texture_cache.UnmapGPUMemory(addr, size); 389 texture_cache.UnmapGPUMemory(as_id, addr, size);
387 } 390 }
388} 391}
389 392
390void RasterizerOpenGL::SignalSemaphore(GPUVAddr addr, u32 value) { 393void RasterizerOpenGL::SignalFence(std::function<void()>&& func) {
391 if (!gpu.IsAsync()) { 394 fence_manager.SignalFence(std::move(func));
392 gpu_memory.Write<u32>(addr, value); 395}
393 return; 396
394 } 397void RasterizerOpenGL::SyncOperation(std::function<void()>&& func) {
395 fence_manager.SignalSemaphore(addr, value); 398 fence_manager.SyncOperation(std::move(func));
396} 399}
397 400
398void RasterizerOpenGL::SignalSyncPoint(u32 value) { 401void RasterizerOpenGL::SignalSyncPoint(u32 value) {
399 if (!gpu.IsAsync()) {
400 gpu.IncrementSyncPoint(value);
401 return;
402 }
403 fence_manager.SignalSyncPoint(value); 402 fence_manager.SignalSyncPoint(value);
404} 403}
405 404
406void RasterizerOpenGL::SignalReference() { 405void RasterizerOpenGL::SignalReference() {
407 if (!gpu.IsAsync()) {
408 return;
409 }
410 fence_manager.SignalOrdering(); 406 fence_manager.SignalOrdering();
411} 407}
412 408
413void RasterizerOpenGL::ReleaseFences() { 409void RasterizerOpenGL::ReleaseFences() {
414 if (!gpu.IsAsync()) {
415 return;
416 }
417 fence_manager.WaitPendingFences(); 410 fence_manager.WaitPendingFences();
418} 411}
419 412
@@ -430,6 +423,7 @@ void RasterizerOpenGL::WaitForIdle() {
430} 423}
431 424
432void RasterizerOpenGL::FragmentBarrier() { 425void RasterizerOpenGL::FragmentBarrier() {
426 glTextureBarrier();
433 glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT); 427 glMemoryBarrier(GL_FRAMEBUFFER_BARRIER_BIT | GL_TEXTURE_FETCH_BARRIER_BIT);
434} 428}
435 429
@@ -482,13 +476,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerOpenGL::AccessAccelerateDMA()
482} 476}
483 477
484void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 478void RasterizerOpenGL::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
485 std::span<u8> memory) { 479 std::span<const u8> memory) {
486 auto cpu_addr = gpu_memory.GpuToCpuAddress(address); 480 auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
487 if (!cpu_addr) [[unlikely]] { 481 if (!cpu_addr) [[unlikely]] {
488 gpu_memory.WriteBlock(address, memory.data(), copy_size); 482 gpu_memory->WriteBlock(address, memory.data(), copy_size);
489 return; 483 return;
490 } 484 }
491 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); 485 gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
492 { 486 {
493 std::unique_lock<std::mutex> lock{buffer_cache.mutex}; 487 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
494 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { 488 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -551,8 +545,8 @@ void RasterizerOpenGL::SyncState() {
551} 545}
552 546
553void RasterizerOpenGL::SyncViewport() { 547void RasterizerOpenGL::SyncViewport() {
554 auto& flags = maxwell3d.dirty.flags; 548 auto& flags = maxwell3d->dirty.flags;
555 const auto& regs = maxwell3d.regs; 549 const auto& regs = maxwell3d->regs;
556 550
557 const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports]; 551 const bool rescale_viewports = flags[VideoCommon::Dirty::RescaleViewports];
558 const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports; 552 const bool dirty_viewport = flags[Dirty::Viewports] || rescale_viewports;
@@ -657,23 +651,23 @@ void RasterizerOpenGL::SyncViewport() {
657} 651}
658 652
659void RasterizerOpenGL::SyncDepthClamp() { 653void RasterizerOpenGL::SyncDepthClamp() {
660 auto& flags = maxwell3d.dirty.flags; 654 auto& flags = maxwell3d->dirty.flags;
661 if (!flags[Dirty::DepthClampEnabled]) { 655 if (!flags[Dirty::DepthClampEnabled]) {
662 return; 656 return;
663 } 657 }
664 flags[Dirty::DepthClampEnabled] = false; 658 flags[Dirty::DepthClampEnabled] = false;
665 659
666 oglEnable(GL_DEPTH_CLAMP, maxwell3d.regs.view_volume_clip_control.depth_clamp_disabled == 0); 660 oglEnable(GL_DEPTH_CLAMP, maxwell3d->regs.view_volume_clip_control.depth_clamp_disabled == 0);
667} 661}
668 662
669void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) { 663void RasterizerOpenGL::SyncClipEnabled(u32 clip_mask) {
670 auto& flags = maxwell3d.dirty.flags; 664 auto& flags = maxwell3d->dirty.flags;
671 if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) { 665 if (!flags[Dirty::ClipDistances] && !flags[VideoCommon::Dirty::Shaders]) {
672 return; 666 return;
673 } 667 }
674 flags[Dirty::ClipDistances] = false; 668 flags[Dirty::ClipDistances] = false;
675 669
676 clip_mask &= maxwell3d.regs.clip_distance_enabled; 670 clip_mask &= maxwell3d->regs.clip_distance_enabled;
677 if (clip_mask == last_clip_distance_mask) { 671 if (clip_mask == last_clip_distance_mask) {
678 return; 672 return;
679 } 673 }
@@ -689,8 +683,8 @@ void RasterizerOpenGL::SyncClipCoef() {
689} 683}
690 684
691void RasterizerOpenGL::SyncCullMode() { 685void RasterizerOpenGL::SyncCullMode() {
692 auto& flags = maxwell3d.dirty.flags; 686 auto& flags = maxwell3d->dirty.flags;
693 const auto& regs = maxwell3d.regs; 687 const auto& regs = maxwell3d->regs;
694 688
695 if (flags[Dirty::CullTest]) { 689 if (flags[Dirty::CullTest]) {
696 flags[Dirty::CullTest] = false; 690 flags[Dirty::CullTest] = false;
@@ -705,23 +699,23 @@ void RasterizerOpenGL::SyncCullMode() {
705} 699}
706 700
707void RasterizerOpenGL::SyncPrimitiveRestart() { 701void RasterizerOpenGL::SyncPrimitiveRestart() {
708 auto& flags = maxwell3d.dirty.flags; 702 auto& flags = maxwell3d->dirty.flags;
709 if (!flags[Dirty::PrimitiveRestart]) { 703 if (!flags[Dirty::PrimitiveRestart]) {
710 return; 704 return;
711 } 705 }
712 flags[Dirty::PrimitiveRestart] = false; 706 flags[Dirty::PrimitiveRestart] = false;
713 707
714 if (maxwell3d.regs.primitive_restart.enabled) { 708 if (maxwell3d->regs.primitive_restart.enabled) {
715 glEnable(GL_PRIMITIVE_RESTART); 709 glEnable(GL_PRIMITIVE_RESTART);
716 glPrimitiveRestartIndex(maxwell3d.regs.primitive_restart.index); 710 glPrimitiveRestartIndex(maxwell3d->regs.primitive_restart.index);
717 } else { 711 } else {
718 glDisable(GL_PRIMITIVE_RESTART); 712 glDisable(GL_PRIMITIVE_RESTART);
719 } 713 }
720} 714}
721 715
722void RasterizerOpenGL::SyncDepthTestState() { 716void RasterizerOpenGL::SyncDepthTestState() {
723 auto& flags = maxwell3d.dirty.flags; 717 auto& flags = maxwell3d->dirty.flags;
724 const auto& regs = maxwell3d.regs; 718 const auto& regs = maxwell3d->regs;
725 719
726 if (flags[Dirty::DepthMask]) { 720 if (flags[Dirty::DepthMask]) {
727 flags[Dirty::DepthMask] = false; 721 flags[Dirty::DepthMask] = false;
@@ -740,13 +734,13 @@ void RasterizerOpenGL::SyncDepthTestState() {
740} 734}
741 735
742void RasterizerOpenGL::SyncStencilTestState() { 736void RasterizerOpenGL::SyncStencilTestState() {
743 auto& flags = maxwell3d.dirty.flags; 737 auto& flags = maxwell3d->dirty.flags;
744 if (!flags[Dirty::StencilTest]) { 738 if (!flags[Dirty::StencilTest]) {
745 return; 739 return;
746 } 740 }
747 flags[Dirty::StencilTest] = false; 741 flags[Dirty::StencilTest] = false;
748 742
749 const auto& regs = maxwell3d.regs; 743 const auto& regs = maxwell3d->regs;
750 oglEnable(GL_STENCIL_TEST, regs.stencil_enable); 744 oglEnable(GL_STENCIL_TEST, regs.stencil_enable);
751 745
752 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func), 746 glStencilFuncSeparate(GL_FRONT, MaxwellToGL::ComparisonOp(regs.stencil_front_func_func),
@@ -771,23 +765,23 @@ void RasterizerOpenGL::SyncStencilTestState() {
771} 765}
772 766
773void RasterizerOpenGL::SyncRasterizeEnable() { 767void RasterizerOpenGL::SyncRasterizeEnable() {
774 auto& flags = maxwell3d.dirty.flags; 768 auto& flags = maxwell3d->dirty.flags;
775 if (!flags[Dirty::RasterizeEnable]) { 769 if (!flags[Dirty::RasterizeEnable]) {
776 return; 770 return;
777 } 771 }
778 flags[Dirty::RasterizeEnable] = false; 772 flags[Dirty::RasterizeEnable] = false;
779 773
780 oglEnable(GL_RASTERIZER_DISCARD, maxwell3d.regs.rasterize_enable == 0); 774 oglEnable(GL_RASTERIZER_DISCARD, maxwell3d->regs.rasterize_enable == 0);
781} 775}
782 776
783void RasterizerOpenGL::SyncPolygonModes() { 777void RasterizerOpenGL::SyncPolygonModes() {
784 auto& flags = maxwell3d.dirty.flags; 778 auto& flags = maxwell3d->dirty.flags;
785 if (!flags[Dirty::PolygonModes]) { 779 if (!flags[Dirty::PolygonModes]) {
786 return; 780 return;
787 } 781 }
788 flags[Dirty::PolygonModes] = false; 782 flags[Dirty::PolygonModes] = false;
789 783
790 const auto& regs = maxwell3d.regs; 784 const auto& regs = maxwell3d->regs;
791 if (regs.fill_rectangle) { 785 if (regs.fill_rectangle) {
792 if (!GLAD_GL_NV_fill_rectangle) { 786 if (!GLAD_GL_NV_fill_rectangle) {
793 LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported"); 787 LOG_ERROR(Render_OpenGL, "GL_NV_fill_rectangle used and not supported");
@@ -820,7 +814,7 @@ void RasterizerOpenGL::SyncPolygonModes() {
820} 814}
821 815
822void RasterizerOpenGL::SyncColorMask() { 816void RasterizerOpenGL::SyncColorMask() {
823 auto& flags = maxwell3d.dirty.flags; 817 auto& flags = maxwell3d->dirty.flags;
824 if (!flags[Dirty::ColorMasks]) { 818 if (!flags[Dirty::ColorMasks]) {
825 return; 819 return;
826 } 820 }
@@ -829,7 +823,7 @@ void RasterizerOpenGL::SyncColorMask() {
829 const bool force = flags[Dirty::ColorMaskCommon]; 823 const bool force = flags[Dirty::ColorMaskCommon];
830 flags[Dirty::ColorMaskCommon] = false; 824 flags[Dirty::ColorMaskCommon] = false;
831 825
832 const auto& regs = maxwell3d.regs; 826 const auto& regs = maxwell3d->regs;
833 if (regs.color_mask_common) { 827 if (regs.color_mask_common) {
834 if (!force && !flags[Dirty::ColorMask0]) { 828 if (!force && !flags[Dirty::ColorMask0]) {
835 return; 829 return;
@@ -854,30 +848,30 @@ void RasterizerOpenGL::SyncColorMask() {
854} 848}
855 849
856void RasterizerOpenGL::SyncMultiSampleState() { 850void RasterizerOpenGL::SyncMultiSampleState() {
857 auto& flags = maxwell3d.dirty.flags; 851 auto& flags = maxwell3d->dirty.flags;
858 if (!flags[Dirty::MultisampleControl]) { 852 if (!flags[Dirty::MultisampleControl]) {
859 return; 853 return;
860 } 854 }
861 flags[Dirty::MultisampleControl] = false; 855 flags[Dirty::MultisampleControl] = false;
862 856
863 const auto& regs = maxwell3d.regs; 857 const auto& regs = maxwell3d->regs;
864 oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage); 858 oglEnable(GL_SAMPLE_ALPHA_TO_COVERAGE, regs.multisample_control.alpha_to_coverage);
     oglEnable(GL_SAMPLE_ALPHA_TO_ONE, regs.multisample_control.alpha_to_one);
 }
 
 void RasterizerOpenGL::SyncFragmentColorClampState() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::FragmentClampColor]) {
         return;
     }
     flags[Dirty::FragmentClampColor] = false;
 
-    glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d.regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
+    glClampColor(GL_CLAMP_FRAGMENT_COLOR, maxwell3d->regs.frag_color_clamp ? GL_TRUE : GL_FALSE);
 }
 
 void RasterizerOpenGL::SyncBlendState() {
-    auto& flags = maxwell3d.dirty.flags;
-    const auto& regs = maxwell3d.regs;
+    auto& flags = maxwell3d->dirty.flags;
+    const auto& regs = maxwell3d->regs;
 
     if (flags[Dirty::BlendColor]) {
         flags[Dirty::BlendColor] = false;
@@ -934,13 +928,13 @@ void RasterizerOpenGL::SyncBlendState() {
 }
 
 void RasterizerOpenGL::SyncLogicOpState() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::LogicOp]) {
         return;
     }
     flags[Dirty::LogicOp] = false;
 
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     if (regs.logic_op.enable) {
         glEnable(GL_COLOR_LOGIC_OP);
         glLogicOp(MaxwellToGL::LogicOp(regs.logic_op.operation));
@@ -950,7 +944,7 @@ void RasterizerOpenGL::SyncLogicOpState() {
 }
 
 void RasterizerOpenGL::SyncScissorTest() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::Scissors] && !flags[VideoCommon::Dirty::RescaleScissors]) {
         return;
     }
@@ -959,7 +953,7 @@ void RasterizerOpenGL::SyncScissorTest() {
     const bool force = flags[VideoCommon::Dirty::RescaleScissors];
     flags[VideoCommon::Dirty::RescaleScissors] = false;
 
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
 
     const auto& resolution = Settings::values.resolution_info;
     const bool is_rescaling{texture_cache.IsRescaling()};
@@ -995,39 +989,39 @@ void RasterizerOpenGL::SyncScissorTest() {
 }
 
 void RasterizerOpenGL::SyncPointState() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::PointSize]) {
         return;
     }
     flags[Dirty::PointSize] = false;
 
-    oglEnable(GL_POINT_SPRITE, maxwell3d.regs.point_sprite_enable);
-    oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d.regs.vp_point_size.enable);
+    oglEnable(GL_POINT_SPRITE, maxwell3d->regs.point_sprite_enable);
+    oglEnable(GL_PROGRAM_POINT_SIZE, maxwell3d->regs.vp_point_size.enable);
     const bool is_rescaling{texture_cache.IsRescaling()};
     const float scale = is_rescaling ? Settings::values.resolution_info.up_factor : 1.0f;
-    glPointSize(std::max(1.0f, maxwell3d.regs.point_size * scale));
+    glPointSize(std::max(1.0f, maxwell3d->regs.point_size * scale));
 }
 
 void RasterizerOpenGL::SyncLineState() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::LineWidth]) {
         return;
     }
     flags[Dirty::LineWidth] = false;
 
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     oglEnable(GL_LINE_SMOOTH, regs.line_smooth_enable);
     glLineWidth(regs.line_smooth_enable ? regs.line_width_smooth : regs.line_width_aliased);
 }
 
 void RasterizerOpenGL::SyncPolygonOffset() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::PolygonOffset]) {
         return;
     }
     flags[Dirty::PolygonOffset] = false;
 
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     oglEnable(GL_POLYGON_OFFSET_FILL, regs.polygon_offset_fill_enable);
     oglEnable(GL_POLYGON_OFFSET_LINE, regs.polygon_offset_line_enable);
     oglEnable(GL_POLYGON_OFFSET_POINT, regs.polygon_offset_point_enable);
@@ -1041,13 +1035,13 @@ void RasterizerOpenGL::SyncPolygonOffset() {
 }
 
 void RasterizerOpenGL::SyncAlphaTest() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::AlphaTest]) {
         return;
     }
     flags[Dirty::AlphaTest] = false;
 
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     if (regs.alpha_test_enabled) {
         glEnable(GL_ALPHA_TEST);
         glAlphaFunc(MaxwellToGL::ComparisonOp(regs.alpha_test_func), regs.alpha_test_ref);
@@ -1057,17 +1051,17 @@ void RasterizerOpenGL::SyncAlphaTest() {
 }
 
 void RasterizerOpenGL::SyncFramebufferSRGB() {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::FramebufferSRGB]) {
         return;
     }
     flags[Dirty::FramebufferSRGB] = false;
 
-    oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d.regs.framebuffer_srgb);
+    oglEnable(GL_FRAMEBUFFER_SRGB, maxwell3d->regs.framebuffer_srgb);
 }
 
 void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum primitive_mode) {
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     if (regs.tfb_enabled == 0) {
         return;
     }
@@ -1086,11 +1080,48 @@ void RasterizerOpenGL::BeginTransformFeedback(GraphicsPipeline* program, GLenum
 }
 
 void RasterizerOpenGL::EndTransformFeedback() {
-    if (maxwell3d.regs.tfb_enabled != 0) {
+    if (maxwell3d->regs.tfb_enabled != 0) {
         glEndTransformFeedback();
     }
 }
 
+void RasterizerOpenGL::InitializeChannel(Tegra::Control::ChannelState& channel) {
+    CreateChannel(channel);
+    {
+        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        texture_cache.CreateChannel(channel);
+        buffer_cache.CreateChannel(channel);
+    }
+    shader_cache.CreateChannel(channel);
+    query_cache.CreateChannel(channel);
+    state_tracker.SetupTables(channel);
+}
+
+void RasterizerOpenGL::BindChannel(Tegra::Control::ChannelState& channel) {
+    const s32 channel_id = channel.bind_id;
+    BindToChannel(channel_id);
+    {
+        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        texture_cache.BindToChannel(channel_id);
+        buffer_cache.BindToChannel(channel_id);
+    }
+    shader_cache.BindToChannel(channel_id);
+    query_cache.BindToChannel(channel_id);
+    state_tracker.ChangeChannel(channel);
+    state_tracker.InvalidateState();
+}
+
+void RasterizerOpenGL::ReleaseChannel(s32 channel_id) {
+    EraseChannel(channel_id);
+    {
+        std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
+        texture_cache.EraseChannel(channel_id);
+        buffer_cache.EraseChannel(channel_id);
+    }
+    shader_cache.EraseChannel(channel_id);
+    query_cache.EraseChannel(channel_id);
+}
+
 AccelerateDMA::AccelerateDMA(BufferCache& buffer_cache_) : buffer_cache{buffer_cache_} {}
 
 bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64 amount) {
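
The three overrides added above are the heart of the channel refactor: every cache the rasterizer owns (texture, buffer, shader, query) plus the state tracker is told when a channel is created, bound, or torn down, and BindChannel ends by invalidating all tracked GL state because the incoming channel's dirty flags say nothing about what the previous channel left bound. A minimal sketch of the pattern each cache implements follows; the names are illustrative stand-ins, not the actual VideoCommon::ChannelSetupCaches interface.

#include <unordered_map>

struct ChannelInfoSketch {
    void* maxwell_3d = nullptr; // stand-ins for the per-channel engine state
    void* gpu_memory = nullptr;
};

class ChannelAwareCacheSketch {
public:
    // One entry per opened channel.
    void CreateChannel(int bind_id) {
        channels.emplace(bind_id, ChannelInfoSketch{});
    }
    // Channel switches repoint "current" instead of flushing the cache.
    void BindToChannel(int bind_id) {
        current = &channels.at(bind_id);
    }
    // Teardown when the guest closes the channel.
    void EraseChannel(int bind_id) {
        if (current == &channels.at(bind_id)) {
            current = nullptr;
        }
        channels.erase(bind_id);
    }

private:
    std::unordered_map<int, ChannelInfoSketch> channels;
    ChannelInfoSketch* current = nullptr;
};
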
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 31a16fcba..45131b785 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -12,6 +12,7 @@
 #include <glad/glad.h>
 
 #include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
 #include "video_core/engines/maxwell_dma.h"
 #include "video_core/rasterizer_accelerated.h"
 #include "video_core/rasterizer_interface.h"
@@ -58,7 +59,8 @@ private:
     BufferCache& buffer_cache;
 };
 
-class RasterizerOpenGL : public VideoCore::RasterizerAccelerated {
+class RasterizerOpenGL : public VideoCore::RasterizerAccelerated,
+                         protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
     explicit RasterizerOpenGL(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
                               Core::Memory::Memory& cpu_memory_, const Device& device_,
@@ -78,10 +80,11 @@ public:
     bool MustFlushRegion(VAddr addr, u64 size) override;
     void InvalidateRegion(VAddr addr, u64 size) override;
     void OnCPUWrite(VAddr addr, u64 size) override;
-    void SyncGuestHost() override;
+    void InvalidateGPUCache() override;
     void UnmapMemory(VAddr addr, u64 size) override;
-    void ModifyGPUMemory(GPUVAddr addr, u64 size) override;
-    void SignalSemaphore(GPUVAddr addr, u32 value) override;
+    void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
+    void SignalFence(std::function<void()>&& func) override;
+    void SyncOperation(std::function<void()>&& func) override;
     void SignalSyncPoint(u32 value) override;
     void SignalReference() override;
     void ReleaseFences() override;
@@ -96,7 +99,7 @@ public:
                            const Tegra::Engines::Fermi2D::Config& copy_config) override;
     Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
     void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
-                                  std::span<u8> memory) override;
+                                  std::span<const u8> memory) override;
     bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
                            u32 pixel_stride) override;
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
@@ -107,6 +110,12 @@ public:
         return num_queued_commands > 0;
     }
 
+    void InitializeChannel(Tegra::Control::ChannelState& channel) override;
+
+    void BindChannel(Tegra::Control::ChannelState& channel) override;
+
+    void ReleaseChannel(s32 channel_id) override;
+
 private:
     static constexpr size_t MAX_TEXTURES = 192;
     static constexpr size_t MAX_IMAGES = 48;
@@ -191,9 +200,6 @@ private:
     void EndTransformFeedback();
 
     Tegra::GPU& gpu;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::Engines::KeplerCompute& kepler_compute;
-    Tegra::MemoryManager& gpu_memory;
 
     const Device& device;
     ScreenInfo& screen_info;
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.cpp b/src/video_core/renderer_opengl/gl_shader_cache.cpp
index ddb70934c..5a29a41d2 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_shader_cache.cpp
@@ -151,16 +151,13 @@ void SetXfbState(VideoCommon::TransformFeedbackState& state, const Maxwell& regs
 } // Anonymous namespace
 
 ShaderCache::ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
-                         Tegra::Engines::Maxwell3D& maxwell3d_,
-                         Tegra::Engines::KeplerCompute& kepler_compute_,
-                         Tegra::MemoryManager& gpu_memory_, const Device& device_,
-                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
-                         ProgramManager& program_manager_, StateTracker& state_tracker_,
-                         VideoCore::ShaderNotify& shader_notify_)
-    : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
-      emu_window{emu_window_}, device{device_}, texture_cache{texture_cache_},
-      buffer_cache{buffer_cache_}, program_manager{program_manager_}, state_tracker{state_tracker_},
-      shader_notify{shader_notify_}, use_asynchronous_shaders{device.UseAsynchronousShaders()},
+                         const Device& device_, TextureCache& texture_cache_,
+                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
+                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_)
+    : VideoCommon::ShaderCache{rasterizer_}, emu_window{emu_window_}, device{device_},
+      texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, program_manager{program_manager_},
+      state_tracker{state_tracker_}, shader_notify{shader_notify_},
+      use_asynchronous_shaders{device.UseAsynchronousShaders()},
       profile{
           .supported_spirv = 0x00010000,
 
@@ -310,7 +307,7 @@ GraphicsPipeline* ShaderCache::CurrentGraphicsPipeline() {
         current_pipeline = nullptr;
         return nullptr;
     }
-    const auto& regs{maxwell3d.regs};
+    const auto& regs{maxwell3d->regs};
     graphics_key.raw = 0;
     graphics_key.early_z.Assign(regs.force_early_fragment_tests != 0 ? 1 : 0);
     graphics_key.gs_input_topology.Assign(graphics_key.unique_hashes[4] != 0
@@ -351,13 +348,13 @@ GraphicsPipeline* ShaderCache::BuiltPipeline(GraphicsPipeline* pipeline) const n
     }
     // If something is using depth, we can assume that games are not rendering anything which
     // will be used one time.
-    if (maxwell3d.regs.zeta_enable) {
+    if (maxwell3d->regs.zeta_enable) {
         return nullptr;
     }
     // If games are using a small index count, we can assume these are full screen quads.
     // Usually these shaders are only used once for building textures so we can assume they
     // can't be built async
-    if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+    if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
         return pipeline;
     }
     return nullptr;
@@ -368,7 +365,7 @@ ComputePipeline* ShaderCache::CurrentComputePipeline() {
     if (!shader) {
         return nullptr;
     }
-    const auto& qmd{kepler_compute.launch_description};
+    const auto& qmd{kepler_compute->launch_description};
     const ComputePipelineKey key{
         .unique_hash = shader->unique_hash,
         .shared_memory_size = qmd.shared_alloc,
@@ -480,9 +477,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
         previous_program = &program;
     }
     auto* const thread_worker{build_in_parallel ? workers.get() : nullptr};
-    return std::make_unique<GraphicsPipeline>(
-        device, texture_cache, buffer_cache, gpu_memory, maxwell3d, program_manager, state_tracker,
-        thread_worker, &shader_notify, sources, sources_spirv, infos, key);
+    return std::make_unique<GraphicsPipeline>(device, texture_cache, buffer_cache, program_manager,
+                                              state_tracker, thread_worker, &shader_notify, sources,
+                                              sources_spirv, infos, key);
 
 } catch (Shader::Exception& exception) {
     LOG_ERROR(Render_OpenGL, "{}", exception.what());
@@ -491,9 +488,9 @@ std::unique_ptr<GraphicsPipeline> ShaderCache::CreateGraphicsPipeline(
 
 std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
     const ComputePipelineKey& key, const VideoCommon::ShaderInfo* shader) {
-    const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
-    const auto& qmd{kepler_compute.launch_description};
-    ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+    const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+    const auto& qmd{kepler_compute->launch_description};
+    ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
     env.SetCachedSize(shader->size_bytes);
 
     main_pools.ReleaseContents();
@@ -536,9 +533,8 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
         break;
     }
 
-    return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, gpu_memory,
-                                             kepler_compute, program_manager, program.info, code,
-                                             code_spirv);
+    return std::make_unique<ComputePipeline>(device, texture_cache, buffer_cache, program_manager,
                                             program.info, code, code_spirv);
 } catch (Shader::Exception& exception) {
     LOG_ERROR(Render_OpenGL, "{}", exception.what());
     return nullptr;
@@ -546,7 +542,7 @@ std::unique_ptr<ComputePipeline> ShaderCache::CreateComputePipeline(
 
 std::unique_ptr<ShaderWorker> ShaderCache::CreateWorkers() const {
     return std::make_unique<ShaderWorker>(std::max(std::thread::hardware_concurrency(), 2U) - 1,
-                                          "yuzu:ShaderBuilder",
+                                          "GlShaderBuilder",
                                           [this] { return Context{emu_window}; });
 }
 
diff --git a/src/video_core/renderer_opengl/gl_shader_cache.h b/src/video_core/renderer_opengl/gl_shader_cache.h
index a14269dea..89f181fe3 100644
--- a/src/video_core/renderer_opengl/gl_shader_cache.h
+++ b/src/video_core/renderer_opengl/gl_shader_cache.h
@@ -30,12 +30,9 @@ using ShaderWorker = Common::StatefulThreadWorker<ShaderContext::Context>;
 class ShaderCache : public VideoCommon::ShaderCache {
 public:
     explicit ShaderCache(RasterizerOpenGL& rasterizer_, Core::Frontend::EmuWindow& emu_window_,
-                         Tegra::Engines::Maxwell3D& maxwell3d_,
-                         Tegra::Engines::KeplerCompute& kepler_compute_,
-                         Tegra::MemoryManager& gpu_memory_, const Device& device_,
-                         TextureCache& texture_cache_, BufferCache& buffer_cache_,
-                         ProgramManager& program_manager_, StateTracker& state_tracker_,
-                         VideoCore::ShaderNotify& shader_notify_);
+                         const Device& device_, TextureCache& texture_cache_,
+                         BufferCache& buffer_cache_, ProgramManager& program_manager_,
+                         StateTracker& state_tracker_, VideoCore::ShaderNotify& shader_notify_);
     ~ShaderCache();
 
     void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.cpp b/src/video_core/renderer_opengl/gl_state_tracker.cpp
index 912725ef7..a8f3a0f57 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.cpp
+++ b/src/video_core/renderer_opengl/gl_state_tracker.cpp
@@ -7,8 +7,8 @@
 
 #include "common/common_types.h"
 #include "core/core.h"
+#include "video_core/control/channel_state.h"
 #include "video_core/engines/maxwell_3d.h"
-#include "video_core/gpu.h"
 #include "video_core/renderer_opengl/gl_state_tracker.h"
 
 #define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -202,9 +202,8 @@ void SetupDirtyMisc(Tables& tables) {
 
 } // Anonymous namespace
 
-StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags} {
-    auto& dirty = gpu.Maxwell3D().dirty;
-    auto& tables = dirty.tables;
+void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
+    auto& tables{channel_state.maxwell_3d->dirty.tables};
     SetupDirtyFlags(tables);
     SetupDirtyColorMasks(tables);
     SetupDirtyViewports(tables);
@@ -230,4 +229,14 @@ StateTracker::StateTracker(Tegra::GPU& gpu) : flags{gpu.Maxwell3D().dirty.flags}
     SetupDirtyMisc(tables);
 }
 
+void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
+    flags = &channel_state.maxwell_3d->dirty.flags;
+}
+
+void StateTracker::InvalidateState() {
+    flags->set();
+}
+
+StateTracker::StateTracker() : flags{&default_flags} {}
+
 } // namespace OpenGL
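
With channels there is no longer a single Maxwell3D dirty-flag set to bind for the tracker's whole lifetime, so the reference member becomes a pointer: ChangeChannel repoints it at the active channel's flags, InvalidateState marks everything dirty after a switch, and the default constructor aims the pointer at a local default_flags set so the Notify* helpers stay safe before the first channel exists. The pattern in isolation, with std::bitset standing in for Tegra::Engines::Maxwell3D::DirtyState::Flags:

#include <bitset>

class StateTrackerSketch {
public:
    using Flags = std::bitset<256>;

    StateTrackerSketch() : flags{&default_flags} {} // safe before any channel exists

    void ChangeChannel(Flags& channel_flags) {
        flags = &channel_flags; // repoint, nothing is copied
    }

    void InvalidateState() {
        flags->set(); // everything must be resynced after a channel switch
    }

private:
    Flags* flags;
    Flags default_flags{}; // harmless write target until a real channel is bound
};
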
diff --git a/src/video_core/renderer_opengl/gl_state_tracker.h b/src/video_core/renderer_opengl/gl_state_tracker.h
index 04e024f08..19bcf3f35 100644
--- a/src/video_core/renderer_opengl/gl_state_tracker.h
+++ b/src/video_core/renderer_opengl/gl_state_tracker.h
@@ -12,8 +12,10 @@
12#include "video_core/engines/maxwell_3d.h" 12#include "video_core/engines/maxwell_3d.h"
13 13
14namespace Tegra { 14namespace Tegra {
15class GPU; 15namespace Control {
16struct ChannelState;
16} 17}
18} // namespace Tegra
17 19
18namespace OpenGL { 20namespace OpenGL {
19 21
@@ -83,7 +85,7 @@ static_assert(Last <= std::numeric_limits<u8>::max());
83 85
84class StateTracker { 86class StateTracker {
85public: 87public:
86 explicit StateTracker(Tegra::GPU& gpu); 88 explicit StateTracker();
87 89
88 void BindIndexBuffer(GLuint new_index_buffer) { 90 void BindIndexBuffer(GLuint new_index_buffer) {
89 if (index_buffer == new_index_buffer) { 91 if (index_buffer == new_index_buffer) {
@@ -121,94 +123,107 @@ public:
121 } 123 }
122 124
123 void NotifyScreenDrawVertexArray() { 125 void NotifyScreenDrawVertexArray() {
124 flags[OpenGL::Dirty::VertexFormats] = true; 126 (*flags)[OpenGL::Dirty::VertexFormats] = true;
125 flags[OpenGL::Dirty::VertexFormat0 + 0] = true; 127 (*flags)[OpenGL::Dirty::VertexFormat0 + 0] = true;
126 flags[OpenGL::Dirty::VertexFormat0 + 1] = true; 128 (*flags)[OpenGL::Dirty::VertexFormat0 + 1] = true;
127 129
128 flags[VideoCommon::Dirty::VertexBuffers] = true; 130 (*flags)[VideoCommon::Dirty::VertexBuffers] = true;
129 flags[VideoCommon::Dirty::VertexBuffer0] = true; 131 (*flags)[VideoCommon::Dirty::VertexBuffer0] = true;
130 132
131 flags[OpenGL::Dirty::VertexInstances] = true; 133 (*flags)[OpenGL::Dirty::VertexInstances] = true;
132 flags[OpenGL::Dirty::VertexInstance0 + 0] = true; 134 (*flags)[OpenGL::Dirty::VertexInstance0 + 0] = true;
133 flags[OpenGL::Dirty::VertexInstance0 + 1] = true; 135 (*flags)[OpenGL::Dirty::VertexInstance0 + 1] = true;
134 } 136 }
135 137
136 void NotifyPolygonModes() { 138 void NotifyPolygonModes() {
137 flags[OpenGL::Dirty::PolygonModes] = true; 139 (*flags)[OpenGL::Dirty::PolygonModes] = true;
138 flags[OpenGL::Dirty::PolygonModeFront] = true; 140 (*flags)[OpenGL::Dirty::PolygonModeFront] = true;
139 flags[OpenGL::Dirty::PolygonModeBack] = true; 141 (*flags)[OpenGL::Dirty::PolygonModeBack] = true;
140 } 142 }
141 143
142 void NotifyViewport0() { 144 void NotifyViewport0() {
143 flags[OpenGL::Dirty::Viewports] = true; 145 (*flags)[OpenGL::Dirty::Viewports] = true;
144 flags[OpenGL::Dirty::Viewport0] = true; 146 (*flags)[OpenGL::Dirty::Viewport0] = true;
145 } 147 }
146 148
147 void NotifyScissor0() { 149 void NotifyScissor0() {
148 flags[OpenGL::Dirty::Scissors] = true; 150 (*flags)[OpenGL::Dirty::Scissors] = true;
149 flags[OpenGL::Dirty::Scissor0] = true; 151 (*flags)[OpenGL::Dirty::Scissor0] = true;
150 } 152 }
151 153
152 void NotifyColorMask(size_t index) { 154 void NotifyColorMask(size_t index) {
153 flags[OpenGL::Dirty::ColorMasks] = true; 155 (*flags)[OpenGL::Dirty::ColorMasks] = true;
154 flags[OpenGL::Dirty::ColorMask0 + index] = true; 156 (*flags)[OpenGL::Dirty::ColorMask0 + index] = true;
155 } 157 }
156 158
157 void NotifyBlend0() { 159 void NotifyBlend0() {
158 flags[OpenGL::Dirty::BlendStates] = true; 160 (*flags)[OpenGL::Dirty::BlendStates] = true;
159 flags[OpenGL::Dirty::BlendState0] = true; 161 (*flags)[OpenGL::Dirty::BlendState0] = true;
160 } 162 }
161 163
162 void NotifyFramebuffer() { 164 void NotifyFramebuffer() {
163 flags[VideoCommon::Dirty::RenderTargets] = true; 165 (*flags)[VideoCommon::Dirty::RenderTargets] = true;
164 } 166 }
165 167
166 void NotifyFrontFace() { 168 void NotifyFrontFace() {
167 flags[OpenGL::Dirty::FrontFace] = true; 169 (*flags)[OpenGL::Dirty::FrontFace] = true;
168 } 170 }
169 171
170 void NotifyCullTest() { 172 void NotifyCullTest() {
171 flags[OpenGL::Dirty::CullTest] = true; 173 (*flags)[OpenGL::Dirty::CullTest] = true;
172 } 174 }
173 175
174 void NotifyDepthMask() { 176 void NotifyDepthMask() {
175 flags[OpenGL::Dirty::DepthMask] = true; 177 (*flags)[OpenGL::Dirty::DepthMask] = true;
176 } 178 }
177 179
178 void NotifyDepthTest() { 180 void NotifyDepthTest() {
179 flags[OpenGL::Dirty::DepthTest] = true; 181 (*flags)[OpenGL::Dirty::DepthTest] = true;
180 } 182 }
181 183
182 void NotifyStencilTest() { 184 void NotifyStencilTest() {
183 flags[OpenGL::Dirty::StencilTest] = true; 185 (*flags)[OpenGL::Dirty::StencilTest] = true;
184 } 186 }
185 187
186 void NotifyPolygonOffset() { 188 void NotifyPolygonOffset() {
187 flags[OpenGL::Dirty::PolygonOffset] = true; 189 (*flags)[OpenGL::Dirty::PolygonOffset] = true;
188 } 190 }
189 191
190 void NotifyRasterizeEnable() { 192 void NotifyRasterizeEnable() {
191 flags[OpenGL::Dirty::RasterizeEnable] = true; 193 (*flags)[OpenGL::Dirty::RasterizeEnable] = true;
192 } 194 }
193 195
194 void NotifyFramebufferSRGB() { 196 void NotifyFramebufferSRGB() {
195 flags[OpenGL::Dirty::FramebufferSRGB] = true; 197 (*flags)[OpenGL::Dirty::FramebufferSRGB] = true;
196 } 198 }
197 199
198 void NotifyLogicOp() { 200 void NotifyLogicOp() {
199 flags[OpenGL::Dirty::LogicOp] = true; 201 (*flags)[OpenGL::Dirty::LogicOp] = true;
200 } 202 }
201 203
202 void NotifyClipControl() { 204 void NotifyClipControl() {
203 flags[OpenGL::Dirty::ClipControl] = true; 205 (*flags)[OpenGL::Dirty::ClipControl] = true;
204 } 206 }
205 207
206 void NotifyAlphaTest() { 208 void NotifyAlphaTest() {
207 flags[OpenGL::Dirty::AlphaTest] = true; 209 (*flags)[OpenGL::Dirty::AlphaTest] = true;
208 } 210 }
209 211
212 void NotifyRange(u8 start, u8 end) {
213 for (auto flag = start; flag <= end; flag++) {
214 (*flags)[flag] = true;
215 }
216 }
217
218 void SetupTables(Tegra::Control::ChannelState& channel_state);
219
220 void ChangeChannel(Tegra::Control::ChannelState& channel_state);
221
222 void InvalidateState();
223
210private: 224private:
211 Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; 225 Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
226 Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags{};
212 227
213 GLuint framebuffer = 0; 228 GLuint framebuffer = 0;
214 GLuint index_buffer = 0; 229 GLuint index_buffer = 0;
diff --git a/src/video_core/renderer_opengl/maxwell_to_gl.h b/src/video_core/renderer_opengl/maxwell_to_gl.h
index 9a72d0d6d..004421236 100644
--- a/src/video_core/renderer_opengl/maxwell_to_gl.h
+++ b/src/video_core/renderer_opengl/maxwell_to_gl.h
@@ -87,7 +87,7 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
     {GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT},           // BC3_SRGB
     {GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM},              // BC7_SRGB
     {GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4_REV}, // A4B4G4R4_UNORM
-    {GL_R8, GL_RED, GL_UNSIGNED_BYTE},                  // R4G4_UNORM
+    {GL_R8, GL_RED, GL_UNSIGNED_BYTE},                  // G4R4_UNORM
     {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR},          // ASTC_2D_4X4_SRGB
     {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR},          // ASTC_2D_8X8_SRGB
     {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR},          // ASTC_2D_8X5_SRGB
@@ -99,6 +99,8 @@ constexpr std::array<FormatTuple, VideoCore::Surface::MaxPixelFormat> FORMAT_TAB
     {GL_COMPRESSED_RGBA_ASTC_6x6_KHR},                  // ASTC_2D_6X6_UNORM
     {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR},          // ASTC_2D_6X6_SRGB
     {GL_COMPRESSED_RGBA_ASTC_10x6_KHR},                 // ASTC_2D_10X6_UNORM
+    {GL_COMPRESSED_RGBA_ASTC_10x5_KHR},                 // ASTC_2D_10X5_UNORM
+    {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR},         // ASTC_2D_10X5_SRGB
     {GL_COMPRESSED_RGBA_ASTC_10x10_KHR},                // ASTC_2D_10X10_UNORM
     {GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR},        // ASTC_2D_10X10_SRGB
     {GL_COMPRESSED_RGBA_ASTC_12x12_KHR},                // ASTC_2D_12X12_UNORM
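
FORMAT_TABLE is declared with VideoCore::Surface::MaxPixelFormat entries and indexed directly by the pixel-format enum, which is why the two ASTC_2D_10X5 rows must be inserted at exactly the position the enum gained them, both here and in the Vulkan table further below; appending them instead would silently shift every later format. A toy illustration of that invariant, with sketched names rather than the real enum:

#include <cstddef>
#include <iterator>

// The table is indexed directly by the enum, so row order must mirror it.
enum class PixelFormat : std::size_t {
    ASTC_2D_10X6_UNORM,
    ASTC_2D_10X5_UNORM, // new entry: inserted here, not appended at the end
    ASTC_2D_10X5_SRGB,  // new entry
    ASTC_2D_10X10_UNORM,
    MaxPixelFormat,
};

constexpr const char* FORMAT_TABLE[] = {
    "ASTC_10x6",
    "ASTC_10x5",      // row index matches the enum value above
    "ASTC_10x5_SRGB",
    "ASTC_10x10",
};
static_assert(std::size(FORMAT_TABLE) == static_cast<std::size_t>(PixelFormat::MaxPixelFormat));

constexpr const char* Lookup(PixelFormat format) {
    return FORMAT_TABLE[static_cast<std::size_t>(format)];
}
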
diff --git a/src/video_core/renderer_opengl/renderer_opengl.cpp b/src/video_core/renderer_opengl/renderer_opengl.cpp
index 34f3f7a67..8bd5eba7e 100644
--- a/src/video_core/renderer_opengl/renderer_opengl.cpp
+++ b/src/video_core/renderer_opengl/renderer_opengl.cpp
@@ -131,7 +131,7 @@ RendererOpenGL::RendererOpenGL(Core::TelemetrySession& telemetry_session_,
                                Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
                                std::unique_ptr<Core::Frontend::GraphicsContext> context_)
     : RendererBase{emu_window_, std::move(context_)}, telemetry_session{telemetry_session_},
-      emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{gpu},
+      emu_window{emu_window_}, cpu_memory{cpu_memory_}, gpu{gpu_}, state_tracker{},
       program_manager{device},
       rasterizer(emu_window, gpu, cpu_memory, device, screen_info, program_manager, state_tracker) {
     if (Settings::values.renderer_debug && GLAD_GL_KHR_debug) {
diff --git a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
index bdb71dc53..e7104d377 100644
--- a/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
+++ b/src/video_core/renderer_vulkan/maxwell_to_vk.cpp
@@ -184,7 +184,7 @@ struct FormatTuple {
     {VK_FORMAT_BC3_SRGB_BLOCK},                    // BC3_SRGB
     {VK_FORMAT_BC7_SRGB_BLOCK},                    // BC7_SRGB
     {VK_FORMAT_R4G4B4A4_UNORM_PACK16, Attachable}, // A4B4G4R4_UNORM
-    {VK_FORMAT_R4G4_UNORM_PACK8},                  // R4G4_UNORM
+    {VK_FORMAT_R4G4_UNORM_PACK8},                  // G4R4_UNORM
     {VK_FORMAT_ASTC_4x4_SRGB_BLOCK},               // ASTC_2D_4X4_SRGB
     {VK_FORMAT_ASTC_8x8_SRGB_BLOCK},               // ASTC_2D_8X8_SRGB
     {VK_FORMAT_ASTC_8x5_SRGB_BLOCK},               // ASTC_2D_8X5_SRGB
@@ -196,6 +196,8 @@ struct FormatTuple {
     {VK_FORMAT_ASTC_6x6_UNORM_BLOCK},              // ASTC_2D_6X6_UNORM
     {VK_FORMAT_ASTC_6x6_SRGB_BLOCK},               // ASTC_2D_6X6_SRGB
     {VK_FORMAT_ASTC_10x6_UNORM_BLOCK},             // ASTC_2D_10X6_UNORM
+    {VK_FORMAT_ASTC_10x5_UNORM_BLOCK},             // ASTC_2D_10X5_UNORM
+    {VK_FORMAT_ASTC_10x5_SRGB_BLOCK},              // ASTC_2D_10X5_SRGB
     {VK_FORMAT_ASTC_10x10_UNORM_BLOCK},            // ASTC_2D_10X10_UNORM
     {VK_FORMAT_ASTC_10x10_SRGB_BLOCK},             // ASTC_2D_10X10_SRGB
     {VK_FORMAT_ASTC_12x12_UNORM_BLOCK},            // ASTC_2D_12X12_UNORM
diff --git a/src/video_core/renderer_vulkan/renderer_vulkan.cpp b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
index 7c78d0299..d8131232a 100644
--- a/src/video_core/renderer_vulkan/renderer_vulkan.cpp
+++ b/src/video_core/renderer_vulkan/renderer_vulkan.cpp
@@ -102,13 +102,13 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
       debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
       surface(CreateSurface(instance, render_window)),
       device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
-      state_tracker(gpu), scheduler(device, state_tracker),
+      state_tracker(), scheduler(device, state_tracker),
       swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
                 render_window.GetFramebufferLayout().height, false),
       blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
                   screen_info),
-      rasterizer(render_window, gpu, gpu.MemoryManager(), cpu_memory, screen_info, device,
-                 memory_allocator, state_tracker, scheduler) {
+      rasterizer(render_window, gpu, cpu_memory, screen_info, device, memory_allocator,
+                 state_tracker, scheduler) {
     Report();
 } catch (const vk::Exception& exception) {
     LOG_ERROR(Render_Vulkan, "Vulkan initialization failed with error: {}", exception.what());
@@ -142,7 +142,7 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     const auto recreate_swapchain = [&] {
         if (!has_been_recreated) {
             has_been_recreated = true;
-            scheduler.WaitWorker();
+            scheduler.Finish();
         }
         const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
         swapchain.Create(layout.width, layout.height, is_srgb);
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.cpp b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
index 444c29f68..cb7fa2078 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.cpp
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.cpp
@@ -145,6 +145,11 @@ VkSemaphore BlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer,
     // Finish any pending renderpass
     scheduler.RequestOutsideRenderPassOperationContext();
 
+    if (const auto swapchain_images = swapchain.GetImageCount(); swapchain_images != image_count) {
+        image_count = swapchain_images;
+        Recreate();
+    }
+
     const std::size_t image_index = swapchain.GetImageIndex();
 
     scheduler.Wait(resource_ticks[image_index]);
@@ -448,15 +453,15 @@ vk::Framebuffer BlitScreen::CreateFramebuffer(const VkImageView& image_view, VkE
 
 void BlitScreen::CreateStaticResources() {
     CreateShaders();
+    CreateSampler();
+}
+
+void BlitScreen::CreateDynamicResources() {
     CreateSemaphores();
     CreateDescriptorPool();
     CreateDescriptorSetLayout();
     CreateDescriptorSets();
     CreatePipelineLayout();
-    CreateSampler();
-}
-
-void BlitScreen::CreateDynamicResources() {
     CreateRenderPass();
     CreateFramebuffers();
     CreateGraphicsPipeline();
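
Because image_count is now mutable (see the header change below), everything sized per swapchain image, such as the semaphores and descriptor sets, moves out of the one-shot static path into CreateDynamicResources, and the new check at the top of Draw rebuilds those resources whenever the swapchain reports a different image count; only the shaders and the sampler survive a recreation. A self-contained sketch of that pattern, with illustrative names rather than the real BlitScreen members:

#include <cstddef>
#include <vector>

class BlitScreenSketch {
public:
    void Draw(std::size_t swapchain_images) {
        if (swapchain_images != image_count) {
            image_count = swapchain_images;
            CreateDynamicResources();
        }
        // ...record the blit using the per-image resources...
    }

private:
    void CreateDynamicResources() {
        // Placeholder for semaphores, descriptor sets, framebuffers, etc.
        per_image_resources.assign(image_count, 0);
    }

    std::size_t image_count = 0;
    std::vector<int> per_image_resources;
};
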
diff --git a/src/video_core/renderer_vulkan/vk_blit_screen.h b/src/video_core/renderer_vulkan/vk_blit_screen.h
index b8c67bef0..29e2ea925 100644
--- a/src/video_core/renderer_vulkan/vk_blit_screen.h
+++ b/src/video_core/renderer_vulkan/vk_blit_screen.h
@@ -109,7 +109,7 @@ private:
     MemoryAllocator& memory_allocator;
     Swapchain& swapchain;
     Scheduler& scheduler;
-    const std::size_t image_count;
+    std::size_t image_count;
     const ScreenInfo& screen_info;
 
     vk::ShaderModule vertex_shader;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index f17a5ccd6..241d7573e 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -26,8 +26,6 @@
 
 namespace Vulkan {
 
-using Tegra::Texture::SWIZZLE_TABLE;
-
 namespace {
 
 constexpr u32 ASTC_BINDING_INPUT_BUFFER = 0;
diff --git a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
index 6447210e2..7906e11a8 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pipeline.cpp
@@ -126,8 +126,8 @@ void ComputePipeline::Configure(Tegra::Engines::KeplerCompute& kepler_compute,
             const u32 secondary_offset{desc.secondary_cbuf_offset + index_offset};
             const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].Address() +
                                          secondary_offset};
-            const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
-            const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+            const u32 lhs_raw{gpu_memory.Read<u32>(addr) << desc.shift_left};
+            const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr) << desc.secondary_shift_left};
             return TexturePair(lhs_raw | rhs_raw, via_header_index);
         }
     }
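
The two reads that assemble a bindless texture handle now apply per-descriptor shifts before OR-ing, so the two halves of the handle no longer have to arrive already placed in disjoint bit ranges; desc.shift_left and desc.secondary_shift_left come from the shader descriptor. A worked sketch of the combination, with illustrative values:

#include <cstdint>

// Two 32-bit words read from separate const buffers are shifted by
// per-descriptor amounts and then OR-ed into the final handle.
constexpr std::uint32_t CombineHandle(std::uint32_t lhs_word, std::uint32_t rhs_word,
                                      std::uint32_t lhs_shift, std::uint32_t rhs_shift) {
    const std::uint32_t lhs_raw = lhs_word << lhs_shift;
    const std::uint32_t rhs_raw = rhs_word << rhs_shift;
    return lhs_raw | rhs_raw;
}

// Illustrative only: a texture index in the low 20 bits, a sampler index above.
static_assert(CombineHandle(0x0001234u, 0x056u, 0u, 20u) == 0x05601234u);
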
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.cpp b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
index c249b34d4..0214b103a 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.cpp
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.cpp
@@ -11,11 +11,8 @@
 
 namespace Vulkan {
 
-InnerFence::InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_)
-    : FenceBase{payload_, is_stubbed_}, scheduler{scheduler_} {}
-
-InnerFence::InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_)
-    : FenceBase{address_, payload_, is_stubbed_}, scheduler{scheduler_} {}
+InnerFence::InnerFence(Scheduler& scheduler_, bool is_stubbed_)
+    : FenceBase{is_stubbed_}, scheduler{scheduler_} {}
 
 InnerFence::~InnerFence() = default;
 
@@ -48,12 +45,8 @@ FenceManager::FenceManager(VideoCore::RasterizerInterface& rasterizer_, Tegra::G
     : GenericFenceManager{rasterizer_, gpu_, texture_cache_, buffer_cache_, query_cache_},
       scheduler{scheduler_} {}
 
-Fence FenceManager::CreateFence(u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(scheduler, value, is_stubbed);
-}
-
-Fence FenceManager::CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) {
-    return std::make_shared<InnerFence>(scheduler, addr, value, is_stubbed);
+Fence FenceManager::CreateFence(bool is_stubbed) {
+    return std::make_shared<InnerFence>(scheduler, is_stubbed);
 }
 
 void FenceManager::QueueFence(Fence& fence) {
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 7c0bbd80a..7fe2afcd9 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -25,8 +25,7 @@ class Scheduler;
 
 class InnerFence : public VideoCommon::FenceBase {
 public:
-    explicit InnerFence(Scheduler& scheduler_, u32 payload_, bool is_stubbed_);
-    explicit InnerFence(Scheduler& scheduler_, GPUVAddr address_, u32 payload_, bool is_stubbed_);
+    explicit InnerFence(Scheduler& scheduler_, bool is_stubbed_);
     ~InnerFence();
 
     void Queue();
@@ -50,8 +49,7 @@ public:
                  QueryCache& query_cache, const Device& device, Scheduler& scheduler);
 
 protected:
-    Fence CreateFence(u32 value, bool is_stubbed) override;
-    Fence CreateFence(GPUVAddr addr, u32 value, bool is_stubbed) override;
+    Fence CreateFence(bool is_stubbed) override;
     void QueueFence(Fence& fence) override;
     bool IsFenceSignaled(Fence& fence) const override;
     void WaitFence(Fence& fence) override;
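
Dropping the (address, payload) constructor follows from the rasterizer interface change earlier in this diff: SignalSemaphore(addr, value) became SignalFence(std::function<void()>&&), so the memory write that used to be encoded in the fence is captured in a callback instead, and the fence itself only needs its stubbed flag. A sketch of that callback style; names are illustrative, not the actual VideoCommon::FenceManager:

#include <functional>
#include <utility>
#include <vector>

class FenceManagerSketch {
public:
    void SignalFence(std::function<void()>&& on_complete) {
        pending.push_back(std::move(on_complete));
        // ...queue a host fence on the scheduler here...
    }

    void ReleaseFences() {
        for (auto& operation : pending) {
            operation(); // e.g. the captured semaphore or syncpoint write
        }
        pending.clear();
    }

private:
    std::vector<std::function<void()>> pending;
};
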
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
index 5aca8f038..f47786f48 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.cpp
@@ -215,15 +215,14 @@ ConfigureFuncPtr ConfigureFunc(const std::array<vk::ShaderModule, NUM_STAGES>& m
 } // Anonymous namespace
 
 GraphicsPipeline::GraphicsPipeline(
-    Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, Scheduler& scheduler_,
-    BufferCache& buffer_cache_, TextureCache& texture_cache_,
+    Scheduler& scheduler_, BufferCache& buffer_cache_, TextureCache& texture_cache_,
     VideoCore::ShaderNotify* shader_notify, const Device& device_, DescriptorPool& descriptor_pool,
     UpdateDescriptorQueue& update_descriptor_queue_, Common::ThreadWorker* worker_thread,
     PipelineStatistics* pipeline_statistics, RenderPassCache& render_pass_cache,
     const GraphicsPipelineCacheKey& key_, std::array<vk::ShaderModule, NUM_STAGES> stages,
     const std::array<const Shader::Info*, NUM_STAGES>& infos)
-    : key{key_}, maxwell3d{maxwell3d_}, gpu_memory{gpu_memory_}, device{device_},
-      texture_cache{texture_cache_}, buffer_cache{buffer_cache_}, scheduler{scheduler_},
+    : key{key_}, device{device_}, texture_cache{texture_cache_},
+      buffer_cache{buffer_cache_}, scheduler{scheduler_},
       update_descriptor_queue{update_descriptor_queue_}, spv_modules{std::move(stages)} {
     if (shader_notify) {
         shader_notify->MarkShaderBuilding();
@@ -288,7 +287,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
 
     buffer_cache.SetUniformBuffersState(enabled_uniform_buffer_masks, &uniform_buffer_sizes);
 
-    const auto& regs{maxwell3d.regs};
+    const auto& regs{maxwell3d->regs};
     const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
     const auto config_stage{[&](size_t stage) LAMBDA_FORCEINLINE {
         const Shader::Info& info{stage_infos[stage]};
@@ -302,7 +301,7 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
                 ++ssbo_index;
             }
         }
-        const auto& cbufs{maxwell3d.state.shader_stages[stage].const_buffers};
+        const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
         const auto read_handle{[&](const auto& desc, u32 index) {
             ASSERT(cbufs[desc.cbuf_index].enabled);
             const u32 index_offset{index << desc.size_shift};
@@ -315,13 +314,14 @@ void GraphicsPipeline::ConfigureImpl(bool is_indexed) {
                 const u32 second_offset{desc.secondary_cbuf_offset + index_offset};
                 const GPUVAddr separate_addr{cbufs[desc.secondary_cbuf_index].address +
                                              second_offset};
-                const u32 lhs_raw{gpu_memory.Read<u32>(addr)};
-                const u32 rhs_raw{gpu_memory.Read<u32>(separate_addr)};
+                const u32 lhs_raw{gpu_memory->Read<u32>(addr) << desc.shift_left};
+                const u32 rhs_raw{gpu_memory->Read<u32>(separate_addr)
+                                  << desc.secondary_shift_left};
                 const u32 raw{lhs_raw | rhs_raw};
                 return TexturePair(raw, via_header_index);
             }
         }
-        return TexturePair(gpu_memory.Read<u32>(addr), via_header_index);
+        return TexturePair(gpu_memory->Read<u32>(addr), via_header_index);
     }};
     const auto add_image{[&](const auto& desc, bool blacklist) LAMBDA_FORCEINLINE {
         for (u32 index = 0; index < desc.count; ++index) {
diff --git a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
index e8949a9ab..85602592b 100644
--- a/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
+++ b/src/video_core/renderer_vulkan/vk_graphics_pipeline.h
@@ -69,15 +69,16 @@ class GraphicsPipeline {
     static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;
 
 public:
-    explicit GraphicsPipeline(
-        Tegra::Engines::Maxwell3D& maxwell3d, Tegra::MemoryManager& gpu_memory,
-        Scheduler& scheduler, BufferCache& buffer_cache, TextureCache& texture_cache,
-        VideoCore::ShaderNotify* shader_notify, const Device& device,
-        DescriptorPool& descriptor_pool, UpdateDescriptorQueue& update_descriptor_queue,
-        Common::ThreadWorker* worker_thread, PipelineStatistics* pipeline_statistics,
-        RenderPassCache& render_pass_cache, const GraphicsPipelineCacheKey& key,
-        std::array<vk::ShaderModule, NUM_STAGES> stages,
-        const std::array<const Shader::Info*, NUM_STAGES>& infos);
+    explicit GraphicsPipeline(Scheduler& scheduler, BufferCache& buffer_cache,
+                              TextureCache& texture_cache, VideoCore::ShaderNotify* shader_notify,
+                              const Device& device, DescriptorPool& descriptor_pool,
+                              UpdateDescriptorQueue& update_descriptor_queue,
+                              Common::ThreadWorker* worker_thread,
+                              PipelineStatistics* pipeline_statistics,
+                              RenderPassCache& render_pass_cache,
+                              const GraphicsPipelineCacheKey& key,
+                              std::array<vk::ShaderModule, NUM_STAGES> stages,
+                              const std::array<const Shader::Info*, NUM_STAGES>& infos);
 
     GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = delete;
     GraphicsPipeline(GraphicsPipeline&&) noexcept = delete;
@@ -109,6 +110,11 @@ public:
         return [](GraphicsPipeline* pl, bool is_indexed) { pl->ConfigureImpl<Spec>(is_indexed); };
     }
 
+    void SetEngine(Tegra::Engines::Maxwell3D* maxwell3d_, Tegra::MemoryManager* gpu_memory_) {
+        maxwell3d = maxwell3d_;
+        gpu_memory = gpu_memory_;
+    }
+
 private:
     template <typename Spec>
     void ConfigureImpl(bool is_indexed);
@@ -120,8 +126,8 @@ private:
     void Validate();
 
     const GraphicsPipelineCacheKey key;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::MemoryManager& gpu_memory;
+    Tegra::Engines::Maxwell3D* maxwell3d;
+    Tegra::MemoryManager* gpu_memory;
     const Device& device;
     TextureCache& texture_cache;
     BufferCache& buffer_cache;
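
Since a cached pipeline can now be reached from any channel, it no longer captures Maxwell3D and MemoryManager at construction; SetEngine re-binds the pointers, presumably just before Configure runs for a draw on the active channel (the call site is not shown in this hunk). The pattern reduced to its essentials, with stand-in types rather than the real Tegra engines:

struct Maxwell3DStub {
    int regs = 0;
};
struct MemoryManagerStub {};

class PipelineSketch {
public:
    void SetEngine(Maxwell3DStub* maxwell3d_, MemoryManagerStub* gpu_memory_) {
        maxwell3d = maxwell3d_;
        gpu_memory = gpu_memory_;
    }

    void Configure() {
        // Always reads the registers of the channel bound most recently.
        (void)maxwell3d->regs;
    }

private:
    Maxwell3DStub* maxwell3d = nullptr;
    MemoryManagerStub* gpu_memory = nullptr;
};
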
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
index 9708dc45e..732e7b6f2 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.cpp
@@ -259,20 +259,18 @@ bool GraphicsPipelineCacheKey::operator==(const GraphicsPipelineCacheKey& rhs) c
     return std::memcmp(&rhs, this, Size()) == 0;
 }
 
-PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, Tegra::Engines::Maxwell3D& maxwell3d_,
-                             Tegra::Engines::KeplerCompute& kepler_compute_,
-                             Tegra::MemoryManager& gpu_memory_, const Device& device_,
+PipelineCache::PipelineCache(RasterizerVulkan& rasterizer_, const Device& device_,
                              Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
                              UpdateDescriptorQueue& update_descriptor_queue_,
                              RenderPassCache& render_pass_cache_, BufferCache& buffer_cache_,
                              TextureCache& texture_cache_, VideoCore::ShaderNotify& shader_notify_)
-    : VideoCommon::ShaderCache{rasterizer_, gpu_memory_, maxwell3d_, kepler_compute_},
-      device{device_}, scheduler{scheduler_}, descriptor_pool{descriptor_pool_},
-      update_descriptor_queue{update_descriptor_queue_}, render_pass_cache{render_pass_cache_},
-      buffer_cache{buffer_cache_}, texture_cache{texture_cache_}, shader_notify{shader_notify_},
+    : VideoCommon::ShaderCache{rasterizer_}, device{device_}, scheduler{scheduler_},
+      descriptor_pool{descriptor_pool_}, update_descriptor_queue{update_descriptor_queue_},
+      render_pass_cache{render_pass_cache_}, buffer_cache{buffer_cache_},
+      texture_cache{texture_cache_}, shader_notify{shader_notify_},
       use_asynchronous_shaders{Settings::values.use_asynchronous_shaders.GetValue()},
-      workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "yuzu:PipelineBuilder"),
-      serialization_thread(1, "yuzu:PipelineSerialization") {
+      workers(std::max(std::thread::hardware_concurrency(), 2U) - 1, "VkPipelineBuilder"),
+      serialization_thread(1, "VkPipelineSerialization") {
     const auto& float_control{device.FloatControlProperties()};
     const VkDriverIdKHR driver_id{device.GetDriverID()};
     profile = Shader::Profile{
@@ -337,7 +335,7 @@ GraphicsPipeline* PipelineCache::CurrentGraphicsPipeline() {
         current_pipeline = nullptr;
         return nullptr;
     }
-    graphics_key.state.Refresh(maxwell3d, device.IsExtExtendedDynamicStateSupported(),
+    graphics_key.state.Refresh(*maxwell3d, device.IsExtExtendedDynamicStateSupported(),
                                device.IsExtVertexInputDynamicStateSupported());
 
     if (current_pipeline) {
@@ -357,7 +355,7 @@ ComputePipeline* PipelineCache::CurrentComputePipeline() {
     if (!shader) {
         return nullptr;
     }
-    const auto& qmd{kepler_compute.launch_description};
+    const auto& qmd{kepler_compute->launch_description};
     const ComputePipelineCacheKey key{
         .unique_hash = shader->unique_hash,
         .shared_memory_size = qmd.shared_alloc,
@@ -486,13 +484,13 @@ GraphicsPipeline* PipelineCache::BuiltPipeline(GraphicsPipeline* pipeline) const
     }
     // If something is using depth, we can assume that games are not rendering anything which
     // will be used one time.
-    if (maxwell3d.regs.zeta_enable) {
+    if (maxwell3d->regs.zeta_enable) {
         return nullptr;
     }
     // If games are using a small index count, we can assume these are full screen quads.
     // Usually these shaders are only used once for building textures so we can assume they
     // can't be built async
-    if (maxwell3d.regs.index_array.count <= 6 || maxwell3d.regs.vertex_buffer.count <= 6) {
+    if (maxwell3d->regs.index_array.count <= 6 || maxwell3d->regs.vertex_buffer.count <= 6) {
         return pipeline;
     }
     return nullptr;
@@ -557,10 +555,10 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline(
         previous_stage = &program;
     }
     Common::ThreadWorker* const thread_worker{build_in_parallel ? &workers : nullptr};
-    return std::make_unique<GraphicsPipeline>(
-        maxwell3d, gpu_memory, scheduler, buffer_cache, texture_cache, &shader_notify, device,
-        descriptor_pool, update_descriptor_queue, thread_worker, statistics, render_pass_cache, key,
-        std::move(modules), infos);
+    return std::make_unique<GraphicsPipeline>(scheduler, buffer_cache, texture_cache,
+                                              &shader_notify, device, descriptor_pool,
+                                              update_descriptor_queue, thread_worker, statistics,
+                                              render_pass_cache, key, std::move(modules), infos);
 
 } catch (const Shader::Exception& exception) {
     LOG_ERROR(Render_Vulkan, "{}", exception.what());
@@ -592,9 +590,9 @@ std::unique_ptr<GraphicsPipeline> PipelineCache::CreateGraphicsPipeline() {
 
 std::unique_ptr<ComputePipeline> PipelineCache::CreateComputePipeline(
     const ComputePipelineCacheKey& key, const ShaderInfo* shader) {
-    const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
-    const auto& qmd{kepler_compute.launch_description};
-    ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+    const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+    const auto& qmd{kepler_compute->launch_description};
+    ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
     env.SetCachedSize(shader->size_bytes);
 
     main_pools.ReleaseContents();
diff --git a/src/video_core/renderer_vulkan/vk_pipeline_cache.h b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
index 127957dbf..61f9e9366 100644
--- a/src/video_core/renderer_vulkan/vk_pipeline_cache.h
+++ b/src/video_core/renderer_vulkan/vk_pipeline_cache.h
@@ -100,10 +100,8 @@ struct ShaderPools {
100 100
101class PipelineCache : public VideoCommon::ShaderCache { 101class PipelineCache : public VideoCommon::ShaderCache {
102public: 102public:
103 explicit PipelineCache(RasterizerVulkan& rasterizer, Tegra::Engines::Maxwell3D& maxwell3d, 103 explicit PipelineCache(RasterizerVulkan& rasterizer, const Device& device, Scheduler& scheduler,
104 Tegra::Engines::KeplerCompute& kepler_compute, 104 DescriptorPool& descriptor_pool,
105 Tegra::MemoryManager& gpu_memory, const Device& device,
106 Scheduler& scheduler, DescriptorPool& descriptor_pool,
107 UpdateDescriptorQueue& update_descriptor_queue, 105 UpdateDescriptorQueue& update_descriptor_queue,
108 RenderPassCache& render_pass_cache, BufferCache& buffer_cache, 106 RenderPassCache& render_pass_cache, BufferCache& buffer_cache,
109 TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_); 107 TextureCache& texture_cache, VideoCore::ShaderNotify& shader_notify_);
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 2b859c6b8..7cb02631c 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -65,10 +65,9 @@ void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false; 65 usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
66} 66}
67 67
68QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, 68QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
69 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, 69 Scheduler& scheduler_)
70 const Device& device_, Scheduler& scheduler_) 70 : QueryCacheBase{rasterizer_}, device{device_}, scheduler{scheduler_},
71 : QueryCacheBase{rasterizer_, maxwell3d_, gpu_memory_}, device{device_}, scheduler{scheduler_},
72 query_pools{ 71 query_pools{
73 QueryPool{device_, scheduler_, QueryType::SamplesPassed}, 72 QueryPool{device_, scheduler_, QueryType::SamplesPassed},
74 } {} 73 } {}
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index b0d86c4f8..26762ee09 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -52,9 +52,8 @@ private:
52class QueryCache final 52class QueryCache final
53 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> { 53 : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
54public: 54public:
55 explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, 55 explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_, const Device& device_,
56 Tegra::Engines::Maxwell3D& maxwell3d_, Tegra::MemoryManager& gpu_memory_, 56 Scheduler& scheduler_);
57 const Device& device_, Scheduler& scheduler_);
58 ~QueryCache(); 57 ~QueryCache();
59 58
60 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type); 59 std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 7e40c2df1..acfd5da7d 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -11,6 +11,7 @@
11#include "common/microprofile.h" 11#include "common/microprofile.h"
12#include "common/scope_exit.h" 12#include "common/scope_exit.h"
13#include "common/settings.h" 13#include "common/settings.h"
14#include "video_core/control/channel_state.h"
14#include "video_core/engines/kepler_compute.h" 15#include "video_core/engines/kepler_compute.h"
15#include "video_core/engines/maxwell_3d.h" 16#include "video_core/engines/maxwell_3d.h"
16#include "video_core/renderer_vulkan/blit_image.h" 17#include "video_core/renderer_vulkan/blit_image.h"
@@ -148,14 +149,11 @@ DrawParams MakeDrawParams(const Maxwell& regs, u32 num_instances, bool is_instan
148} // Anonymous namespace 149} // Anonymous namespace
149 150
150RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 151RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
151 Tegra::MemoryManager& gpu_memory_,
152 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_, 152 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
153 const Device& device_, MemoryAllocator& memory_allocator_, 153 const Device& device_, MemoryAllocator& memory_allocator_,
154 StateTracker& state_tracker_, Scheduler& scheduler_) 154 StateTracker& state_tracker_, Scheduler& scheduler_)
155 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, 155 : RasterizerAccelerated{cpu_memory_}, gpu{gpu_}, screen_info{screen_info_}, device{device_},
156 gpu_memory{gpu_memory_}, maxwell3d{gpu.Maxwell3D()}, kepler_compute{gpu.KeplerCompute()}, 156 memory_allocator{memory_allocator_}, state_tracker{state_tracker_}, scheduler{scheduler_},
157 screen_info{screen_info_}, device{device_}, memory_allocator{memory_allocator_},
158 state_tracker{state_tracker_}, scheduler{scheduler_},
159 staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler), 157 staging_pool(device, memory_allocator, scheduler), descriptor_pool(device, scheduler),
160 update_descriptor_queue(device, scheduler), 158 update_descriptor_queue(device, scheduler),
161 blit_image(device, scheduler, state_tracker, descriptor_pool), 159 blit_image(device, scheduler, state_tracker, descriptor_pool),
@@ -165,14 +163,13 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
165 memory_allocator, staging_pool, 163 memory_allocator, staging_pool,
166 blit_image, astc_decoder_pass, 164 blit_image, astc_decoder_pass,
167 render_pass_cache}, 165 render_pass_cache},
168 texture_cache(texture_cache_runtime, *this, maxwell3d, kepler_compute, gpu_memory), 166 texture_cache(texture_cache_runtime, *this),
169 buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool, 167 buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
170 update_descriptor_queue, descriptor_pool), 168 update_descriptor_queue, descriptor_pool),
171 buffer_cache(*this, maxwell3d, kepler_compute, gpu_memory, cpu_memory_, buffer_cache_runtime), 169 buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
172 pipeline_cache(*this, maxwell3d, kepler_compute, gpu_memory, device, scheduler, 170 pipeline_cache(*this, device, scheduler, descriptor_pool, update_descriptor_queue,
173 descriptor_pool, update_descriptor_queue, render_pass_cache, buffer_cache, 171 render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
174 texture_cache, gpu.ShaderNotify()), 172 query_cache{*this, device, scheduler}, accelerate_dma{buffer_cache},
175 query_cache{*this, maxwell3d, gpu_memory, device, scheduler}, accelerate_dma{buffer_cache},
176 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler), 173 fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
177 wfi_event(device.GetLogical().CreateEvent()) { 174 wfi_event(device.GetLogical().CreateEvent()) {
178 scheduler.SetQueryCache(query_cache); 175 scheduler.SetQueryCache(query_cache);
@@ -193,14 +190,16 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
193 return; 190 return;
194 } 191 }
195 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex}; 192 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
 193 // Update the engine, as the bound channel may have changed.
194 pipeline->SetEngine(maxwell3d, gpu_memory);
196 pipeline->Configure(is_indexed); 195 pipeline->Configure(is_indexed);
197 196
198 BeginTransformFeedback(); 197 BeginTransformFeedback();
199 198
200 UpdateDynamicStates(); 199 UpdateDynamicStates();
201 200
202 const auto& regs{maxwell3d.regs}; 201 const auto& regs{maxwell3d->regs};
203 const u32 num_instances{maxwell3d.mme_draw.instance_count}; 202 const u32 num_instances{maxwell3d->mme_draw.instance_count};
204 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)}; 203 const DrawParams draw_params{MakeDrawParams(regs, num_instances, is_instanced, is_indexed)};
205 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) { 204 scheduler.Record([draw_params](vk::CommandBuffer cmdbuf) {
206 if (draw_params.is_indexed) { 205 if (draw_params.is_indexed) {
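The new SetEngine call before Configure exists because pipelines are now shared across channels: a cached pipeline can no longer hold engine references captured at creation time, so each draw rebinds it to whichever maxwell3d/gpu_memory the active channel currently owns. A hedged sketch of that per-draw rebinding (the types here are illustrative):

    // Sketch: refresh engine pointers before configuring, since the bound
    // channel (and therefore maxwell3d/gpu_memory) may have switched.
    template <typename Pipeline, typename Engine, typename Memory>
    void PrepareDraw(Pipeline& pipeline, Engine* maxwell3d, Memory* gpu_memory,
                     bool is_indexed) {
        pipeline.SetEngine(maxwell3d, gpu_memory);
        pipeline.Configure(is_indexed);
    }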
@@ -218,14 +217,14 @@ void RasterizerVulkan::Draw(bool is_indexed, bool is_instanced) {
218void RasterizerVulkan::Clear() { 217void RasterizerVulkan::Clear() {
219 MICROPROFILE_SCOPE(Vulkan_Clearing); 218 MICROPROFILE_SCOPE(Vulkan_Clearing);
220 219
221 if (!maxwell3d.ShouldExecute()) { 220 if (!maxwell3d->ShouldExecute()) {
222 return; 221 return;
223 } 222 }
224 FlushWork(); 223 FlushWork();
225 224
226 query_cache.UpdateCounters(); 225 query_cache.UpdateCounters();
227 226
228 auto& regs = maxwell3d.regs; 227 auto& regs = maxwell3d->regs;
229 const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B || 228 const bool use_color = regs.clear_buffers.R || regs.clear_buffers.G || regs.clear_buffers.B ||
230 regs.clear_buffers.A; 229 regs.clear_buffers.A;
231 const bool use_depth = regs.clear_buffers.Z; 230 const bool use_depth = regs.clear_buffers.Z;
@@ -248,8 +247,15 @@ void RasterizerVulkan::Clear() {
248 } 247 }
249 UpdateViewportsState(regs); 248 UpdateViewportsState(regs);
250 249
250 VkRect2D default_scissor;
251 default_scissor.offset.x = 0;
252 default_scissor.offset.y = 0;
253 default_scissor.extent.width = std::numeric_limits<s32>::max();
254 default_scissor.extent.height = std::numeric_limits<s32>::max();
255
251 VkClearRect clear_rect{ 256 VkClearRect clear_rect{
252 .rect = GetScissorState(regs, 0, up_scale, down_shift), 257 .rect = regs.clear_flags.scissor ? GetScissorState(regs, 0, up_scale, down_shift)
258 : default_scissor,
253 .baseArrayLayer = regs.clear_buffers.layer, 259 .baseArrayLayer = regs.clear_buffers.layer,
254 .layerCount = 1, 260 .layerCount = 1,
255 }; 261 };
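The clear rectangle now respects regs.clear_flags.scissor: only a scissored clear uses the guest's scissor state, otherwise the maximal default rectangle built a few lines up is substituted. A standalone sketch of the selection (VkRect2D is the real Vulkan type; the helper is illustrative):

    #include <cstdint>
    #include <limits>
    #include <vulkan/vulkan.h>

    // Sketch: choose the clear rect source; guest_scissor stands in for the
    // result of GetScissorState(regs, 0, up_scale, down_shift).
    VkRect2D SelectClearRect(bool scissor_enabled, VkRect2D guest_scissor) {
        if (scissor_enabled) {
            return guest_scissor;
        }
        VkRect2D rect{};
        rect.offset = {0, 0};
        rect.extent.width = static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
        rect.extent.height = static_cast<uint32_t>(std::numeric_limits<int32_t>::max());
        return rect;
    }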
@@ -339,9 +345,9 @@ void RasterizerVulkan::DispatchCompute() {
339 return; 345 return;
340 } 346 }
341 std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex}; 347 std::scoped_lock lock{texture_cache.mutex, buffer_cache.mutex};
342 pipeline->Configure(kepler_compute, gpu_memory, scheduler, buffer_cache, texture_cache); 348 pipeline->Configure(*kepler_compute, *gpu_memory, scheduler, buffer_cache, texture_cache);
343 349
344 const auto& qmd{kepler_compute.launch_description}; 350 const auto& qmd{kepler_compute->launch_description};
345 const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z}; 351 const std::array<u32, 3> dim{qmd.grid_dim_x, qmd.grid_dim_y, qmd.grid_dim_z};
346 scheduler.RequestOutsideRenderPassOperationContext(); 352 scheduler.RequestOutsideRenderPassOperationContext();
347 scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); }); 353 scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
@@ -422,7 +428,7 @@ void RasterizerVulkan::OnCPUWrite(VAddr addr, u64 size) {
422 } 428 }
423} 429}
424 430
425void RasterizerVulkan::SyncGuestHost() { 431void RasterizerVulkan::InvalidateGPUCache() {
426 pipeline_cache.SyncGuestHost(); 432 pipeline_cache.SyncGuestHost();
427 { 433 {
428 std::scoped_lock lock{buffer_cache.mutex}; 434 std::scoped_lock lock{buffer_cache.mutex};
@@ -442,40 +448,30 @@ void RasterizerVulkan::UnmapMemory(VAddr addr, u64 size) {
442 pipeline_cache.OnCPUWrite(addr, size); 448 pipeline_cache.OnCPUWrite(addr, size);
443} 449}
444 450
445void RasterizerVulkan::ModifyGPUMemory(GPUVAddr addr, u64 size) { 451void RasterizerVulkan::ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) {
446 { 452 {
447 std::scoped_lock lock{texture_cache.mutex}; 453 std::scoped_lock lock{texture_cache.mutex};
448 texture_cache.UnmapGPUMemory(addr, size); 454 texture_cache.UnmapGPUMemory(as_id, addr, size);
449 } 455 }
450} 456}
451 457
452void RasterizerVulkan::SignalSemaphore(GPUVAddr addr, u32 value) { 458void RasterizerVulkan::SignalFence(std::function<void()>&& func) {
453 if (!gpu.IsAsync()) { 459 fence_manager.SignalFence(std::move(func));
454 gpu_memory.Write<u32>(addr, value); 460}
455 return; 461
456 } 462void RasterizerVulkan::SyncOperation(std::function<void()>&& func) {
457 fence_manager.SignalSemaphore(addr, value); 463 fence_manager.SyncOperation(std::move(func));
458} 464}
459 465
460void RasterizerVulkan::SignalSyncPoint(u32 value) { 466void RasterizerVulkan::SignalSyncPoint(u32 value) {
461 if (!gpu.IsAsync()) {
462 gpu.IncrementSyncPoint(value);
463 return;
464 }
465 fence_manager.SignalSyncPoint(value); 467 fence_manager.SignalSyncPoint(value);
466} 468}
467 469
468void RasterizerVulkan::SignalReference() { 470void RasterizerVulkan::SignalReference() {
469 if (!gpu.IsAsync()) {
470 return;
471 }
472 fence_manager.SignalOrdering(); 471 fence_manager.SignalOrdering();
473} 472}
474 473
475void RasterizerVulkan::ReleaseFences() { 474void RasterizerVulkan::ReleaseFences() {
476 if (!gpu.IsAsync()) {
477 return;
478 }
479 fence_manager.WaitPendingFences(); 475 fence_manager.WaitPendingFences();
480} 476}
481 477
@@ -552,13 +548,13 @@ Tegra::Engines::AccelerateDMAInterface& RasterizerVulkan::AccessAccelerateDMA()
552} 548}
553 549
554void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 550void RasterizerVulkan::AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
555 std::span<u8> memory) { 551 std::span<const u8> memory) {
556 auto cpu_addr = gpu_memory.GpuToCpuAddress(address); 552 auto cpu_addr = gpu_memory->GpuToCpuAddress(address);
557 if (!cpu_addr) [[unlikely]] { 553 if (!cpu_addr) [[unlikely]] {
558 gpu_memory.WriteBlock(address, memory.data(), copy_size); 554 gpu_memory->WriteBlock(address, memory.data(), copy_size);
559 return; 555 return;
560 } 556 }
561 gpu_memory.WriteBlockUnsafe(address, memory.data(), copy_size); 557 gpu_memory->WriteBlockUnsafe(address, memory.data(), copy_size);
562 { 558 {
563 std::unique_lock<std::mutex> lock{buffer_cache.mutex}; 559 std::unique_lock<std::mutex> lock{buffer_cache.mutex};
564 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) { 560 if (!buffer_cache.InlineMemory(*cpu_addr, copy_size, memory)) {
@@ -627,7 +623,7 @@ bool AccelerateDMA::BufferCopy(GPUVAddr src_address, GPUVAddr dest_address, u64
627} 623}
628 624
629void RasterizerVulkan::UpdateDynamicStates() { 625void RasterizerVulkan::UpdateDynamicStates() {
630 auto& regs = maxwell3d.regs; 626 auto& regs = maxwell3d->regs;
631 UpdateViewportsState(regs); 627 UpdateViewportsState(regs);
632 UpdateScissorsState(regs); 628 UpdateScissorsState(regs);
633 UpdateDepthBias(regs); 629 UpdateDepthBias(regs);
@@ -651,7 +647,7 @@ void RasterizerVulkan::UpdateDynamicStates() {
651} 647}
652 648
653void RasterizerVulkan::BeginTransformFeedback() { 649void RasterizerVulkan::BeginTransformFeedback() {
654 const auto& regs = maxwell3d.regs; 650 const auto& regs = maxwell3d->regs;
655 if (regs.tfb_enabled == 0) { 651 if (regs.tfb_enabled == 0) {
656 return; 652 return;
657 } 653 }
@@ -667,7 +663,7 @@ void RasterizerVulkan::BeginTransformFeedback() {
667} 663}
668 664
669void RasterizerVulkan::EndTransformFeedback() { 665void RasterizerVulkan::EndTransformFeedback() {
670 const auto& regs = maxwell3d.regs; 666 const auto& regs = maxwell3d->regs;
671 if (regs.tfb_enabled == 0) { 667 if (regs.tfb_enabled == 0) {
672 return; 668 return;
673 } 669 }
@@ -917,7 +913,7 @@ void RasterizerVulkan::UpdateStencilTestEnable(Tegra::Engines::Maxwell3D::Regs&
917} 913}
918 914
919void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) { 915void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs) {
920 auto& dirty{maxwell3d.dirty.flags}; 916 auto& dirty{maxwell3d->dirty.flags};
921 if (!dirty[Dirty::VertexInput]) { 917 if (!dirty[Dirty::VertexInput]) {
922 return; 918 return;
923 } 919 }
@@ -974,4 +970,41 @@ void RasterizerVulkan::UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs)
974 }); 970 });
975} 971}
976 972
973void RasterizerVulkan::InitializeChannel(Tegra::Control::ChannelState& channel) {
974 CreateChannel(channel);
975 {
976 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
977 texture_cache.CreateChannel(channel);
978 buffer_cache.CreateChannel(channel);
979 }
980 pipeline_cache.CreateChannel(channel);
981 query_cache.CreateChannel(channel);
982 state_tracker.SetupTables(channel);
983}
984
985void RasterizerVulkan::BindChannel(Tegra::Control::ChannelState& channel) {
986 const s32 channel_id = channel.bind_id;
987 BindToChannel(channel_id);
988 {
989 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
990 texture_cache.BindToChannel(channel_id);
991 buffer_cache.BindToChannel(channel_id);
992 }
993 pipeline_cache.BindToChannel(channel_id);
994 query_cache.BindToChannel(channel_id);
995 state_tracker.ChangeChannel(channel);
996 state_tracker.InvalidateState();
997}
998
999void RasterizerVulkan::ReleaseChannel(s32 channel_id) {
1000 EraseChannel(channel_id);
1001 {
1002 std::scoped_lock lock{buffer_cache.mutex, texture_cache.mutex};
1003 texture_cache.EraseChannel(channel_id);
1004 buffer_cache.EraseChannel(channel_id);
1005 }
1006 pipeline_cache.EraseChannel(channel_id);
1007 query_cache.EraseChannel(channel_id);
1008}
1009
977} // namespace Vulkan 1010} // namespace Vulkan
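The three methods added above give each GPU channel independent cache state: InitializeChannel registers the channel with every cache and sets up its dirty-flag tables, BindChannel retargets all caches (and the state tracker) before work is recorded, and ReleaseChannel tears the per-channel state down. A minimal sketch of the same lifecycle with a hypothetical cache type, not the actual yuzu classes:

    #include <unordered_map>

    struct ChannelInfoSketch {
        int engine_state{}; // placeholder for per-channel engine pointers
    };

    class ChannelCacheSketch {
    public:
        void CreateChannel(int channel_id) {
            channels.emplace(channel_id, ChannelInfoSketch{});
        }
        void BindToChannel(int channel_id) {
            bound = &channels.at(channel_id); // later lookups use this channel's state
        }
        void EraseChannel(int channel_id) {
            if (bound == &channels.at(channel_id)) {
                bound = nullptr;
            }
            channels.erase(channel_id);
        }

    private:
        std::unordered_map<int, ChannelInfoSketch> channels;
        ChannelInfoSketch* bound = nullptr;
    };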
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index 0370ea39b..4cde3c983 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -8,6 +8,7 @@
8#include <boost/container/static_vector.hpp> 8#include <boost/container/static_vector.hpp>
9 9
10#include "common/common_types.h" 10#include "common/common_types.h"
11#include "video_core/control/channel_state_cache.h"
11#include "video_core/engines/maxwell_dma.h" 12#include "video_core/engines/maxwell_dma.h"
12#include "video_core/rasterizer_accelerated.h" 13#include "video_core/rasterizer_accelerated.h"
13#include "video_core/rasterizer_interface.h" 14#include "video_core/rasterizer_interface.h"
@@ -54,13 +55,13 @@ private:
54 BufferCache& buffer_cache; 55 BufferCache& buffer_cache;
55}; 56};
56 57
57class RasterizerVulkan final : public VideoCore::RasterizerAccelerated { 58class RasterizerVulkan final : public VideoCore::RasterizerAccelerated,
59 protected VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
58public: 60public:
59 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_, 61 explicit RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra::GPU& gpu_,
60 Tegra::MemoryManager& gpu_memory_, Core::Memory::Memory& cpu_memory_, 62 Core::Memory::Memory& cpu_memory_, ScreenInfo& screen_info_,
61 ScreenInfo& screen_info_, const Device& device_, 63 const Device& device_, MemoryAllocator& memory_allocator_,
62 MemoryAllocator& memory_allocator_, StateTracker& state_tracker_, 64 StateTracker& state_tracker_, Scheduler& scheduler_);
63 Scheduler& scheduler_);
64 ~RasterizerVulkan() override; 65 ~RasterizerVulkan() override;
65 66
66 void Draw(bool is_indexed, bool is_instanced) override; 67 void Draw(bool is_indexed, bool is_instanced) override;
@@ -75,10 +76,11 @@ public:
75 bool MustFlushRegion(VAddr addr, u64 size) override; 76 bool MustFlushRegion(VAddr addr, u64 size) override;
76 void InvalidateRegion(VAddr addr, u64 size) override; 77 void InvalidateRegion(VAddr addr, u64 size) override;
77 void OnCPUWrite(VAddr addr, u64 size) override; 78 void OnCPUWrite(VAddr addr, u64 size) override;
78 void SyncGuestHost() override; 79 void InvalidateGPUCache() override;
79 void UnmapMemory(VAddr addr, u64 size) override; 80 void UnmapMemory(VAddr addr, u64 size) override;
80 void ModifyGPUMemory(GPUVAddr addr, u64 size) override; 81 void ModifyGPUMemory(size_t as_id, GPUVAddr addr, u64 size) override;
81 void SignalSemaphore(GPUVAddr addr, u32 value) override; 82 void SignalFence(std::function<void()>&& func) override;
83 void SyncOperation(std::function<void()>&& func) override;
82 void SignalSyncPoint(u32 value) override; 84 void SignalSyncPoint(u32 value) override;
83 void SignalReference() override; 85 void SignalReference() override;
84 void ReleaseFences() override; 86 void ReleaseFences() override;
@@ -93,12 +95,18 @@ public:
93 const Tegra::Engines::Fermi2D::Config& copy_config) override; 95 const Tegra::Engines::Fermi2D::Config& copy_config) override;
94 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override; 96 Tegra::Engines::AccelerateDMAInterface& AccessAccelerateDMA() override;
95 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size, 97 void AccelerateInlineToMemory(GPUVAddr address, size_t copy_size,
96 std::span<u8> memory) override; 98 std::span<const u8> memory) override;
97 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr, 99 bool AccelerateDisplay(const Tegra::FramebufferConfig& config, VAddr framebuffer_addr,
98 u32 pixel_stride) override; 100 u32 pixel_stride) override;
99 void LoadDiskResources(u64 title_id, std::stop_token stop_loading, 101 void LoadDiskResources(u64 title_id, std::stop_token stop_loading,
100 const VideoCore::DiskResourceLoadCallback& callback) override; 102 const VideoCore::DiskResourceLoadCallback& callback) override;
101 103
104 void InitializeChannel(Tegra::Control::ChannelState& channel) override;
105
106 void BindChannel(Tegra::Control::ChannelState& channel) override;
107
108 void ReleaseChannel(s32 channel_id) override;
109
102private: 110private:
103 static constexpr size_t MAX_TEXTURES = 192; 111 static constexpr size_t MAX_TEXTURES = 192;
104 static constexpr size_t MAX_IMAGES = 48; 112 static constexpr size_t MAX_IMAGES = 48;
@@ -134,9 +142,6 @@ private:
134 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs); 142 void UpdateVertexInput(Tegra::Engines::Maxwell3D::Regs& regs);
135 143
136 Tegra::GPU& gpu; 144 Tegra::GPU& gpu;
137 Tegra::MemoryManager& gpu_memory;
138 Tegra::Engines::Maxwell3D& maxwell3d;
139 Tegra::Engines::KeplerCompute& kepler_compute;
140 145
141 ScreenInfo& screen_info; 146 ScreenInfo& screen_info;
142 const Device& device; 147 const Device& device;
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index a331ff37e..d96720b80 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -136,7 +136,7 @@ bool Scheduler::UpdateRescaling(bool is_rescaling) {
136} 136}
137 137
138void Scheduler::WorkerThread(std::stop_token stop_token) { 138void Scheduler::WorkerThread(std::stop_token stop_token) {
139 Common::SetCurrentThreadName("yuzu:VulkanWorker"); 139 Common::SetCurrentThreadName("VulkanWorker");
140 do { 140 do {
141 std::unique_ptr<CommandChunk> work; 141 std::unique_ptr<CommandChunk> work;
142 { 142 {
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
index 06f68d09a..7fb256953 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.cpp
@@ -26,20 +26,39 @@ using namespace Common::Literals;
26constexpr VkDeviceSize MAX_ALIGNMENT = 256; 26constexpr VkDeviceSize MAX_ALIGNMENT = 256;
27// Maximum size to put elements in the stream buffer 27// Maximum size to put elements in the stream buffer
28constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB; 28constexpr VkDeviceSize MAX_STREAM_BUFFER_REQUEST_SIZE = 8_MiB;
29// Stream buffer size in bytes
30constexpr VkDeviceSize STREAM_BUFFER_SIZE = 128_MiB;
31constexpr VkDeviceSize REGION_SIZE = STREAM_BUFFER_SIZE / StagingBufferPool::NUM_SYNCS;
32 29
33constexpr VkMemoryPropertyFlags HOST_FLAGS = 30constexpr VkMemoryPropertyFlags HOST_FLAGS =
34 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; 31 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
35constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS; 32constexpr VkMemoryPropertyFlags STREAM_FLAGS = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | HOST_FLAGS;
36 33
37bool IsStreamHeap(VkMemoryHeap heap) noexcept { 34static bool IsStreamHeap(VkMemoryHeap heap, size_t staging_buffer_size) noexcept {
38 return STREAM_BUFFER_SIZE < (heap.size * 2) / 3; 35 return staging_buffer_size < (heap.size * 2) / 3;
36}
37
38static bool HasLargeDeviceLocalHostVisibleMemory(const VkPhysicalDeviceMemoryProperties& props) {
39 const auto flags{VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT};
40
41 for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
42 const auto& memory_type{props.memoryTypes[type_index]};
43
44 if ((memory_type.propertyFlags & flags) != flags) {
45 // Memory must be device local and host visible
46 continue;
47 }
48
49 const auto& heap{props.memoryHeaps[memory_type.heapIndex]};
50 if (heap.size >= 7168_MiB) {
51 // This is the right type of memory
52 return true;
53 }
54 }
55
56 return false;
39} 57}
40 58
41std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask, 59std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
42 VkMemoryPropertyFlags flags) noexcept { 60 VkMemoryPropertyFlags flags,
61 size_t staging_buffer_size) noexcept {
43 for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) { 62 for (u32 type_index = 0; type_index < props.memoryTypeCount; ++type_index) {
44 if (((type_mask >> type_index) & 1) == 0) { 63 if (((type_mask >> type_index) & 1) == 0) {
45 // Memory type is incompatible 64 // Memory type is incompatible
@@ -50,7 +69,7 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
50 // Memory type doesn't have the flags we want 69 // Memory type doesn't have the flags we want
51 continue; 70 continue;
52 } 71 }
53 if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex])) { 72 if (!IsStreamHeap(props.memoryHeaps[memory_type.heapIndex], staging_buffer_size)) {
54 // Memory heap is not suitable for streaming 73 // Memory heap is not suitable for streaming
55 continue; 74 continue;
56 } 75 }
@@ -61,17 +80,17 @@ std::optional<u32> FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& p
61} 80}
62 81
63u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask, 82u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_mask,
64 bool try_device_local) { 83 bool try_device_local, size_t staging_buffer_size) {
65 std::optional<u32> type; 84 std::optional<u32> type;
66 if (try_device_local) { 85 if (try_device_local) {
67 // Try to find a DEVICE_LOCAL_BIT type, Nvidia and AMD have a dedicated heap for this 86 // Try to find a DEVICE_LOCAL_BIT type, Nvidia and AMD have a dedicated heap for this
68 type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS); 87 type = FindMemoryTypeIndex(props, type_mask, STREAM_FLAGS, staging_buffer_size);
69 if (type) { 88 if (type) {
70 return *type; 89 return *type;
71 } 90 }
72 } 91 }
73 // Otherwise try without the DEVICE_LOCAL_BIT 92 // Otherwise try without the DEVICE_LOCAL_BIT
74 type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS); 93 type = FindMemoryTypeIndex(props, type_mask, HOST_FLAGS, staging_buffer_size);
75 if (type) { 94 if (type) {
76 return *type; 95 return *type;
77 } 96 }
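FindMemoryTypeIndex still walks the device's memory types in two passes, preferring a DEVICE_LOCAL stream heap before settling for plain host-visible memory; the difference is that the heap suitability test now compares against the dynamically chosen staging buffer size instead of a fixed constant. A hedged standalone sketch of one pass (Vulkan types are real; the function name is illustrative):

    #include <cstdint>
    #include <optional>
    #include <vulkan/vulkan.h>

    // Sketch: a type is usable if the buffer allows it, it has the wanted
    // flags, and the staging buffer fits in under two thirds of its heap.
    std::optional<uint32_t> FindTypeSketch(const VkPhysicalDeviceMemoryProperties& props,
                                           uint32_t type_mask, VkMemoryPropertyFlags flags,
                                           VkDeviceSize staging_size) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            if (((type_mask >> i) & 1) == 0) {
                continue; // incompatible with the buffer's requirements
            }
            if ((props.memoryTypes[i].propertyFlags & flags) != flags) {
                continue; // missing a wanted property flag
            }
            const VkMemoryHeap& heap = props.memoryHeaps[props.memoryTypes[i].heapIndex];
            if (staging_size >= (heap.size * 2) / 3) {
                continue; // heap too small to stream from comfortably
            }
            return i;
        }
        return std::nullopt;
    }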
@@ -79,20 +98,32 @@ u32 FindMemoryTypeIndex(const VkPhysicalDeviceMemoryProperties& props, u32 type_
79 throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY); 98 throw vk::Exception(VK_ERROR_OUT_OF_DEVICE_MEMORY);
80} 99}
81 100
82size_t Region(size_t iterator) noexcept { 101size_t Region(size_t iterator, size_t region_size) noexcept {
83 return iterator / REGION_SIZE; 102 return iterator / region_size;
84} 103}
85} // Anonymous namespace 104} // Anonymous namespace
86 105
87StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_, 106StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& memory_allocator_,
88 Scheduler& scheduler_) 107 Scheduler& scheduler_)
89 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} { 108 : device{device_}, memory_allocator{memory_allocator_}, scheduler{scheduler_} {
109
110 const auto memory_properties{device.GetPhysical().GetMemoryProperties().memoryProperties};
111 if (HasLargeDeviceLocalHostVisibleMemory(memory_properties)) {
112 // Possible on many integrated and newer discrete cards
113 staging_buffer_size = 1_GiB;
114 } else {
115 // Well-supported default size used by most Vulkan PC games
116 staging_buffer_size = 256_MiB;
117 }
118
119 region_size = staging_buffer_size / StagingBufferPool::NUM_SYNCS;
120
90 const vk::Device& dev = device.GetLogical(); 121 const vk::Device& dev = device.GetLogical();
91 stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{ 122 stream_buffer = dev.CreateBuffer(VkBufferCreateInfo{
92 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, 123 .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
93 .pNext = nullptr, 124 .pNext = nullptr,
94 .flags = 0, 125 .flags = 0,
95 .size = STREAM_BUFFER_SIZE, 126 .size = staging_buffer_size,
96 .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | 127 .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
97 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, 128 VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
98 .sharingMode = VK_SHARING_MODE_EXCLUSIVE, 129 .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
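The sizing decision at the top of the constructor replaces the old fixed 128_MiB stream buffer: if any DEVICE_LOCAL | HOST_VISIBLE heap is at least 7168 MiB, a 1 GiB staging buffer is used, otherwise a conservative 256 MiB. A standalone sketch of that selection (Vulkan types are real; the literals mirror the hunk above):

    #include <cstddef>
    #include <vulkan/vulkan.h>

    constexpr size_t MiB = size_t{1} << 20;
    constexpr size_t GiB = size_t{1} << 30;

    // Sketch: pick the staging buffer size from the reported memory heaps.
    size_t ChooseStagingBufferSize(const VkPhysicalDeviceMemoryProperties& props) {
        constexpr VkMemoryPropertyFlags wanted =
            VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            if ((props.memoryTypes[i].propertyFlags & wanted) != wanted) {
                continue; // must be both device-local and host-visible
            }
            if (props.memoryHeaps[props.memoryTypes[i].heapIndex].size >= 7168 * MiB) {
                return 1 * GiB; // large integrated/resizable-BAR heap: go big
            }
        }
        return 256 * MiB; // well-supported default
    }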
@@ -117,19 +148,18 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
117 .image = nullptr, 148 .image = nullptr,
118 .buffer = *stream_buffer, 149 .buffer = *stream_buffer,
119 }; 150 };
120 const auto memory_properties = device.GetPhysical().GetMemoryProperties().memoryProperties;
121 VkMemoryAllocateInfo stream_memory_info{ 151 VkMemoryAllocateInfo stream_memory_info{
122 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, 152 .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
123 .pNext = make_dedicated ? &dedicated_info : nullptr, 153 .pNext = make_dedicated ? &dedicated_info : nullptr,
124 .allocationSize = requirements.size, 154 .allocationSize = requirements.size,
125 .memoryTypeIndex = 155 .memoryTypeIndex = FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true,
126 FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, true), 156 staging_buffer_size),
127 }; 157 };
128 stream_memory = dev.TryAllocateMemory(stream_memory_info); 158 stream_memory = dev.TryAllocateMemory(stream_memory_info);
129 if (!stream_memory) { 159 if (!stream_memory) {
130 LOG_INFO(Render_Vulkan, "Dynamic memory allocation failed, trying with system memory"); 160 LOG_INFO(Render_Vulkan, "Dynamic memory allocation failed, trying with system memory");
131 stream_memory_info.memoryTypeIndex = 161 stream_memory_info.memoryTypeIndex = FindMemoryTypeIndex(
132 FindMemoryTypeIndex(memory_properties, requirements.memoryTypeBits, false); 162 memory_properties, requirements.memoryTypeBits, false, staging_buffer_size);
133 stream_memory = dev.AllocateMemory(stream_memory_info); 163 stream_memory = dev.AllocateMemory(stream_memory_info);
134 } 164 }
135 165
@@ -137,7 +167,7 @@ StagingBufferPool::StagingBufferPool(const Device& device_, MemoryAllocator& mem
137 stream_memory.SetObjectNameEXT("Stream Buffer Memory"); 167 stream_memory.SetObjectNameEXT("Stream Buffer Memory");
138 } 168 }
139 stream_buffer.BindMemory(*stream_memory, 0); 169 stream_buffer.BindMemory(*stream_memory, 0);
140 stream_pointer = stream_memory.Map(0, STREAM_BUFFER_SIZE); 170 stream_pointer = stream_memory.Map(0, staging_buffer_size);
141} 171}
142 172
143StagingBufferPool::~StagingBufferPool() = default; 173StagingBufferPool::~StagingBufferPool() = default;
@@ -158,25 +188,25 @@ void StagingBufferPool::TickFrame() {
158} 188}
159 189
160StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) { 190StagingBufferRef StagingBufferPool::GetStreamBuffer(size_t size) {
161 if (AreRegionsActive(Region(free_iterator) + 1, 191 if (AreRegionsActive(Region(free_iterator, region_size) + 1,
162 std::min(Region(iterator + size) + 1, NUM_SYNCS))) { 192 std::min(Region(iterator + size, region_size) + 1, NUM_SYNCS))) {
163 // Avoid waiting for the previous usages to be free 193 // Avoid waiting for the previous usages to be free
164 return GetStagingBuffer(size, MemoryUsage::Upload); 194 return GetStagingBuffer(size, MemoryUsage::Upload);
165 } 195 }
166 const u64 current_tick = scheduler.CurrentTick(); 196 const u64 current_tick = scheduler.CurrentTick();
167 std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + Region(iterator), 197 std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
168 current_tick); 198 sync_ticks.begin() + Region(iterator, region_size), current_tick);
169 used_iterator = iterator; 199 used_iterator = iterator;
170 free_iterator = std::max(free_iterator, iterator + size); 200 free_iterator = std::max(free_iterator, iterator + size);
171 201
172 if (iterator + size >= STREAM_BUFFER_SIZE) { 202 if (iterator + size >= staging_buffer_size) {
173 std::fill(sync_ticks.begin() + Region(used_iterator), sync_ticks.begin() + NUM_SYNCS, 203 std::fill(sync_ticks.begin() + Region(used_iterator, region_size),
174 current_tick); 204 sync_ticks.begin() + NUM_SYNCS, current_tick);
175 used_iterator = 0; 205 used_iterator = 0;
176 iterator = 0; 206 iterator = 0;
177 free_iterator = size; 207 free_iterator = size;
178 208
179 if (AreRegionsActive(0, Region(size) + 1)) { 209 if (AreRegionsActive(0, Region(size, region_size) + 1)) {
180 // Avoid waiting for the previous usages to be free 210 // Avoid waiting for the previous usages to be free
181 return GetStagingBuffer(size, MemoryUsage::Upload); 211 return GetStagingBuffer(size, MemoryUsage::Upload);
182 } 212 }
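GetStreamBuffer treats the stream buffer as a ring divided into NUM_SYNCS regions; each region records the scheduler tick that last wrote it, and a request that would overlap a region the GPU is still consuming falls back to a dedicated staging buffer rather than stalling. A simplified sketch of that bookkeeping (the NUM_SYNCS value and the wrap handling are illustrative; the real code also refreshes the tail regions on wrap):

    #include <algorithm>
    #include <array>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t NUM_SYNCS = 16; // assumption, for illustration only

    struct StreamRingSketch {
        size_t buffer_size;
        size_t region_size = buffer_size / NUM_SYNCS;
        size_t iterator = 0;                     // next write offset
        std::array<uint64_t, NUM_SYNCS> ticks{}; // last tick touching each region

        size_t Region(size_t offset) const { return offset / region_size; }

        // Returns the offset for `size` bytes, or SIZE_MAX when the caller
        // should take the dedicated staging-buffer path instead of waiting.
        size_t Allocate(size_t size, uint64_t current_tick, uint64_t gpu_tick) {
            if (iterator + size >= buffer_size) {
                iterator = 0; // wrap to the start of the ring
            }
            const size_t first = Region(iterator);
            const size_t last = std::min(Region(iterator + size) + 1, NUM_SYNCS);
            for (size_t r = first; r < last; ++r) {
                if (ticks[r] > gpu_tick) {
                    return SIZE_MAX; // region still in flight on the GPU
                }
            }
            std::fill(ticks.begin() + first, ticks.begin() + last, current_tick);
            const size_t offset = iterator;
            iterator += size;
            return offset;
        }
    };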
diff --git a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
index 91dc84da8..90c67177f 100644
--- a/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
+++ b/src/video_core/renderer_vulkan/vk_staging_buffer_pool.h
@@ -93,6 +93,9 @@ private:
93 size_t free_iterator = 0; 93 size_t free_iterator = 0;
94 std::array<u64, NUM_SYNCS> sync_ticks{}; 94 std::array<u64, NUM_SYNCS> sync_ticks{};
95 95
96 size_t staging_buffer_size = 0;
97 size_t region_size = 0;
98
96 StagingBuffersCache device_local_cache; 99 StagingBuffersCache device_local_cache;
97 StagingBuffersCache upload_cache; 100 StagingBuffersCache upload_cache;
98 StagingBuffersCache download_cache; 101 StagingBuffersCache download_cache;
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.cpp b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
index 9ad096431..f234e1a31 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.cpp
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.cpp
@@ -7,9 +7,9 @@
7 7
8#include "common/common_types.h" 8#include "common/common_types.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "video_core/control/channel_state.h"
10#include "video_core/dirty_flags.h" 11#include "video_core/dirty_flags.h"
11#include "video_core/engines/maxwell_3d.h" 12#include "video_core/engines/maxwell_3d.h"
12#include "video_core/gpu.h"
13#include "video_core/renderer_vulkan/vk_state_tracker.h" 13#include "video_core/renderer_vulkan/vk_state_tracker.h"
14 14
15#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name) 15#define OFF(field_name) MAXWELL3D_REG_INDEX(field_name)
@@ -174,9 +174,8 @@ void SetupDirtyVertexBindings(Tables& tables) {
174} 174}
175} // Anonymous namespace 175} // Anonymous namespace
176 176
177StateTracker::StateTracker(Tegra::GPU& gpu) 177void StateTracker::SetupTables(Tegra::Control::ChannelState& channel_state) {
178 : flags{gpu.Maxwell3D().dirty.flags}, invalidation_flags{MakeInvalidationFlags()} { 178 auto& tables{channel_state.maxwell_3d->dirty.tables};
179 auto& tables{gpu.Maxwell3D().dirty.tables};
180 SetupDirtyFlags(tables); 179 SetupDirtyFlags(tables);
181 SetupDirtyViewports(tables); 180 SetupDirtyViewports(tables);
182 SetupDirtyScissors(tables); 181 SetupDirtyScissors(tables);
@@ -199,4 +198,15 @@ StateTracker::StateTracker(Tegra::GPU& gpu)
199 SetupDirtyVertexBindings(tables); 198 SetupDirtyVertexBindings(tables);
200} 199}
201 200
201void StateTracker::ChangeChannel(Tegra::Control::ChannelState& channel_state) {
202 flags = &channel_state.maxwell_3d->dirty.flags;
203}
204
205void StateTracker::InvalidateState() {
206 flags->set();
207}
208
209StateTracker::StateTracker()
210 : flags{&default_flags}, default_flags{}, invalidation_flags{MakeInvalidationFlags()} {}
211
202} // namespace Vulkan 212} // namespace Vulkan
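With channels, the state tracker can no longer bind one Maxwell3D's dirty flags by reference at construction; it now holds a pointer that ChangeChannel retargets at the active channel's flags, with default_flags as a safe target before any channel is bound, and InvalidateState marks everything dirty after a switch. A compact sketch of the pattern (std::bitset stands in for the real DirtyState::Flags):

    #include <bitset>
    #include <cstddef>

    using FlagsSketch = std::bitset<256>; // stand-in for DirtyState::Flags

    class StateTrackerSketch {
    public:
        StateTrackerSketch() : flags{&default_flags} {}

        void ChangeChannel(FlagsSketch& channel_flags) {
            flags = &channel_flags; // future reads/writes hit the new channel
        }
        void InvalidateState() {
            flags->set(); // everything is dirty after a channel switch
        }
        bool Exchange(std::size_t id, bool new_value) {
            const bool was_dirty = (*flags)[id];
            (*flags)[id] = new_value;
            return was_dirty;
        }

    private:
        FlagsSketch default_flags{};
        FlagsSketch* flags;
    };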
diff --git a/src/video_core/renderer_vulkan/vk_state_tracker.h b/src/video_core/renderer_vulkan/vk_state_tracker.h
index a85bc1c10..2296dea60 100644
--- a/src/video_core/renderer_vulkan/vk_state_tracker.h
+++ b/src/video_core/renderer_vulkan/vk_state_tracker.h
@@ -10,6 +10,12 @@
10#include "video_core/dirty_flags.h" 10#include "video_core/dirty_flags.h"
11#include "video_core/engines/maxwell_3d.h" 11#include "video_core/engines/maxwell_3d.h"
12 12
13namespace Tegra {
14namespace Control {
15struct ChannelState;
16}
17} // namespace Tegra
18
13namespace Vulkan { 19namespace Vulkan {
14 20
15namespace Dirty { 21namespace Dirty {
@@ -53,19 +59,19 @@ class StateTracker {
53 using Maxwell = Tegra::Engines::Maxwell3D::Regs; 59 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
54 60
55public: 61public:
56 explicit StateTracker(Tegra::GPU& gpu); 62 explicit StateTracker();
57 63
58 void InvalidateCommandBufferState() { 64 void InvalidateCommandBufferState() {
59 flags |= invalidation_flags; 65 (*flags) |= invalidation_flags;
60 current_topology = INVALID_TOPOLOGY; 66 current_topology = INVALID_TOPOLOGY;
61 } 67 }
62 68
63 void InvalidateViewports() { 69 void InvalidateViewports() {
64 flags[Dirty::Viewports] = true; 70 (*flags)[Dirty::Viewports] = true;
65 } 71 }
66 72
67 void InvalidateScissors() { 73 void InvalidateScissors() {
68 flags[Dirty::Scissors] = true; 74 (*flags)[Dirty::Scissors] = true;
69 } 75 }
70 76
71 bool TouchViewports() { 77 bool TouchViewports() {
@@ -139,16 +145,23 @@ public:
139 return has_changed; 145 return has_changed;
140 } 146 }
141 147
148 void SetupTables(Tegra::Control::ChannelState& channel_state);
149
150 void ChangeChannel(Tegra::Control::ChannelState& channel_state);
151
152 void InvalidateState();
153
142private: 154private:
143 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u); 155 static constexpr auto INVALID_TOPOLOGY = static_cast<Maxwell::PrimitiveTopology>(~0u);
144 156
145 bool Exchange(std::size_t id, bool new_value) const noexcept { 157 bool Exchange(std::size_t id, bool new_value) const noexcept {
146 const bool is_dirty = flags[id]; 158 const bool is_dirty = (*flags)[id];
147 flags[id] = new_value; 159 (*flags)[id] = new_value;
148 return is_dirty; 160 return is_dirty;
149 } 161 }
150 162
151 Tegra::Engines::Maxwell3D::DirtyState::Flags& flags; 163 Tegra::Engines::Maxwell3D::DirtyState::Flags* flags;
164 Tegra::Engines::Maxwell3D::DirtyState::Flags default_flags;
152 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags; 165 Tegra::Engines::Maxwell3D::DirtyState::Flags invalidation_flags;
153 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY; 166 Maxwell::PrimitiveTopology current_topology = INVALID_TOPOLOGY;
154}; 167};
diff --git a/src/video_core/renderer_vulkan/vk_swapchain.cpp b/src/video_core/renderer_vulkan/vk_swapchain.cpp
index a69ae7725..706d9ba74 100644
--- a/src/video_core/renderer_vulkan/vk_swapchain.cpp
+++ b/src/video_core/renderer_vulkan/vk_swapchain.cpp
@@ -36,7 +36,8 @@ VkPresentModeKHR ChooseSwapPresentMode(vk::Span<VkPresentModeKHR> modes) {
36 // Mailbox (triple buffering) doesn't lock the application like fifo (vsync), 36 // Mailbox (triple buffering) doesn't lock the application like fifo (vsync),
37 // prefer it if the vsync option is not selected 37 // prefer it if the vsync option is not selected
38 const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR); 38 const auto found_mailbox = std::find(modes.begin(), modes.end(), VK_PRESENT_MODE_MAILBOX_KHR);
39 if (found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) { 39 if (Settings::values.fullscreen_mode.GetValue() == Settings::FullscreenMode::Borderless &&
40 found_mailbox != modes.end() && !Settings::values.use_vsync.GetValue()) {
40 return VK_PRESENT_MODE_MAILBOX_KHR; 41 return VK_PRESENT_MODE_MAILBOX_KHR;
41 } 42 }
42 if (!Settings::values.use_speed_limit.GetValue()) { 43 if (!Settings::values.use_speed_limit.GetValue()) {
@@ -156,8 +157,16 @@ void Swapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u3
156 present_mode = ChooseSwapPresentMode(present_modes); 157 present_mode = ChooseSwapPresentMode(present_modes);
157 158
158 u32 requested_image_count{capabilities.minImageCount + 1}; 159 u32 requested_image_count{capabilities.minImageCount + 1};
159 if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) { 160 // Ensure triple buffering if possible.
160 requested_image_count = capabilities.maxImageCount; 161 if (capabilities.maxImageCount > 0) {
162 if (requested_image_count > capabilities.maxImageCount) {
163 requested_image_count = capabilities.maxImageCount;
164 } else {
165 requested_image_count =
166 std::max(requested_image_count, std::min(3U, capabilities.maxImageCount));
167 }
168 } else {
169 requested_image_count = std::max(requested_image_count, 3U);
161 } 170 }
162 VkSwapchainCreateInfoKHR swapchain_ci{ 171 VkSwapchainCreateInfoKHR swapchain_ci{
163 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR, 172 .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
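The rewritten clamp keeps requesting minImageCount + 1 but also raises the request to at least three images wherever the surface allows, so the swapchain can triple buffer; maxImageCount == 0 means the surface imposes no upper bound. A standalone sketch of the same arithmetic:

    #include <algorithm>
    #include <cstdint>

    // Sketch: request at least triple buffering within the surface limits.
    uint32_t ChooseImageCount(uint32_t min_image_count, uint32_t max_image_count) {
        uint32_t requested = min_image_count + 1;
        if (max_image_count == 0) {
            return std::max(requested, 3u); // no upper bound reported
        }
        if (requested > max_image_count) {
            return max_image_count; // clamp to what the surface supports
        }
        return std::max(requested, std::min(3u, max_image_count));
    }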
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.cpp b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
index caca79d79..305ad8aee 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.cpp
@@ -592,7 +592,7 @@ void TryTransformSwizzleIfNeeded(PixelFormat format, std::array<SwizzleSource, 4
592 case PixelFormat::A5B5G5R1_UNORM: 592 case PixelFormat::A5B5G5R1_UNORM:
593 std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial); 593 std::ranges::transform(swizzle, swizzle.begin(), SwapSpecial);
594 break; 594 break;
595 case PixelFormat::R4G4_UNORM: 595 case PixelFormat::G4R4_UNORM:
596 std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed); 596 std::ranges::transform(swizzle, swizzle.begin(), SwapGreenRed);
597 break; 597 break;
598 default: 598 default:
@@ -1474,13 +1474,14 @@ bool Image::BlitScaleHelper(bool scale_up) {
1474 }; 1474 };
1475 const VkExtent2D extent{ 1475 const VkExtent2D extent{
1476 .width = std::max(scaled_width, info.size.width), 1476 .width = std::max(scaled_width, info.size.width),
1477 .height = std::max(scaled_height, info.size.width), 1477 .height = std::max(scaled_height, info.size.height),
1478 }; 1478 };
1479 1479
1480 auto* view_ptr = blit_view.get(); 1480 auto* view_ptr = blit_view.get();
1481 if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) { 1481 if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
1482 if (!blit_framebuffer) { 1482 if (!blit_framebuffer) {
1483 blit_framebuffer = std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent); 1483 blit_framebuffer =
1484 std::make_unique<Framebuffer>(*runtime, view_ptr, nullptr, extent, scale_up);
1484 } 1485 }
1485 const auto color_view = blit_view->Handle(Shader::TextureType::Color2D); 1486 const auto color_view = blit_view->Handle(Shader::TextureType::Color2D);
1486 1487
@@ -1488,7 +1489,8 @@ bool Image::BlitScaleHelper(bool scale_up) {
1488 src_region, operation, BLIT_OPERATION); 1489 src_region, operation, BLIT_OPERATION);
1489 } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) { 1490 } else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
1490 if (!blit_framebuffer) { 1491 if (!blit_framebuffer) {
1491 blit_framebuffer = std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent); 1492 blit_framebuffer =
1493 std::make_unique<Framebuffer>(*runtime, nullptr, view_ptr, extent, scale_up);
1492 } 1494 }
1493 runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(), 1495 runtime->blit_image_helper.BlitDepthStencil(blit_framebuffer.get(), blit_view->DepthView(),
1494 blit_view->StencilView(), dst_region, 1496 blit_view->StencilView(), dst_region,
@@ -1756,34 +1758,42 @@ Framebuffer::Framebuffer(TextureCacheRuntime& runtime, std::span<ImageView*, NUM
1756 .width = key.size.width, 1758 .width = key.size.width,
1757 .height = key.size.height, 1759 .height = key.size.height,
1758 }} { 1760 }} {
1759 CreateFramebuffer(runtime, color_buffers, depth_buffer); 1761 CreateFramebuffer(runtime, color_buffers, depth_buffer, key.is_rescaled);
1760 if (runtime.device.HasDebuggingToolAttached()) { 1762 if (runtime.device.HasDebuggingToolAttached()) {
1761 framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str()); 1763 framebuffer.SetObjectNameEXT(VideoCommon::Name(key).c_str());
1762 } 1764 }
1763} 1765}
1764 1766
1765Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, 1767Framebuffer::Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
1766 ImageView* depth_buffer, VkExtent2D extent) 1768 ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled)
1767 : render_area{extent} { 1769 : render_area{extent} {
1768 std::array<ImageView*, NUM_RT> color_buffers{color_buffer}; 1770 std::array<ImageView*, NUM_RT> color_buffers{color_buffer};
1769 CreateFramebuffer(runtime, color_buffers, depth_buffer); 1771 CreateFramebuffer(runtime, color_buffers, depth_buffer, is_rescaled);
1770} 1772}
1771 1773
1772Framebuffer::~Framebuffer() = default; 1774Framebuffer::~Framebuffer() = default;
1773 1775
1774void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime, 1776void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1775 std::span<ImageView*, NUM_RT> color_buffers, 1777 std::span<ImageView*, NUM_RT> color_buffers,
1776 ImageView* depth_buffer) { 1778 ImageView* depth_buffer, bool is_rescaled) {
1777 std::vector<VkImageView> attachments; 1779 std::vector<VkImageView> attachments;
1778 RenderPassKey renderpass_key{}; 1780 RenderPassKey renderpass_key{};
1779 s32 num_layers = 1; 1781 s32 num_layers = 1;
1780 1782
1783 const auto& resolution = runtime.resolution;
1784
1785 u32 width = 0;
1786 u32 height = 0;
1781 for (size_t index = 0; index < NUM_RT; ++index) { 1787 for (size_t index = 0; index < NUM_RT; ++index) {
1782 const ImageView* const color_buffer = color_buffers[index]; 1788 const ImageView* const color_buffer = color_buffers[index];
1783 if (!color_buffer) { 1789 if (!color_buffer) {
1784 renderpass_key.color_formats[index] = PixelFormat::Invalid; 1790 renderpass_key.color_formats[index] = PixelFormat::Invalid;
1785 continue; 1791 continue;
1786 } 1792 }
1793 width = std::max(width, is_rescaled ? resolution.ScaleUp(color_buffer->size.width)
1794 : color_buffer->size.width);
1795 height = std::max(height, is_rescaled ? resolution.ScaleUp(color_buffer->size.height)
1796 : color_buffer->size.height);
1787 attachments.push_back(color_buffer->RenderTarget()); 1797 attachments.push_back(color_buffer->RenderTarget());
1788 renderpass_key.color_formats[index] = color_buffer->format; 1798 renderpass_key.color_formats[index] = color_buffer->format;
1789 num_layers = std::max(num_layers, color_buffer->range.extent.layers); 1799 num_layers = std::max(num_layers, color_buffer->range.extent.layers);
@@ -1794,6 +1804,10 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1794 } 1804 }
1795 const size_t num_colors = attachments.size(); 1805 const size_t num_colors = attachments.size();
1796 if (depth_buffer) { 1806 if (depth_buffer) {
1807 width = std::max(width, is_rescaled ? resolution.ScaleUp(depth_buffer->size.width)
1808 : depth_buffer->size.width);
1809 height = std::max(height, is_rescaled ? resolution.ScaleUp(depth_buffer->size.height)
1810 : depth_buffer->size.height);
1797 attachments.push_back(depth_buffer->RenderTarget()); 1811 attachments.push_back(depth_buffer->RenderTarget());
1798 renderpass_key.depth_format = depth_buffer->format; 1812 renderpass_key.depth_format = depth_buffer->format;
1799 num_layers = std::max(num_layers, depth_buffer->range.extent.layers); 1813 num_layers = std::max(num_layers, depth_buffer->range.extent.layers);
@@ -1810,6 +1824,8 @@ void Framebuffer::CreateFramebuffer(TextureCacheRuntime& runtime,
1810 renderpass_key.samples = samples; 1824 renderpass_key.samples = samples;
1811 1825
1812 renderpass = runtime.render_pass_cache.Get(renderpass_key); 1826 renderpass = runtime.render_pass_cache.Get(renderpass_key);
1827 render_area.width = std::min(render_area.width, width);
1828 render_area.height = std::min(render_area.height, height);
1813 1829
1814 num_color_buffers = static_cast<u32>(num_colors); 1830 num_color_buffers = static_cast<u32>(num_colors);
1815 framebuffer = runtime.device.GetLogical().CreateFramebuffer({ 1831 framebuffer = runtime.device.GetLogical().CreateFramebuffer({
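CreateFramebuffer now derives the render area from the attachments themselves: each color or depth view contributes its size, scaled up when the framebuffer is rescaled, and the final render area is clamped so it never exceeds what every attachment can back. A hedged sketch of the folding (the scaling lambda stands in for the real resolution.ScaleUp helper):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct ExtentSketch {
        uint32_t width;
        uint32_t height;
    };

    // Sketch: fold attachment sizes into a common, clamped render area.
    ExtentSketch ClampRenderArea(ExtentSketch render_area,
                                 const std::vector<ExtentSketch>& attachments,
                                 bool is_rescaled, uint32_t num, uint32_t den) {
        const auto scale_up = [&](uint32_t v) {
            return is_rescaled ? (v * num) / den : v; // stand-in for ScaleUp
        };
        uint32_t width = 0;
        uint32_t height = 0;
        for (const ExtentSketch& size : attachments) {
            width = std::max(width, scale_up(size.width));
            height = std::max(height, scale_up(size.height));
        }
        render_area.width = std::min(render_area.width, width);
        render_area.height = std::min(render_area.height, height);
        return render_area;
    }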
diff --git a/src/video_core/renderer_vulkan/vk_texture_cache.h b/src/video_core/renderer_vulkan/vk_texture_cache.h
index 69f06ee7b..0b7ac0df1 100644
--- a/src/video_core/renderer_vulkan/vk_texture_cache.h
+++ b/src/video_core/renderer_vulkan/vk_texture_cache.h
@@ -268,7 +268,7 @@ public:
268 ImageView* depth_buffer, const VideoCommon::RenderTargets& key); 268 ImageView* depth_buffer, const VideoCommon::RenderTargets& key);
269 269
270 explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer, 270 explicit Framebuffer(TextureCacheRuntime& runtime, ImageView* color_buffer,
271 ImageView* depth_buffer, VkExtent2D extent); 271 ImageView* depth_buffer, VkExtent2D extent, bool is_rescaled);
272 272
273 ~Framebuffer(); 273 ~Framebuffer();
274 274
@@ -279,7 +279,8 @@ public:
279 Framebuffer& operator=(Framebuffer&&) = default; 279 Framebuffer& operator=(Framebuffer&&) = default;
280 280
281 void CreateFramebuffer(TextureCacheRuntime& runtime, 281 void CreateFramebuffer(TextureCacheRuntime& runtime,
282 std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer); 282 std::span<ImageView*, NUM_RT> color_buffers, ImageView* depth_buffer,
283 bool is_rescaled = false);
283 284
284 [[nodiscard]] VkFramebuffer Handle() const noexcept { 285 [[nodiscard]] VkFramebuffer Handle() const noexcept {
285 return *framebuffer; 286 return *framebuffer;
diff --git a/src/video_core/shader_cache.cpp b/src/video_core/shader_cache.cpp
index 164e4ee0e..f53066579 100644
--- a/src/video_core/shader_cache.cpp
+++ b/src/video_core/shader_cache.cpp
@@ -8,6 +8,7 @@
8#include "common/assert.h" 8#include "common/assert.h"
9#include "shader_recompiler/frontend/maxwell/control_flow.h" 9#include "shader_recompiler/frontend/maxwell/control_flow.h"
10#include "shader_recompiler/object_pool.h" 10#include "shader_recompiler/object_pool.h"
11#include "video_core/control/channel_state.h"
11#include "video_core/dirty_flags.h" 12#include "video_core/dirty_flags.h"
12#include "video_core/engines/kepler_compute.h" 13#include "video_core/engines/kepler_compute.h"
 #include "video_core/engines/maxwell_3d.h"
@@ -33,29 +34,25 @@ void ShaderCache::SyncGuestHost() {
     RemovePendingShaders();
 }
 
-ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
-                         Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
-                         Tegra::Engines::KeplerCompute& kepler_compute_)
-    : gpu_memory{gpu_memory_}, maxwell3d{maxwell3d_}, kepler_compute{kepler_compute_},
-      rasterizer{rasterizer_} {}
+ShaderCache::ShaderCache(VideoCore::RasterizerInterface& rasterizer_) : rasterizer{rasterizer_} {}
 
 bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
-    auto& dirty{maxwell3d.dirty.flags};
+    auto& dirty{maxwell3d->dirty.flags};
     if (!dirty[VideoCommon::Dirty::Shaders]) {
         return last_shaders_valid;
     }
     dirty[VideoCommon::Dirty::Shaders] = false;
 
-    const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+    const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
     for (size_t index = 0; index < Tegra::Engines::Maxwell3D::Regs::MaxShaderProgram; ++index) {
-        if (!maxwell3d.regs.IsShaderConfigEnabled(index)) {
+        if (!maxwell3d->regs.IsShaderConfigEnabled(index)) {
             unique_hashes[index] = 0;
             continue;
         }
-        const auto& shader_config{maxwell3d.regs.shader_config[index]};
+        const auto& shader_config{maxwell3d->regs.shader_config[index]};
         const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
         const GPUVAddr shader_addr{base_addr + shader_config.offset};
-        const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+        const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
         if (!cpu_shader_addr) {
             LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
             last_shaders_valid = false;
@@ -64,7 +61,7 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
         const ShaderInfo* shader_info{TryGet(*cpu_shader_addr)};
         if (!shader_info) {
             const u32 start_address{shader_config.offset};
-            GraphicsEnvironment env{maxwell3d, gpu_memory, program, base_addr, start_address};
+            GraphicsEnvironment env{*maxwell3d, *gpu_memory, program, base_addr, start_address};
             shader_info = MakeShaderInfo(env, *cpu_shader_addr);
         }
         shader_infos[index] = shader_info;
@@ -75,10 +72,10 @@ bool ShaderCache::RefreshStages(std::array<u64, 6>& unique_hashes) {
 }
 
 const ShaderInfo* ShaderCache::ComputeShader() {
-    const GPUVAddr program_base{kepler_compute.regs.code_loc.Address()};
-    const auto& qmd{kepler_compute.launch_description};
+    const GPUVAddr program_base{kepler_compute->regs.code_loc.Address()};
+    const auto& qmd{kepler_compute->launch_description};
     const GPUVAddr shader_addr{program_base + qmd.program_start};
-    const std::optional<VAddr> cpu_shader_addr{gpu_memory.GpuToCpuAddress(shader_addr)};
+    const std::optional<VAddr> cpu_shader_addr{gpu_memory->GpuToCpuAddress(shader_addr)};
     if (!cpu_shader_addr) {
         LOG_ERROR(HW_GPU, "Invalid GPU address for shader 0x{:016x}", shader_addr);
         return nullptr;
@@ -86,22 +83,22 @@ const ShaderInfo* ShaderCache::ComputeShader() {
     if (const ShaderInfo* const shader = TryGet(*cpu_shader_addr)) {
         return shader;
     }
-    ComputeEnvironment env{kepler_compute, gpu_memory, program_base, qmd.program_start};
+    ComputeEnvironment env{*kepler_compute, *gpu_memory, program_base, qmd.program_start};
     return MakeShaderInfo(env, *cpu_shader_addr);
 }
 
 void ShaderCache::GetGraphicsEnvironments(GraphicsEnvironments& result,
                                           const std::array<u64, NUM_PROGRAMS>& unique_hashes) {
     size_t env_index{};
-    const GPUVAddr base_addr{maxwell3d.regs.code_address.CodeAddress()};
+    const GPUVAddr base_addr{maxwell3d->regs.code_address.CodeAddress()};
     for (size_t index = 0; index < NUM_PROGRAMS; ++index) {
         if (unique_hashes[index] == 0) {
             continue;
         }
         const auto program{static_cast<Tegra::Engines::Maxwell3D::Regs::ShaderProgram>(index)};
         auto& env{result.envs[index]};
-        const u32 start_address{maxwell3d.regs.shader_config[index].offset};
-        env = GraphicsEnvironment{maxwell3d, gpu_memory, program, base_addr, start_address};
+        const u32 start_address{maxwell3d->regs.shader_config[index].offset};
+        env = GraphicsEnvironment{*maxwell3d, *gpu_memory, program, base_addr, start_address};
         env.SetCachedSize(shader_infos[index]->size_bytes);
         result.env_ptrs[env_index++] = &env;
     }
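The `maxwell3d.` to `maxwell3d->` churn above is the mechanical fallout of one structural change: the shader cache no longer captures engine references at construction, it holds pointers that are rebound whenever a different GPU channel becomes current. A minimal sketch of that shape, with illustrative names only (the real plumbing is ChannelSetupCaches in video_core/control/channel_state_cache.h):

// Sketch, not yuzu code: a cache that follows the currently bound channel.
namespace Tegra {
class MemoryManager;
namespace Engines {
class Maxwell3D;
class KeplerCompute;
} // namespace Engines
} // namespace Tegra

class ChannelBoundCache {
public:
    // Called on every channel switch; afterwards all '->' accesses in the
    // cache resolve to the engines of the newly bound channel.
    void BindToChannel(Tegra::Engines::Maxwell3D* maxwell3d_,
                       Tegra::Engines::KeplerCompute* kepler_compute_,
                       Tegra::MemoryManager* gpu_memory_) {
        maxwell3d = maxwell3d_;
        kepler_compute = kepler_compute_;
        gpu_memory = gpu_memory_;
    }

protected:
    Tegra::Engines::Maxwell3D* maxwell3d{};          // was: Maxwell3D& maxwell3d;
    Tegra::Engines::KeplerCompute* kepler_compute{}; // was a reference as well
    Tegra::MemoryManager* gpu_memory{};              // was: MemoryManager& gpu_memory;
};

Call sites that previously forwarded the references (GraphicsEnvironment, ComputeEnvironment) now dereference, hence the `*maxwell3d`/`*gpu_memory` arguments in the hunks above.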
diff --git a/src/video_core/shader_cache.h b/src/video_core/shader_cache.h
index f67cea8c4..a4391202d 100644
--- a/src/video_core/shader_cache.h
+++ b/src/video_core/shader_cache.h
@@ -12,6 +12,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "video_core/control/channel_state_cache.h"
 #include "video_core/rasterizer_interface.h"
 #include "video_core/shader_environment.h"
 
@@ -19,6 +20,10 @@ namespace Tegra {
 class MemoryManager;
 }
 
+namespace Tegra::Control {
+struct ChannelState;
+}
+
 namespace VideoCommon {
 
 class GenericEnvironment;
@@ -28,7 +33,7 @@ struct ShaderInfo {
     size_t size_bytes{};
 };
 
-class ShaderCache {
+class ShaderCache : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
     static constexpr u64 YUZU_PAGEBITS = 14;
     static constexpr u64 YUZU_PAGESIZE = u64(1) << YUZU_PAGEBITS;
 
@@ -71,9 +76,7 @@ protected:
         }
     };
 
-    explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_,
-                         Tegra::MemoryManager& gpu_memory_, Tegra::Engines::Maxwell3D& maxwell3d_,
-                         Tegra::Engines::KeplerCompute& kepler_compute_);
+    explicit ShaderCache(VideoCore::RasterizerInterface& rasterizer_);
 
     /// @brief Update the hashes and information of shader stages
     /// @param unique_hashes Shader hashes to store into when a stage is enabled
@@ -88,10 +91,6 @@ protected:
     void GetGraphicsEnvironments(GraphicsEnvironments& result,
                                  const std::array<u64, NUM_PROGRAMS>& unique_hashes);
 
-    Tegra::MemoryManager& gpu_memory;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::Engines::KeplerCompute& kepler_compute;
-
     std::array<const ShaderInfo*, NUM_PROGRAMS> shader_infos{};
     bool last_shaders_valid = false;
 
diff --git a/src/video_core/surface.cpp b/src/video_core/surface.cpp
index 079d5f028..a2bf08294 100644
--- a/src/video_core/surface.cpp
+++ b/src/video_core/surface.cpp
@@ -247,6 +247,8 @@ bool IsPixelFormatASTC(PixelFormat format) {
     case PixelFormat::ASTC_2D_6X6_UNORM:
     case PixelFormat::ASTC_2D_6X6_SRGB:
     case PixelFormat::ASTC_2D_10X6_UNORM:
+    case PixelFormat::ASTC_2D_10X5_UNORM:
+    case PixelFormat::ASTC_2D_10X5_SRGB:
     case PixelFormat::ASTC_2D_10X10_UNORM:
     case PixelFormat::ASTC_2D_10X10_SRGB:
     case PixelFormat::ASTC_2D_12X12_UNORM:
@@ -276,6 +278,7 @@ bool IsPixelFormatSRGB(PixelFormat format) {
     case PixelFormat::ASTC_2D_5X5_SRGB:
     case PixelFormat::ASTC_2D_10X8_SRGB:
     case PixelFormat::ASTC_2D_6X6_SRGB:
+    case PixelFormat::ASTC_2D_10X5_SRGB:
    case PixelFormat::ASTC_2D_10X10_SRGB:
    case PixelFormat::ASTC_2D_12X12_SRGB:
    case PixelFormat::ASTC_2D_8X6_SRGB:
diff --git a/src/video_core/surface.h b/src/video_core/surface.h
index 16273f185..57ca7f597 100644
--- a/src/video_core/surface.h
+++ b/src/video_core/surface.h
@@ -82,7 +82,7 @@ enum class PixelFormat {
     BC3_SRGB,
     BC7_SRGB,
     A4B4G4R4_UNORM,
-    R4G4_UNORM,
+    G4R4_UNORM,
     ASTC_2D_4X4_SRGB,
     ASTC_2D_8X8_SRGB,
     ASTC_2D_8X5_SRGB,
@@ -94,6 +94,8 @@ enum class PixelFormat {
     ASTC_2D_6X6_UNORM,
     ASTC_2D_6X6_SRGB,
     ASTC_2D_10X6_UNORM,
+    ASTC_2D_10X5_UNORM,
+    ASTC_2D_10X5_SRGB,
     ASTC_2D_10X10_UNORM,
     ASTC_2D_10X10_SRGB,
     ASTC_2D_12X12_UNORM,
@@ -216,7 +218,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
     4,  // BC3_SRGB
     4,  // BC7_SRGB
     1,  // A4B4G4R4_UNORM
-    1,  // R4G4_UNORM
+    1,  // G4R4_UNORM
     4,  // ASTC_2D_4X4_SRGB
     8,  // ASTC_2D_8X8_SRGB
     8,  // ASTC_2D_8X5_SRGB
@@ -228,6 +230,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_WIDTH_TABLE = {{
     6,  // ASTC_2D_6X6_UNORM
     6,  // ASTC_2D_6X6_SRGB
     10, // ASTC_2D_10X6_UNORM
+    10, // ASTC_2D_10X5_UNORM
+    10, // ASTC_2D_10X5_SRGB
     10, // ASTC_2D_10X10_UNORM
     10, // ASTC_2D_10X10_SRGB
     12, // ASTC_2D_12X12_UNORM
@@ -319,7 +323,7 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
     4,  // BC3_SRGB
     4,  // BC7_SRGB
     1,  // A4B4G4R4_UNORM
-    1,  // R4G4_UNORM
+    1,  // G4R4_UNORM
     4,  // ASTC_2D_4X4_SRGB
     8,  // ASTC_2D_8X8_SRGB
     5,  // ASTC_2D_8X5_SRGB
@@ -331,6 +335,8 @@ constexpr std::array<u8, MaxPixelFormat> BLOCK_HEIGHT_TABLE = {{
     6,  // ASTC_2D_6X6_UNORM
     6,  // ASTC_2D_6X6_SRGB
     6,  // ASTC_2D_10X6_UNORM
+    5,  // ASTC_2D_10X5_UNORM
+    5,  // ASTC_2D_10X5_SRGB
     10, // ASTC_2D_10X10_UNORM
     10, // ASTC_2D_10X10_SRGB
     12, // ASTC_2D_12X12_UNORM
@@ -422,7 +428,7 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
     128, // BC3_SRGB
     128, // BC7_UNORM
     16,  // A4B4G4R4_UNORM
-    8,   // R4G4_UNORM
+    8,   // G4R4_UNORM
     128, // ASTC_2D_4X4_SRGB
     128, // ASTC_2D_8X8_SRGB
     128, // ASTC_2D_8X5_SRGB
@@ -434,6 +440,8 @@ constexpr std::array<u8, MaxPixelFormat> BITS_PER_BLOCK_TABLE = {{
     128, // ASTC_2D_6X6_UNORM
     128, // ASTC_2D_6X6_SRGB
     128, // ASTC_2D_10X6_UNORM
+    128, // ASTC_2D_10X5_UNORM
+    128, // ASTC_2D_10X5_SRGB
     128, // ASTC_2D_10X10_UNORM
     128, // ASTC_2D_10X10_SRGB
     128, // ASTC_2D_12X12_UNORM
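A pixel format added to surface.h has to land in all three tables in lockstep: ASTC 10x5 compresses 10x5-texel blocks into 128 bits (16 bytes). Those constants determine the guest memory footprint of a texture. A standalone check of the arithmetic (plain C++, not yuzu code):

#include <cstdio>

// Integer ceiling division; partial edge blocks still occupy storage.
constexpr unsigned DivCeil(unsigned n, unsigned d) {
    return (n + d - 1) / d;
}

int main() {
    constexpr unsigned block_w = 10;          // BLOCK_WIDTH_TABLE entry
    constexpr unsigned block_h = 5;           // BLOCK_HEIGHT_TABLE entry
    constexpr unsigned block_bytes = 128 / 8; // BITS_PER_BLOCK_TABLE entry
    constexpr unsigned width = 100, height = 100;
    const unsigned blocks = DivCeil(width, block_w) * DivCeil(height, block_h);
    // Prints: 100x100 ASTC 10x5: 200 blocks, 3200 bytes
    std::printf("%ux%u ASTC 10x5: %u blocks, %u bytes\n", width, height, blocks,
                blocks * block_bytes);
}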
diff --git a/src/video_core/texture_cache/format_lookup_table.cpp b/src/video_core/texture_cache/format_lookup_table.cpp
index 1412aa076..ad935d386 100644
--- a/src/video_core/texture_cache/format_lookup_table.cpp
+++ b/src/video_core/texture_cache/format_lookup_table.cpp
@@ -63,7 +63,7 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
     case Hash(TextureFormat::A4B4G4R4, UNORM):
         return PixelFormat::A4B4G4R4_UNORM;
     case Hash(TextureFormat::G4R4, UNORM):
-        return PixelFormat::R4G4_UNORM;
+        return PixelFormat::G4R4_UNORM;
     case Hash(TextureFormat::A5B5G5R1, UNORM):
         return PixelFormat::A5B5G5R1_UNORM;
     case Hash(TextureFormat::R8, UNORM):
@@ -208,6 +208,10 @@ PixelFormat PixelFormatFromTextureInfo(TextureFormat format, ComponentType red,
         return PixelFormat::ASTC_2D_6X6_SRGB;
     case Hash(TextureFormat::ASTC_2D_10X6, UNORM, LINEAR):
         return PixelFormat::ASTC_2D_10X6_UNORM;
+    case Hash(TextureFormat::ASTC_2D_10X5, UNORM, LINEAR):
+        return PixelFormat::ASTC_2D_10X5_UNORM;
+    case Hash(TextureFormat::ASTC_2D_10X5, UNORM, SRGB):
+        return PixelFormat::ASTC_2D_10X5_SRGB;
     case Hash(TextureFormat::ASTC_2D_10X10, UNORM, LINEAR):
         return PixelFormat::ASTC_2D_10X10_UNORM;
     case Hash(TextureFormat::ASTC_2D_10X10, UNORM, SRGB):
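format_lookup_table.cpp resolves a TIC descriptor by switching on a key packed from the texture format, its component types, and the sRGB flag, which is why ASTC_2D_10X5 needs both a LINEAR and an SRGB row. A stripped-down version of the packing idea, with hypothetical enum values rather than yuzu's actual Hash helper:

#include <cstdint>

enum class TextureFormat : std::uint64_t { ASTC_2D_10X5 = 0x40 }; // value made up
enum class ComponentType : std::uint64_t { UNORM = 1 };

// Pack the fields into one integer so a single switch can match the combination.
constexpr std::uint64_t Hash(TextureFormat format, ComponentType type, bool is_srgb) {
    return static_cast<std::uint64_t>(format) << 8 |
           static_cast<std::uint64_t>(type) << 1 | static_cast<std::uint64_t>(is_srgb);
}

constexpr const char* Lookup(std::uint64_t key) {
    switch (key) {
    case Hash(TextureFormat::ASTC_2D_10X5, ComponentType::UNORM, false):
        return "ASTC_2D_10X5_UNORM";
    case Hash(TextureFormat::ASTC_2D_10X5, ComponentType::UNORM, true):
        return "ASTC_2D_10X5_SRGB";
    default:
        return "unknown";
    }
}

// Both combinations resolve; a missing SRGB case would fall through to "unknown".
static_assert(Lookup(Hash(TextureFormat::ASTC_2D_10X5, ComponentType::UNORM, true))[0] == 'A');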
diff --git a/src/video_core/texture_cache/formatter.h b/src/video_core/texture_cache/formatter.h
index 95a572604..acc854715 100644
--- a/src/video_core/texture_cache/formatter.h
+++ b/src/video_core/texture_cache/formatter.h
@@ -153,8 +153,8 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
         return "BC7_SRGB";
     case PixelFormat::A4B4G4R4_UNORM:
         return "A4B4G4R4_UNORM";
-    case PixelFormat::R4G4_UNORM:
-        return "R4G4_UNORM";
+    case PixelFormat::G4R4_UNORM:
+        return "G4R4_UNORM";
     case PixelFormat::ASTC_2D_4X4_SRGB:
         return "ASTC_2D_4X4_SRGB";
     case PixelFormat::ASTC_2D_8X8_SRGB:
@@ -177,6 +177,10 @@ struct fmt::formatter<VideoCore::Surface::PixelFormat> : fmt::formatter<fmt::str
         return "ASTC_2D_6X6_SRGB";
     case PixelFormat::ASTC_2D_10X6_UNORM:
         return "ASTC_2D_10X6_UNORM";
+    case PixelFormat::ASTC_2D_10X5_UNORM:
+        return "ASTC_2D_10X5_UNORM";
+    case PixelFormat::ASTC_2D_10X5_SRGB:
+        return "ASTC_2D_10X5_SRGB";
     case PixelFormat::ASTC_2D_10X10_UNORM:
         return "ASTC_2D_10X10_UNORM";
     case PixelFormat::ASTC_2D_10X10_SRGB:
diff --git a/src/video_core/texture_cache/image_base.cpp b/src/video_core/texture_cache/image_base.cpp
index f61e09ac7..91512022f 100644
--- a/src/video_core/texture_cache/image_base.cpp
+++ b/src/video_core/texture_cache/image_base.cpp
@@ -7,6 +7,7 @@
 #include <vector>
 
 #include "common/common_types.h"
+#include "common/div_ceil.h"
 #include "video_core/surface.h"
 #include "video_core/texture_cache/formatter.h"
 #include "video_core/texture_cache/image_base.h"
@@ -182,10 +183,6 @@ void AddImageAlias(ImageBase& lhs, ImageBase& rhs, ImageId lhs_id, ImageId rhs_i
     };
     const bool is_lhs_compressed = lhs_block.width > 1 || lhs_block.height > 1;
     const bool is_rhs_compressed = rhs_block.width > 1 || rhs_block.height > 1;
-    if (is_lhs_compressed && is_rhs_compressed) {
-        LOG_ERROR(HW_GPU, "Compressed to compressed image aliasing is not implemented");
-        return;
-    }
     const s32 lhs_mips = lhs.info.resources.levels;
     const s32 rhs_mips = rhs.info.resources.levels;
     const s32 num_mips = std::min(lhs_mips - base->level, rhs_mips);
@@ -199,12 +196,12 @@
         Extent3D lhs_size = MipSize(lhs.info.size, base->level + mip_level);
         Extent3D rhs_size = MipSize(rhs.info.size, mip_level);
         if (is_lhs_compressed) {
-            lhs_size.width /= lhs_block.width;
-            lhs_size.height /= lhs_block.height;
+            lhs_size.width = Common::DivCeil(lhs_size.width, lhs_block.width);
+            lhs_size.height = Common::DivCeil(lhs_size.height, lhs_block.height);
         }
         if (is_rhs_compressed) {
-            rhs_size.width /= rhs_block.width;
-            rhs_size.height /= rhs_block.height;
+            rhs_size.width = Common::DivCeil(rhs_size.width, rhs_block.width);
+            rhs_size.height = Common::DivCeil(rhs_size.height, rhs_block.height);
         }
         const Extent3D copy_size{
             .width = std::min(lhs_size.width, rhs_size.width),
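The DivCeil change above fixes a genuine rounding bug: for block-compressed formats, a mip level whose extent is not a multiple of the block size still owns a final partial block, and truncating division dropped it, shrinking the computed copy extent. A standalone illustration (DivCeil here is a stand-in for the helper in common/div_ceil.h):

#include <cassert>

constexpr unsigned DivCeil(unsigned n, unsigned d) {
    return (n + d - 1) / d;
}

int main() {
    // A 10-texel-wide mip of a 4x4 block-compressed image needs 3 blocks:
    // two full blocks plus a partial one covering the last 2 texels.
    constexpr unsigned mip_width = 10, block_width = 4;
    assert(mip_width / block_width == 2);         // truncation loses the tail
    assert(DivCeil(mip_width, block_width) == 3); // rounding up keeps it
}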
diff --git a/src/video_core/texture_cache/image_base.h b/src/video_core/texture_cache/image_base.h
index 1f85ec9da..620565684 100644
--- a/src/video_core/texture_cache/image_base.h
+++ b/src/video_core/texture_cache/image_base.h
@@ -88,6 +88,9 @@ struct ImageBase {
     u32 scale_rating = 0;
     u64 scale_tick = 0;
     bool has_scaled = false;
+
+    size_t channel = 0;
+
     ImageFlagBits flags = ImageFlagBits::CpuModified;
 
     GPUVAddr gpu_addr = 0;
diff --git a/src/video_core/texture_cache/render_targets.h b/src/video_core/texture_cache/render_targets.h
index da8ffe9ec..1efbd6507 100644
--- a/src/video_core/texture_cache/render_targets.h
+++ b/src/video_core/texture_cache/render_targets.h
@@ -26,6 +26,7 @@ struct RenderTargets {
     ImageViewId depth_buffer_id{};
     std::array<u8, NUM_RT> draw_buffers{};
     Extent2D size{};
+    bool is_rescaled{};
 };
 
 } // namespace VideoCommon
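RenderTargets doubles as a lookup key for cached framebuffers, so the new is_rescaled member only distinguishes entries if it also participates in the key's equality and hash. A generic illustration of that requirement (these are not the yuzu definitions):

#include <cstddef>
#include <functional>

struct Key {
    unsigned width;
    bool is_rescaled;

    // Every member must take part, or a rescaled and an unscaled target alias.
    bool operator==(const Key&) const noexcept = default;
};

template <>
struct std::hash<Key> {
    std::size_t operator()(const Key& k) const noexcept {
        return std::hash<unsigned>{}(k.width) ^ (static_cast<std::size_t>(k.is_rescaled) << 1);
    }
};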
diff --git a/src/video_core/texture_cache/texture_cache.cpp b/src/video_core/texture_cache/texture_cache.cpp
new file mode 100644
index 000000000..8a9a32f44
--- /dev/null
+++ b/src/video_core/texture_cache/texture_cache.cpp
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "video_core/control/channel_state_cache.inc"
+#include "video_core/texture_cache/texture_cache_base.h"
+
+namespace VideoCommon {
+
+TextureCacheChannelInfo::TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept
+    : ChannelInfo(state), graphics_image_table{gpu_memory}, graphics_sampler_table{gpu_memory},
+      compute_image_table{gpu_memory}, compute_sampler_table{gpu_memory} {}
+
+template class VideoCommon::ChannelSetupCaches<VideoCommon::TextureCacheChannelInfo>;
+
+} // namespace VideoCommon
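The new translation unit exists to give the ChannelSetupCaches template a single home: the member definitions live in a .inc, and exactly one .cpp includes it and spells out the explicit instantiation, so every other file that includes the header compiles only the declaration. The general pattern, with made-up names:

// registry.h -- declaration only; this is all that ordinary includers see.
template <typename T>
class Registry {
public:
    void Add(const T& value);
};

// registry.inc -- member definitions, pulled in only where instantiations live.
template <typename T>
void Registry<T>::Add(const T& value) {
    (void)value; // real bookkeeping would go here
}

// registry.cpp -- the one explicit instantiation, like texture_cache.cpp above.
template class Registry<int>;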
diff --git a/src/video_core/texture_cache/texture_cache.h b/src/video_core/texture_cache/texture_cache.h
index 1dbe01bc0..eaf4a1c95 100644
--- a/src/video_core/texture_cache/texture_cache.h
+++ b/src/video_core/texture_cache/texture_cache.h
@@ -1,5 +1,5 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
@@ -7,6 +7,7 @@
 
 #include "common/alignment.h"
 #include "common/settings.h"
+#include "video_core/control/channel_state.h"
 #include "video_core/dirty_flags.h"
 #include "video_core/engines/kepler_compute.h"
 #include "video_core/texture_cache/image_view_base.h"
@@ -29,12 +30,8 @@ using VideoCore::Surface::SurfaceType;
 using namespace Common::Literals;
 
 template <class P>
-TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_,
-                              Tegra::Engines::Maxwell3D& maxwell3d_,
-                              Tegra::Engines::KeplerCompute& kepler_compute_,
-                              Tegra::MemoryManager& gpu_memory_)
-    : runtime{runtime_}, rasterizer{rasterizer_}, maxwell3d{maxwell3d_},
-      kepler_compute{kepler_compute_}, gpu_memory{gpu_memory_} {
+TextureCache<P>::TextureCache(Runtime& runtime_, VideoCore::RasterizerInterface& rasterizer_)
+    : runtime{runtime_}, rasterizer{rasterizer_} {
     // Configure null sampler
     TSCEntry sampler_descriptor{};
     sampler_descriptor.min_filter.Assign(Tegra::Texture::TextureFilter::Linear);
@@ -93,7 +90,7 @@ void TextureCache<P>::RunGarbageCollector() {
             const auto copies = FullDownloadCopies(image.info);
             image.DownloadMemory(map, copies);
             runtime.Finish();
-            SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+            SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
         }
         if (True(image.flags & ImageFlagBits::Tracked)) {
             UntrackImage(image, image_id);
@@ -152,22 +149,24 @@ void TextureCache<P>::MarkModification(ImageId id) noexcept {
 template <class P>
 template <bool has_blacklists>
 void TextureCache<P>::FillGraphicsImageViews(std::span<ImageViewInOut> views) {
-    FillImageViews<has_blacklists>(graphics_image_table, graphics_image_view_ids, views);
+    FillImageViews<has_blacklists>(channel_state->graphics_image_table,
+                                   channel_state->graphics_image_view_ids, views);
 }
 
 template <class P>
 void TextureCache<P>::FillComputeImageViews(std::span<ImageViewInOut> views) {
-    FillImageViews<true>(compute_image_table, compute_image_view_ids, views);
+    FillImageViews<true>(channel_state->compute_image_table, channel_state->compute_image_view_ids,
+                         views);
 }
 
 template <class P>
 typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
-    if (index > graphics_sampler_table.Limit()) {
+    if (index > channel_state->graphics_sampler_table.Limit()) {
         LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
         return &slot_samplers[NULL_SAMPLER_ID];
     }
-    const auto [descriptor, is_new] = graphics_sampler_table.Read(index);
-    SamplerId& id = graphics_sampler_ids[index];
+    const auto [descriptor, is_new] = channel_state->graphics_sampler_table.Read(index);
+    SamplerId& id = channel_state->graphics_sampler_ids[index];
     if (is_new) {
         id = FindSampler(descriptor);
     }
@@ -176,12 +175,12 @@ typename P::Sampler* TextureCache<P>::GetGraphicsSampler(u32 index) {
 
 template <class P>
 typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
-    if (index > compute_sampler_table.Limit()) {
+    if (index > channel_state->compute_sampler_table.Limit()) {
         LOG_DEBUG(HW_GPU, "Invalid sampler index={}", index);
         return &slot_samplers[NULL_SAMPLER_ID];
     }
-    const auto [descriptor, is_new] = compute_sampler_table.Read(index);
-    SamplerId& id = compute_sampler_ids[index];
+    const auto [descriptor, is_new] = channel_state->compute_sampler_table.Read(index);
+    SamplerId& id = channel_state->compute_sampler_ids[index];
     if (is_new) {
         id = FindSampler(descriptor);
     }
@@ -191,34 +190,36 @@ typename P::Sampler* TextureCache<P>::GetComputeSampler(u32 index) {
 template <class P>
 void TextureCache<P>::SynchronizeGraphicsDescriptors() {
     using SamplerIndex = Tegra::Engines::Maxwell3D::Regs::SamplerIndex;
-    const bool linked_tsc = maxwell3d.regs.sampler_index == SamplerIndex::ViaHeaderIndex;
-    const u32 tic_limit = maxwell3d.regs.tic.limit;
-    const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d.regs.tsc.limit;
-    if (graphics_sampler_table.Synchornize(maxwell3d.regs.tsc.Address(), tsc_limit)) {
-        graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+    const bool linked_tsc = maxwell3d->regs.sampler_index == SamplerIndex::ViaHeaderIndex;
+    const u32 tic_limit = maxwell3d->regs.tic.limit;
+    const u32 tsc_limit = linked_tsc ? tic_limit : maxwell3d->regs.tsc.limit;
+    if (channel_state->graphics_sampler_table.Synchornize(maxwell3d->regs.tsc.Address(),
+                                                          tsc_limit)) {
+        channel_state->graphics_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
     }
-    if (graphics_image_table.Synchornize(maxwell3d.regs.tic.Address(), tic_limit)) {
-        graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+    if (channel_state->graphics_image_table.Synchornize(maxwell3d->regs.tic.Address(), tic_limit)) {
+        channel_state->graphics_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
     }
 }
 
 template <class P>
 void TextureCache<P>::SynchronizeComputeDescriptors() {
-    const bool linked_tsc = kepler_compute.launch_description.linked_tsc;
-    const u32 tic_limit = kepler_compute.regs.tic.limit;
-    const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute.regs.tsc.limit;
-    const GPUVAddr tsc_gpu_addr = kepler_compute.regs.tsc.Address();
-    if (compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
-        compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
+    const bool linked_tsc = kepler_compute->launch_description.linked_tsc;
+    const u32 tic_limit = kepler_compute->regs.tic.limit;
+    const u32 tsc_limit = linked_tsc ? tic_limit : kepler_compute->regs.tsc.limit;
+    const GPUVAddr tsc_gpu_addr = kepler_compute->regs.tsc.Address();
+    if (channel_state->compute_sampler_table.Synchornize(tsc_gpu_addr, tsc_limit)) {
+        channel_state->compute_sampler_ids.resize(tsc_limit + 1, CORRUPT_ID);
     }
-    if (compute_image_table.Synchornize(kepler_compute.regs.tic.Address(), tic_limit)) {
-        compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
+    if (channel_state->compute_image_table.Synchornize(kepler_compute->regs.tic.Address(),
+                                                       tic_limit)) {
+        channel_state->compute_image_view_ids.resize(tic_limit + 1, CORRUPT_ID);
     }
 }
 
 template <class P>
 bool TextureCache<P>::RescaleRenderTargets(bool is_clear) {
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     u32 scale_rating = 0;
     bool rescaled = false;
     std::array<ImageId, NUM_RT> tmp_color_images{};
@@ -315,7 +316,7 @@
 template <class P>
 void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
     using namespace VideoCommon::Dirty;
-    auto& flags = maxwell3d.dirty.flags;
+    auto& flags = maxwell3d->dirty.flags;
     if (!flags[Dirty::RenderTargets]) {
         for (size_t index = 0; index < NUM_RT; ++index) {
             ImageViewId& color_buffer_id = render_targets.color_buffer_ids[index];
@@ -342,7 +343,7 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
     PrepareImageView(depth_buffer_id, true, is_clear && IsFullClear(depth_buffer_id));
 
     for (size_t index = 0; index < NUM_RT; ++index) {
-        render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d.regs.rt_control.Map(index));
+        render_targets.draw_buffers[index] = static_cast<u8>(maxwell3d->regs.rt_control.Map(index));
     }
     u32 up_scale = 1;
     u32 down_shift = 0;
@@ -351,9 +352,10 @@ void TextureCache<P>::UpdateRenderTargets(bool is_clear) {
         down_shift = Settings::values.resolution_info.down_shift;
     }
     render_targets.size = Extent2D{
-        (maxwell3d.regs.render_area.width * up_scale) >> down_shift,
-        (maxwell3d.regs.render_area.height * up_scale) >> down_shift,
+        (maxwell3d->regs.render_area.width * up_scale) >> down_shift,
+        (maxwell3d->regs.render_area.height * up_scale) >> down_shift,
     };
+    render_targets.is_rescaled = is_rescaling;
 
     flags[Dirty::DepthBiasGlobal] = true;
 }
@@ -458,7 +460,7 @@ void TextureCache<P>::DownloadMemory(VAddr cpu_addr, size_t size) {
         const auto copies = FullDownloadCopies(image.info);
         image.DownloadMemory(map, copies);
         runtime.Finish();
-        SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
+        SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, map.mapped_span);
     }
 }
 
@@ -477,12 +479,20 @@ void TextureCache<P>::UnmapMemory(VAddr cpu_addr, size_t size) {
 }
 
 template <class P>
-void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
+void TextureCache<P>::UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size) {
     std::vector<ImageId> deleted_images;
-    ForEachImageInRegionGPU(gpu_addr, size,
+    ForEachImageInRegionGPU(as_id, gpu_addr, size,
                             [&](ImageId id, Image&) { deleted_images.push_back(id); });
     for (const ImageId id : deleted_images) {
         Image& image = slot_images[id];
+        if (True(image.flags & ImageFlagBits::CpuModified)) {
+            return;
+        }
+        image.flags |= ImageFlagBits::CpuModified;
+        if (True(image.flags & ImageFlagBits::Tracked)) {
+            UntrackImage(image, id);
+        }
+        /*
         if (True(image.flags & ImageFlagBits::Remapped)) {
             continue;
         }
@@ -490,6 +500,7 @@ void TextureCache<P>::UnmapGPUMemory(GPUVAddr gpu_addr, size_t size) {
         if (True(image.flags & ImageFlagBits::Tracked)) {
             UntrackImage(image, id);
         }
+        */
     }
 }
 
@@ -655,7 +666,7 @@ void TextureCache<P>::PopAsyncFlushes() {
         for (const ImageId image_id : download_ids) {
            const ImageBase& image = slot_images[image_id];
            const auto copies = FullDownloadCopies(image.info);
-            SwizzleImage(gpu_memory, image.gpu_addr, image.info, copies, download_span);
+            SwizzleImage(*gpu_memory, image.gpu_addr, image.info, copies, download_span);
             download_map.offset += image.unswizzled_size_bytes;
             download_span = download_span.subspan(image.unswizzled_size_bytes);
         }
@@ -714,26 +725,26 @@ void TextureCache<P>::UploadImageContents(Image& image, StagingBuffer& staging)
     const GPUVAddr gpu_addr = image.gpu_addr;
 
     if (True(image.flags & ImageFlagBits::AcceleratedUpload)) {
-        gpu_memory.ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
+        gpu_memory->ReadBlockUnsafe(gpu_addr, mapped_span.data(), mapped_span.size_bytes());
         const auto uploads = FullUploadSwizzles(image.info);
         runtime.AccelerateImageUpload(image, staging, uploads);
     } else if (True(image.flags & ImageFlagBits::Converted)) {
         std::vector<u8> unswizzled_data(image.unswizzled_size_bytes);
-        auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, unswizzled_data);
+        auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, unswizzled_data);
         ConvertImage(unswizzled_data, image.info, mapped_span, copies);
         image.UploadMemory(staging, copies);
     } else {
-        const auto copies = UnswizzleImage(gpu_memory, gpu_addr, image.info, mapped_span);
+        const auto copies = UnswizzleImage(*gpu_memory, gpu_addr, image.info, mapped_span);
         image.UploadMemory(staging, copies);
     }
 }
 
 template <class P>
 ImageViewId TextureCache<P>::FindImageView(const TICEntry& config) {
-    if (!IsValidEntry(gpu_memory, config)) {
+    if (!IsValidEntry(*gpu_memory, config)) {
         return NULL_IMAGE_VIEW_ID;
     }
-    const auto [pair, is_new] = image_views.try_emplace(config);
+    const auto [pair, is_new] = channel_state->image_views.try_emplace(config);
     ImageViewId& image_view_id = pair->second;
     if (is_new) {
         image_view_id = CreateImageView(config);
@@ -777,9 +788,9 @@ ImageId TextureCache<P>::FindOrInsertImage(const ImageInfo& info, GPUVAddr gpu_a
 template <class P>
 ImageId TextureCache<P>::FindImage(const ImageInfo& info, GPUVAddr gpu_addr,
                                    RelaxedOptions options) {
-    std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr) {
-        cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
+        cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, CalculateGuestSizeInBytes(info));
         if (!cpu_addr) {
             return ImageId{};
         }
@@ -860,7 +871,7 @@ void TextureCache<P>::InvalidateScale(Image& image) {
         image.scale_tick = frame_tick + 1;
     }
     const std::span<const ImageViewId> image_view_ids = image.image_view_ids;
-    auto& dirty = maxwell3d.dirty.flags;
+    auto& dirty = maxwell3d->dirty.flags;
     dirty[Dirty::RenderTargets] = true;
     dirty[Dirty::ZetaBuffer] = true;
     for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -880,12 +891,15 @@ void TextureCache<P>::InvalidateScale(Image& image) {
     }
     image.image_view_ids.clear();
     image.image_view_infos.clear();
-    if constexpr (ENABLE_VALIDATION) {
-        std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
-        std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+    for (size_t c : active_channel_ids) {
+        auto& channel_info = channel_storage[c];
+        if constexpr (ENABLE_VALIDATION) {
+            std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+            std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+        }
+        channel_info.graphics_image_table.Invalidate();
+        channel_info.compute_image_table.Invalidate();
     }
-    graphics_image_table.Invalidate();
-    compute_image_table.Invalidate();
     has_deleted_images = true;
 }
 
@@ -929,10 +943,10 @@ bool TextureCache<P>::ScaleDown(Image& image) {
 template <class P>
 ImageId TextureCache<P>::InsertImage(const ImageInfo& info, GPUVAddr gpu_addr,
                                      RelaxedOptions options) {
-    std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+    std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
     if (!cpu_addr) {
         const auto size = CalculateGuestSizeInBytes(info);
-        cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr, size);
+        cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr, size);
         if (!cpu_addr) {
             const VAddr fake_addr = ~(1ULL << 40ULL) + virtual_invalid_space;
             virtual_invalid_space += Common::AlignUp(size, 32);
@@ -1050,7 +1064,7 @@ ImageId TextureCache<P>::JoinImages(const ImageInfo& info, GPUVAddr gpu_addr, VA
     const ImageId new_image_id = slot_images.insert(runtime, new_info, gpu_addr, cpu_addr);
     Image& new_image = slot_images[new_image_id];
 
-    if (!gpu_memory.IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
+    if (!gpu_memory->IsContinousRange(new_image.gpu_addr, new_image.guest_size_bytes)) {
         new_image.flags |= ImageFlagBits::Sparse;
     }
 
@@ -1192,7 +1206,7 @@ SamplerId TextureCache<P>::FindSampler(const TSCEntry& config) {
     if (std::ranges::all_of(config.raw, [](u64 value) { return value == 0; })) {
         return NULL_SAMPLER_ID;
     }
-    const auto [pair, is_new] = samplers.try_emplace(config);
+    const auto [pair, is_new] = channel_state->samplers.try_emplace(config);
     if (is_new) {
         pair->second = slot_samplers.insert(runtime, config);
     }
@@ -1201,7 +1215,7 @@
 
 template <class P>
 ImageViewId TextureCache<P>::FindColorBuffer(size_t index, bool is_clear) {
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     if (index >= regs.rt_control.count) {
         return ImageViewId{};
     }
@@ -1219,7 +1233,7 @@
 
 template <class P>
 ImageViewId TextureCache<P>::FindDepthBuffer(bool is_clear) {
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     if (!regs.zeta_enable) {
         return ImageViewId{};
     }
@@ -1316,11 +1330,17 @@ void TextureCache<P>::ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& f
 
 template <class P>
 template <typename Func>
-void TextureCache<P>::ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func) {
+void TextureCache<P>::ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size,
+                                              Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, ImageId, Image&>::type;
     static constexpr bool BOOL_BREAK = std::is_same_v<FuncReturn, bool>;
     boost::container::small_vector<ImageId, 8> images;
-    ForEachGPUPage(gpu_addr, size, [this, &images, gpu_addr, size, func](u64 page) {
+    auto storage_id = getStorageID(as_id);
+    if (!storage_id) {
+        return;
+    }
+    auto& gpu_page_table = gpu_page_table_storage[*storage_id];
+    ForEachGPUPage(gpu_addr, size, [this, gpu_page_table, &images, gpu_addr, size, func](u64 page) {
         const auto it = gpu_page_table.find(page);
         if (it == gpu_page_table.end()) {
             if constexpr (BOOL_BREAK) {
@@ -1403,9 +1423,9 @@ template <typename Func>
 void TextureCache<P>::ForEachSparseSegment(ImageBase& image, Func&& func) {
     using FuncReturn = typename std::invoke_result<Func, GPUVAddr, VAddr, size_t>::type;
     static constexpr bool RETURNS_BOOL = std::is_same_v<FuncReturn, bool>;
-    const auto segments = gpu_memory.GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
+    const auto segments = gpu_memory->GetSubmappedRange(image.gpu_addr, image.guest_size_bytes);
     for (const auto& [gpu_addr, size] : segments) {
-        std::optional<VAddr> cpu_addr = gpu_memory.GpuToCpuAddress(gpu_addr);
+        std::optional<VAddr> cpu_addr = gpu_memory->GpuToCpuAddress(gpu_addr);
         ASSERT(cpu_addr);
         if constexpr (RETURNS_BOOL) {
             if (func(gpu_addr, *cpu_addr, size)) {
@@ -1448,8 +1468,9 @@ void TextureCache<P>::RegisterImage(ImageId image_id) {
     }
     image.lru_index = lru_cache.Insert(image_id, frame_tick);
 
-    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
-                   [this, image_id](u64 page) { gpu_page_table[page].push_back(image_id); });
+    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, image_id](u64 page) {
+        (*channel_state->gpu_page_table)[page].push_back(image_id);
+    });
     if (False(image.flags & ImageFlagBits::Sparse)) {
         auto map_id =
             slot_map_views.insert(image.gpu_addr, image.cpu_addr, image.guest_size_bytes, image_id);
@@ -1480,9 +1501,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
     image.flags &= ~ImageFlagBits::BadOverlap;
     lru_cache.Free(image.lru_index);
     const auto& clear_page_table =
-        [this, image_id](
-            u64 page,
-            std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>>& selected_page_table) {
+        [this, image_id](u64 page,
+                         std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>&
+                             selected_page_table) {
             const auto page_it = selected_page_table.find(page);
             if (page_it == selected_page_table.end()) {
                 ASSERT_MSG(false, "Unregistering unregistered page=0x{:x}", page << YUZU_PAGEBITS);
@@ -1497,8 +1518,9 @@ void TextureCache<P>::UnregisterImage(ImageId image_id) {
         }
         image_ids.erase(vector_it);
     };
-    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes,
-                   [this, &clear_page_table](u64 page) { clear_page_table(page, gpu_page_table); });
+    ForEachGPUPage(image.gpu_addr, image.guest_size_bytes, [this, &clear_page_table](u64 page) {
+        clear_page_table(page, (*channel_state->gpu_page_table));
+    });
     if (False(image.flags & ImageFlagBits::Sparse)) {
         const auto map_id = image.map_view_id;
         ForEachCPUPage(image.cpu_addr, image.guest_size_bytes, [this, map_id](u64 page) {
@@ -1631,7 +1653,7 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
     ASSERT_MSG(False(image.flags & ImageFlagBits::Registered), "Image was not unregistered");
 
     // Mark render targets as dirty
-    auto& dirty = maxwell3d.dirty.flags;
+    auto& dirty = maxwell3d->dirty.flags;
     dirty[Dirty::RenderTargets] = true;
     dirty[Dirty::ZetaBuffer] = true;
     for (size_t rt = 0; rt < NUM_RT; ++rt) {
@@ -1681,24 +1703,30 @@ void TextureCache<P>::DeleteImage(ImageId image_id, bool immediate_delete) {
     if (alloc_images.empty()) {
         image_allocs_table.erase(alloc_it);
     }
-    if constexpr (ENABLE_VALIDATION) {
-        std::ranges::fill(graphics_image_view_ids, CORRUPT_ID);
-        std::ranges::fill(compute_image_view_ids, CORRUPT_ID);
+    for (size_t c : active_channel_ids) {
+        auto& channel_info = channel_storage[c];
+        if constexpr (ENABLE_VALIDATION) {
+            std::ranges::fill(channel_info.graphics_image_view_ids, CORRUPT_ID);
+            std::ranges::fill(channel_info.compute_image_view_ids, CORRUPT_ID);
+        }
+        channel_info.graphics_image_table.Invalidate();
+        channel_info.compute_image_table.Invalidate();
     }
-    graphics_image_table.Invalidate();
-    compute_image_table.Invalidate();
     has_deleted_images = true;
 }
 
 template <class P>
 void TextureCache<P>::RemoveImageViewReferences(std::span<const ImageViewId> removed_views) {
-    auto it = image_views.begin();
-    while (it != image_views.end()) {
-        const auto found = std::ranges::find(removed_views, it->second);
-        if (found != removed_views.end()) {
-            it = image_views.erase(it);
-        } else {
-            ++it;
+    for (size_t c : active_channel_ids) {
+        auto& channel_info = channel_storage[c];
+        auto it = channel_info.image_views.begin();
+        while (it != channel_info.image_views.end()) {
+            const auto found = std::ranges::find(removed_views, it->second);
+            if (found != removed_views.end()) {
+                it = channel_info.image_views.erase(it);
+            } else {
+                ++it;
+            }
        }
     }
 }
@@ -1729,6 +1757,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
     boost::container::small_vector<const AliasedImage*, 1> aliased_images;
     Image& image = slot_images[image_id];
     bool any_rescaled = True(image.flags & ImageFlagBits::Rescaled);
+    bool any_modified = True(image.flags & ImageFlagBits::GpuModified);
     u64 most_recent_tick = image.modification_tick;
     for (const AliasedImage& aliased : image.aliased_images) {
         ImageBase& aliased_image = slot_images[aliased.id];
@@ -1736,9 +1765,7 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
             most_recent_tick = std::max(most_recent_tick, aliased_image.modification_tick);
             aliased_images.push_back(&aliased);
             any_rescaled |= True(aliased_image.flags & ImageFlagBits::Rescaled);
-            if (True(aliased_image.flags & ImageFlagBits::GpuModified)) {
-                image.flags |= ImageFlagBits::GpuModified;
-            }
+            any_modified |= True(aliased_image.flags & ImageFlagBits::GpuModified);
         }
     }
     if (aliased_images.empty()) {
@@ -1753,6 +1780,9 @@ void TextureCache<P>::SynchronizeAliases(ImageId image_id) {
         }
     }
     image.modification_tick = most_recent_tick;
+    if (any_modified) {
+        image.flags |= ImageFlagBits::GpuModified;
+    }
     std::ranges::sort(aliased_images, [this](const AliasedImage* lhs, const AliasedImage* rhs) {
         const ImageBase& lhs_image = slot_images[lhs->id];
         const ImageBase& rhs_image = slot_images[rhs->id];
@@ -1931,6 +1961,7 @@ std::pair<FramebufferId, ImageViewId> TextureCache<P>::RenderTargetFromImage(
         .color_buffer_ids = {color_view_id},
         .depth_buffer_id = depth_view_id,
         .size = {extent.width >> samples_x, extent.height >> samples_y},
+        .is_rescaled = is_rescaled,
     });
     return {framebuffer_id, view_id};
 }
@@ -1943,7 +1974,7 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
     const ImageViewBase& image_view = slot_image_views[id];
     const ImageBase& image = slot_images[image_view.image_id];
     const Extent3D size = image_view.size;
-    const auto& regs = maxwell3d.regs;
+    const auto& regs = maxwell3d->regs;
     const auto& scissor = regs.scissor_test[0];
     if (image.info.resources.levels > 1 || image.info.resources.layers > 1) {
         // Images with multiple resources can't be cleared in a single call
@@ -1958,4 +1989,19 @@ bool TextureCache<P>::IsFullClear(ImageViewId id) {
            scissor.max_y >= size.height;
 }
 
+template <class P>
+void TextureCache<P>::CreateChannel(struct Tegra::Control::ChannelState& channel) {
+    VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo>::CreateChannel(channel);
+    const auto it = channel_map.find(channel.bind_id);
+    auto* this_state = &channel_storage[it->second];
+    const auto& this_as_ref = address_spaces[channel.memory_manager->GetID()];
+    this_state->gpu_page_table = &gpu_page_table_storage[this_as_ref.storage_id];
+}
+
+/// Bind a channel for execution.
+template <class P>
+void TextureCache<P>::OnGPUASRegister([[maybe_unused]] size_t map_id) {
+    gpu_page_table_storage.emplace_back();
+}
+
 } // namespace VideoCommon
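Taken together, the hunks above replace the single gpu_page_table with one page table per GPU address space: gpu_page_table_storage is a std::deque (growth never moves existing tables, so the per-channel gpu_page_table pointers stay valid), CreateChannel wires each channel to the table of its address space, and UnmapGPUMemory/ForEachImageInRegionGPU take as_id to pick the right table. A reduced model of that indirection, with assumed names standing in for the ChannelSetupCaches bookkeeping:

#include <cstdint>
#include <deque>
#include <unordered_map>
#include <vector>

using ImageId = std::uint32_t;
using PageTable = std::unordered_map<std::uint64_t, std::vector<ImageId>>;

class PerAddressSpaceTables {
public:
    // Mirrors OnGPUASRegister: each registered address space grows one table.
    std::size_t RegisterAddressSpace() {
        tables.emplace_back();
        return tables.size() - 1;
    }

    // Mirrors the getStorageID lookup in ForEachImageInRegionGPU: callers name
    // the address space explicitly instead of touching a global table.
    PageTable* TableFor(std::size_t as_id) {
        return as_id < tables.size() ? &tables[as_id] : nullptr;
    }

private:
    std::deque<PageTable> tables; // deque: references stay stable on growth
};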
diff --git a/src/video_core/texture_cache/texture_cache_base.h b/src/video_core/texture_cache/texture_cache_base.h
index 7e6c6cef2..2fa8445eb 100644
--- a/src/video_core/texture_cache/texture_cache_base.h
+++ b/src/video_core/texture_cache/texture_cache_base.h
@@ -1,8 +1,10 @@
-// SPDX-FileCopyrightText: Copyright 2019 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: 2021 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
+#include <deque>
+#include <limits>
 #include <mutex>
 #include <span>
 #include <type_traits>
@@ -11,9 +13,11 @@
 #include <queue>
 
 #include "common/common_types.h"
+#include "common/hash.h"
 #include "common/literals.h"
 #include "common/lru_cache.h"
 #include "video_core/compatible_formats.h"
+#include "video_core/control/channel_state_cache.h"
 #include "video_core/delayed_destruction_ring.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/surface.h"
@@ -26,6 +30,10 @@
 #include "video_core/texture_cache/types.h"
 #include "video_core/textures/texture.h"
 
+namespace Tegra::Control {
+struct ChannelState;
+}
+
 namespace VideoCommon {
 
 using Tegra::Texture::SwizzleSource;
@@ -44,8 +52,35 @@ struct ImageViewInOut {
     ImageViewId id{};
 };
 
+using TextureCacheGPUMap = std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>>;
+
+class TextureCacheChannelInfo : public ChannelInfo {
+public:
+    TextureCacheChannelInfo() = delete;
+    TextureCacheChannelInfo(Tegra::Control::ChannelState& state) noexcept;
+    TextureCacheChannelInfo(const TextureCacheChannelInfo& state) = delete;
+    TextureCacheChannelInfo& operator=(const TextureCacheChannelInfo&) = delete;
+    TextureCacheChannelInfo(TextureCacheChannelInfo&& other) noexcept = default;
+    TextureCacheChannelInfo& operator=(TextureCacheChannelInfo&& other) noexcept = default;
+
+    DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
+    DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+    std::vector<SamplerId> graphics_sampler_ids;
+    std::vector<ImageViewId> graphics_image_view_ids;
+
+    DescriptorTable<TICEntry> compute_image_table{gpu_memory};
+    DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
+    std::vector<SamplerId> compute_sampler_ids;
+    std::vector<ImageViewId> compute_image_view_ids;
+
+    std::unordered_map<TICEntry, ImageViewId> image_views;
+    std::unordered_map<TSCEntry, SamplerId> samplers;
+
+    TextureCacheGPUMap* gpu_page_table;
+};
+
 template <class P>
-class TextureCache {
+class TextureCache : public VideoCommon::ChannelSetupCaches<TextureCacheChannelInfo> {
     /// Address shift for caching images into a hash table
     static constexpr u64 YUZU_PAGEBITS = 20;
 
@@ -58,6 +93,8 @@ class TextureCache {
     /// True when the API can provide info about the memory of the device.
     static constexpr bool HAS_DEVICE_MEMORY_INFO = P::HAS_DEVICE_MEMORY_INFO;
 
+    static constexpr size_t UNSET_CHANNEL{std::numeric_limits<size_t>::max()};
+
     static constexpr s64 TARGET_THRESHOLD = 4_GiB;
     static constexpr s64 DEFAULT_EXPECTED_MEMORY = 1_GiB + 125_MiB;
     static constexpr s64 DEFAULT_CRITICAL_MEMORY = 1_GiB + 625_MiB;
@@ -77,16 +114,8 @@ class TextureCache {
         PixelFormat src_format;
     };
 
-    template <typename T>
-    struct IdentityHash {
-        [[nodiscard]] size_t operator()(T value) const noexcept {
-            return static_cast<size_t>(value);
-        }
-    };
-
 public:
-    explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&, Tegra::Engines::Maxwell3D&,
-                          Tegra::Engines::KeplerCompute&, Tegra::MemoryManager&);
+    explicit TextureCache(Runtime&, VideoCore::RasterizerInterface&);
 
     /// Notify the cache that a new frame has been queued
     void TickFrame();
@@ -142,7 +171,7 @@ public:
     void UnmapMemory(VAddr cpu_addr, size_t size);
 
     /// Remove images in a region
-    void UnmapGPUMemory(GPUVAddr gpu_addr, size_t size);
+    void UnmapGPUMemory(size_t as_id, GPUVAddr gpu_addr, size_t size);
 
     /// Blit an image with the given parameters
     void BlitImage(const Tegra::Engines::Fermi2D::Surface& dst,
@@ -171,6 +200,9 @@ public:
 
     [[nodiscard]] bool IsRescaling(const ImageViewBase& image_view) const noexcept;
 
+    /// Create channel state.
+    void CreateChannel(Tegra::Control::ChannelState& channel) final override;
+
     std::mutex mutex;
 
 private:
@@ -205,6 +237,8 @@ private:
         }
     }
 
+    void OnGPUASRegister(size_t map_id) final override;
+
     /// Runs the Garbage Collector.
     void RunGarbageCollector();
 
@@ -273,7 +307,7 @@ private:
     void ForEachImageInRegion(VAddr cpu_addr, size_t size, Func&& func);
 
     template <typename Func>
-    void ForEachImageInRegionGPU(GPUVAddr gpu_addr, size_t size, Func&& func);
+    void ForEachImageInRegionGPU(size_t as_id, GPUVAddr gpu_addr, size_t size, Func&& func);
 
     template <typename Func>
     void ForEachSparseImageInRegion(GPUVAddr gpu_addr, size_t size, Func&& func);
@@ -338,31 +372,16 @@ private:
     u64 GetScaledImageSizeBytes(ImageBase& image);
 
     Runtime& runtime;
-    VideoCore::RasterizerInterface& rasterizer;
-    Tegra::Engines::Maxwell3D& maxwell3d;
-    Tegra::Engines::KeplerCompute& kepler_compute;
-    Tegra::MemoryManager& gpu_memory;
 
-    DescriptorTable<TICEntry> graphics_image_table{gpu_memory};
-    DescriptorTable<TSCEntry> graphics_sampler_table{gpu_memory};
+    VideoCore::RasterizerInterface& rasterizer;
+    std::deque<TextureCacheGPUMap> gpu_page_table_storage;
348 std::vector<SamplerId> graphics_sampler_ids;
349 std::vector<ImageViewId> graphics_image_view_ids;
350
351 DescriptorTable<TICEntry> compute_image_table{gpu_memory};
352 DescriptorTable<TSCEntry> compute_sampler_table{gpu_memory};
353 std::vector<SamplerId> compute_sampler_ids;
354 std::vector<ImageViewId> compute_image_view_ids;
355 378
356 RenderTargets render_targets; 379 RenderTargets render_targets;
357 380
358 std::unordered_map<TICEntry, ImageViewId> image_views;
359 std::unordered_map<TSCEntry, SamplerId> samplers;
360 std::unordered_map<RenderTargets, FramebufferId> framebuffers; 381 std::unordered_map<RenderTargets, FramebufferId> framebuffers;
361 382
362 std::unordered_map<u64, std::vector<ImageMapId>, IdentityHash<u64>> page_table; 383 std::unordered_map<u64, std::vector<ImageMapId>, Common::IdentityHash<u64>> page_table;
363 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> gpu_page_table; 384 std::unordered_map<u64, std::vector<ImageId>, Common::IdentityHash<u64>> sparse_page_table;
364 std::unordered_map<u64, std::vector<ImageId>, IdentityHash<u64>> sparse_page_table;
365
366 std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views; 385 std::unordered_map<ImageId, std::vector<ImageViewId>> sparse_views;
367 386
368 VAddr virtual_invalid_space{}; 387 VAddr virtual_invalid_space{};
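
The texture_cache_base.h hunks above move the per-channel descriptor tables into TextureCacheChannelInfo and re-key every page table with Common::IdentityHash<u64>. A minimal sketch of the identity-hash idea follows, mirroring the in-class helper the patch removes; the ImageId alias and page shift are illustrative stand-ins, not the exact definitions in common/hash.h:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Page keys are produced by shifting an address right by YUZU_PAGEBITS (20),
    // so they are already well distributed; hashing them again buys nothing.
    template <typename T>
    struct IdentityHash {
        [[nodiscard]] std::size_t operator()(T value) const noexcept {
            return static_cast<std::size_t>(value); // pass the key through unchanged
        }
    };

    using ImageId = std::uint32_t; // placeholder for the cache's slot-vector id type
    using GPUPageTable =
        std::unordered_map<std::uint64_t, std::vector<ImageId>, IdentityHash<std::uint64_t>>;

    // Usage: bucket images by the 1 MiB page they touch.
    // GPUPageTable table;
    // table[gpu_addr >> 20].push_back(image_id);
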
diff --git a/src/video_core/texture_cache/util.cpp b/src/video_core/texture_cache/util.cpp
index 1820823b2..1223df5a0 100644
--- a/src/video_core/texture_cache/util.cpp
+++ b/src/video_core/texture_cache/util.cpp
@@ -517,7 +517,6 @@ void SwizzleBlockLinearImage(Tegra::MemoryManager& gpu_memory, GPUVAddr gpu_addr
517 const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block; 517 const u32 host_bytes_per_layer = num_blocks_per_layer * bytes_per_block;
518 518
519 UNIMPLEMENTED_IF(info.tile_width_spacing > 0); 519 UNIMPLEMENTED_IF(info.tile_width_spacing > 0);
520
521 UNIMPLEMENTED_IF(copy.image_offset.x != 0); 520 UNIMPLEMENTED_IF(copy.image_offset.x != 0);
522 UNIMPLEMENTED_IF(copy.image_offset.y != 0); 521 UNIMPLEMENTED_IF(copy.image_offset.y != 0);
523 UNIMPLEMENTED_IF(copy.image_offset.z != 0); 522 UNIMPLEMENTED_IF(copy.image_offset.z != 0);
@@ -755,7 +754,7 @@ bool IsValidEntry(const Tegra::MemoryManager& gpu_memory, const TICEntry& config
755 if (address == 0) { 754 if (address == 0) {
756 return false; 755 return false;
757 } 756 }
758 if (address > (1ULL << 48)) { 757 if (address >= (1ULL << 40)) {
759 return false; 758 return false;
760 } 759 }
761 if (gpu_memory.GpuToCpuAddress(address).has_value()) { 760 if (gpu_memory.GpuToCpuAddress(address).has_value()) {
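
The IsValidEntry change above tightens the descriptor-address bound from '> (1 << 48)' to '>= (1 << 40)', matching a 40-bit GPU virtual address space. A small sketch of the corrected predicate (the function name is illustrative, not from the patch):

    #include <cstdint>

    // Addresses at or above 1 << 40 cannot be valid 40-bit GPU virtual addresses.
    // The '>=' matters: (1ULL << 40) itself is already out of range, which the old
    // '>' test against the much larger 1 << 48 bound let through.
    constexpr bool IsAddressInGpuRange(std::uint64_t address) {
        return address != 0 && address < (1ULL << 40);
    }

    static_assert(IsAddressInGpuRange((1ULL << 40) - 1));
    static_assert(!IsAddressInGpuRange(1ULL << 40));
    static_assert(!IsAddressInGpuRange(0));
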
diff --git a/src/video_core/textures/astc.cpp b/src/video_core/textures/astc.cpp
index b159494c5..15b9d4182 100644
--- a/src/video_core/textures/astc.cpp
+++ b/src/video_core/textures/astc.cpp
@@ -1413,7 +1413,7 @@ static void FillVoidExtentLDR(InputBitStream& strm, std::span<u32> outBuf, u32 b
1413static void FillError(std::span<u32> outBuf, u32 blockWidth, u32 blockHeight) { 1413static void FillError(std::span<u32> outBuf, u32 blockWidth, u32 blockHeight) {
1414 for (u32 j = 0; j < blockHeight; j++) { 1414 for (u32 j = 0; j < blockHeight; j++) {
1415 for (u32 i = 0; i < blockWidth; i++) { 1415 for (u32 i = 0; i < blockWidth; i++) {
1416 outBuf[j * blockWidth + i] = 0xFFFF00FF; 1416 outBuf[j * blockWidth + i] = 0x00000000;
1417 } 1417 }
1418 } 1418 }
1419} 1419}
@@ -1656,7 +1656,7 @@ void Decompress(std::span<const uint8_t> data, uint32_t width, uint32_t height,
1656 const u32 cols = Common::DivideUp(width, block_width); 1656 const u32 cols = Common::DivideUp(width, block_width);
1657 1657
1658 Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2, 1658 Common::ThreadWorker workers{std::max(std::thread::hardware_concurrency(), 2U) / 2,
1659 "yuzu:ASTCDecompress"}; 1659 "ASTCDecompress"};
1660 1660
1661 for (u32 z = 0; z < depth; ++z) { 1661 for (u32 z = 0; z < depth; ++z) {
1662 const u32 depth_offset = z * height * width * 4; 1662 const u32 depth_offset = z * height * width * 4;
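
The Decompress hunk above only renames the worker pool, but the sizing rule on the same line is worth spelling out: half the reported hardware threads, never fewer than one. A standalone restatement of that arithmetic (outside Common::ThreadWorker, for illustration only):

    #include <algorithm>
    #include <thread>

    // hardware_concurrency() == 8 -> 4 workers; == 2 -> 1; == 0 (unknown) -> 1.
    unsigned AstcWorkerCount() {
        return std::max(std::thread::hardware_concurrency(), 2U) / 2;
    }
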
diff --git a/src/video_core/textures/decoders.cpp b/src/video_core/textures/decoders.cpp
index 913f8ebcb..52d067a2d 100644
--- a/src/video_core/textures/decoders.cpp
+++ b/src/video_core/textures/decoders.cpp
@@ -35,7 +35,7 @@ void incrpdep(u32& value) {
35 35
36template <bool TO_LINEAR, u32 BYTES_PER_PIXEL> 36template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
37void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth, 37void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height, u32 depth,
38 u32 block_height, u32 block_depth, u32 stride_alignment) { 38 u32 block_height, u32 block_depth, u32 stride) {
39 // The origin of the transformation can be configured here, leave it as zero as the current API 39 // The origin of the transformation can be configured here, leave it as zero as the current API
40 // doesn't expose it. 40 // doesn't expose it.
41 static constexpr u32 origin_x = 0; 41 static constexpr u32 origin_x = 0;
@@ -45,7 +45,6 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
45 // We can configure here a custom pitch 45 // We can configure here a custom pitch
46 // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch. 46 // As it's not exposed 'width * BYTES_PER_PIXEL' will be the expected pitch.
47 const u32 pitch = width * BYTES_PER_PIXEL; 47 const u32 pitch = width * BYTES_PER_PIXEL;
48 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * BYTES_PER_PIXEL;
49 48
50 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT); 49 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
51 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth); 50 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
@@ -89,6 +88,69 @@ void SwizzleImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32
89 } 88 }
90} 89}
91 90
91template <bool TO_LINEAR, u32 BYTES_PER_PIXEL>
92void SwizzleSubrectImpl(std::span<u8> output, std::span<const u8> input, u32 width, u32 height,
93 u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 num_lines,
94 u32 block_height, u32 block_depth, u32 pitch_linear) {
95 // Only the Z origin of the transformation can be configured here; leave it as zero,
96 // as the current API doesn't expose it.
97 static constexpr u32 origin_z = 0;
98
99 // The pitch is configurable here: unlike the whole-texture path above, the
100 // caller passes it in as 'pitch_linear' instead of assuming 'width * BYTES_PER_PIXEL'.
101 const u32 pitch = pitch_linear;
102 const u32 stride = Common::AlignUpLog2(width * BYTES_PER_PIXEL, GOB_SIZE_X_SHIFT);
103
104 const u32 gobs_in_x = Common::DivCeilLog2(stride, GOB_SIZE_X_SHIFT);
105 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
106 const u32 slice_size =
107 Common::DivCeilLog2(height, block_height + GOB_SIZE_Y_SHIFT) * block_size;
108
109 const u32 block_height_mask = (1U << block_height) - 1;
110 const u32 block_depth_mask = (1U << block_depth) - 1;
111 const u32 x_shift = GOB_SIZE_SHIFT + block_height + block_depth;
112
113 u32 unprocessed_lines = num_lines;
114 u32 extent_y = std::min(num_lines, height - origin_y);
115
116 for (u32 slice = 0; slice < depth; ++slice) {
117 const u32 z = slice + origin_z;
118 const u32 offset_z = (z >> block_depth) * slice_size +
119 ((z & block_depth_mask) << (GOB_SIZE_SHIFT + block_height));
120 const u32 lines_in_y = std::min(unprocessed_lines, extent_y);
121 for (u32 line = 0; line < lines_in_y; ++line) {
122 const u32 y = line + origin_y;
123 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(y);
124
125 const u32 block_y = y >> GOB_SIZE_Y_SHIFT;
126 const u32 offset_y = (block_y >> block_height) * block_size +
127 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
128
129 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
130 for (u32 column = 0; column < extent_x;
131 ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
132 const u32 x = (column + origin_x) * BYTES_PER_PIXEL;
133 const u32 offset_x = (x >> GOB_SIZE_X_SHIFT) << x_shift;
134
135 const u32 base_swizzled_offset = offset_z + offset_y + offset_x;
136 const u32 swizzled_offset = base_swizzled_offset + (swizzled_x | swizzled_y);
137
138 const u32 unswizzled_offset =
139 slice * pitch * height + line * pitch + column * BYTES_PER_PIXEL;
140
141 u8* const dst = &output[TO_LINEAR ? swizzled_offset : unswizzled_offset];
142 const u8* const src = &input[TO_LINEAR ? unswizzled_offset : swizzled_offset];
143
144 std::memcpy(dst, src, BYTES_PER_PIXEL);
145 }
146 }
147 unprocessed_lines -= lines_in_y;
148 if (unprocessed_lines == 0) {
149 return;
150 }
151 }
152}
153
92template <bool TO_LINEAR> 154template <bool TO_LINEAR>
93void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, 155void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
94 u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) { 156 u32 height, u32 depth, u32 block_height, u32 block_depth, u32 stride_alignment) {
@@ -111,122 +173,39 @@ void Swizzle(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixe
111 } 173 }
112} 174}
113 175
114template <u32 BYTES_PER_PIXEL>
115void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width,
116 u8* swizzled_data, const u8* unswizzled_data, u32 block_height_bit,
117 u32 offset_x, u32 offset_y) {
118 const u32 block_height = 1U << block_height_bit;
119 const u32 image_width_in_gobs =
120 (swizzled_width * BYTES_PER_PIXEL + (GOB_SIZE_X - 1)) / GOB_SIZE_X;
121 for (u32 line = 0; line < subrect_height; ++line) {
122 const u32 dst_y = line + offset_y;
123 const u32 gob_address_y =
124 (dst_y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
125 ((dst_y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
126
127 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(dst_y);
128 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(offset_x * BYTES_PER_PIXEL);
129 for (u32 x = 0; x < subrect_width;
130 ++x, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
131 const u32 dst_x = x + offset_x;
132 const u32 gob_address =
133 gob_address_y + (dst_x * BYTES_PER_PIXEL / GOB_SIZE_X) * GOB_SIZE * block_height;
134 const u32 swizzled_offset = gob_address + (swizzled_x | swizzled_y);
135 const u32 unswizzled_offset = line * source_pitch + x * BYTES_PER_PIXEL;
136
137 const u8* const source_line = unswizzled_data + unswizzled_offset;
138 u8* const dest_addr = swizzled_data + swizzled_offset;
139 std::memcpy(dest_addr, source_line, BYTES_PER_PIXEL);
140 }
141 }
142}
143
144template <u32 BYTES_PER_PIXEL>
145void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 block_height,
146 u32 origin_x, u32 origin_y, u8* output, const u8* input) {
147 const u32 stride = width * BYTES_PER_PIXEL;
148 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
149 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height);
150
151 const u32 block_height_mask = (1U << block_height) - 1;
152 const u32 x_shift = GOB_SIZE_SHIFT + block_height;
153
154 for (u32 line = 0; line < line_count; ++line) {
155 const u32 src_y = line + origin_y;
156 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(src_y);
157
158 const u32 block_y = src_y >> GOB_SIZE_Y_SHIFT;
159 const u32 src_offset_y = (block_y >> block_height) * block_size +
160 ((block_y & block_height_mask) << GOB_SIZE_SHIFT);
161
162 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(origin_x * BYTES_PER_PIXEL);
163 for (u32 column = 0; column < line_length_in;
164 ++column, incrpdep<SWIZZLE_X_BITS, BYTES_PER_PIXEL>(swizzled_x)) {
165 const u32 src_x = (column + origin_x) * BYTES_PER_PIXEL;
166 const u32 src_offset_x = (src_x >> GOB_SIZE_X_SHIFT) << x_shift;
167
168 const u32 swizzled_offset = src_offset_y + src_offset_x + (swizzled_x | swizzled_y);
169 const u32 unswizzled_offset = line * pitch + column * BYTES_PER_PIXEL;
170
171 std::memcpy(output + unswizzled_offset, input + swizzled_offset, BYTES_PER_PIXEL);
172 }
173 }
174}
175
176template <u32 BYTES_PER_PIXEL>
177void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
178 u32 block_height, u32 block_depth, u32 origin_x, u32 origin_y, u8* output,
179 const u8* input) {
180 UNIMPLEMENTED_IF(origin_x > 0);
181 UNIMPLEMENTED_IF(origin_y > 0);
182
183 const u32 stride = width * BYTES_PER_PIXEL;
184 const u32 gobs_in_x = (stride + GOB_SIZE_X - 1) / GOB_SIZE_X;
185 const u32 block_size = gobs_in_x << (GOB_SIZE_SHIFT + block_height + block_depth);
186
187 const u32 block_height_mask = (1U << block_height) - 1;
188 const u32 x_shift = static_cast<u32>(GOB_SIZE_SHIFT) + block_height + block_depth;
189
190 for (u32 line = 0; line < line_count; ++line) {
191 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(line);
192 const u32 block_y = line / GOB_SIZE_Y;
193 const u32 dst_offset_y =
194 (block_y >> block_height) * block_size + (block_y & block_height_mask) * GOB_SIZE;
195
196 u32 swizzled_x = 0;
197 for (u32 x = 0; x < line_length_in; ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
198 const u32 dst_offset =
199 ((x / GOB_SIZE_X) << x_shift) + dst_offset_y + (swizzled_x | swizzled_y);
200 const u32 src_offset = x * BYTES_PER_PIXEL + line * pitch;
201 std::memcpy(output + dst_offset, input + src_offset, BYTES_PER_PIXEL);
202 }
203 }
204}
205} // Anonymous namespace 176} // Anonymous namespace
206 177
207void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, 178void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
208 u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth, 179 u32 width, u32 height, u32 depth, u32 block_height, u32 block_depth,
209 u32 stride_alignment) { 180 u32 stride_alignment) {
181 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
182 const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
183 width = (width * bytes_per_pixel) >> new_bpp;
184 bytes_per_pixel = 1U << new_bpp;
210 Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, 185 Swizzle<false>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
211 stride_alignment); 186 stride);
212} 187}
213 188
214void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width, 189void SwizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
215 u32 height, u32 depth, u32 block_height, u32 block_depth, 190 u32 height, u32 depth, u32 block_height, u32 block_depth,
216 u32 stride_alignment) { 191 u32 stride_alignment) {
192 const u32 stride = Common::AlignUpLog2(width, stride_alignment) * bytes_per_pixel;
193 const u32 new_bpp = std::min(4U, static_cast<u32>(std::countr_zero(width * bytes_per_pixel)));
194 width = (width * bytes_per_pixel) >> new_bpp;
195 bytes_per_pixel = 1U << new_bpp;
217 Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth, 196 Swizzle<true>(output, input, bytes_per_pixel, width, height, depth, block_height, block_depth,
218 stride_alignment); 197 stride);
219} 198}
220 199
221void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, 200void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
222 u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, 201 u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
223 u32 block_height_bit, u32 offset_x, u32 offset_y) { 202 u32 block_height, u32 block_depth, u32 pitch_linear) {
224 switch (bytes_per_pixel) { 203 switch (bytes_per_pixel) {
225#define BPP_CASE(x) \ 204#define BPP_CASE(x) \
226 case x: \ 205 case x: \
227 return SwizzleSubrect<x>(subrect_width, subrect_height, source_pitch, swizzled_width, \ 206 return SwizzleSubrectImpl<true, x>(output, input, width, height, depth, origin_x, \
228 swizzled_data, unswizzled_data, block_height_bit, offset_x, \ 207 origin_y, extent_x, extent_y, block_height, \
229 offset_y); 208 block_depth, pitch_linear);
230 BPP_CASE(1) 209 BPP_CASE(1)
231 BPP_CASE(2) 210 BPP_CASE(2)
232 BPP_CASE(3) 211 BPP_CASE(3)
@@ -241,13 +220,15 @@ void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32
241 } 220 }
242} 221}
243 222
244void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, 223void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
245 u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input) { 224 u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
225 u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear) {
246 switch (bytes_per_pixel) { 226 switch (bytes_per_pixel) {
247#define BPP_CASE(x) \ 227#define BPP_CASE(x) \
248 case x: \ 228 case x: \
249 return UnswizzleSubrect<x>(line_length_in, line_count, pitch, width, block_height, \ 229 return SwizzleSubrectImpl<false, x>(output, input, width, height, depth, origin_x, \
250 origin_x, origin_y, output, input); 230 origin_y, extent_x, extent_y, block_height, \
231 block_depth, pitch_linear);
251 BPP_CASE(1) 232 BPP_CASE(1)
252 BPP_CASE(2) 233 BPP_CASE(2)
253 BPP_CASE(3) 234 BPP_CASE(3)
@@ -262,55 +243,6 @@ void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width,
262 } 243 }
263} 244}
264 245
265void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
266 u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
267 u32 origin_y, u8* output, const u8* input) {
268 switch (bytes_per_pixel) {
269#define BPP_CASE(x) \
270 case x: \
271 return SwizzleSliceToVoxel<x>(line_length_in, line_count, pitch, width, height, \
272 block_height, block_depth, origin_x, origin_y, output, \
273 input);
274 BPP_CASE(1)
275 BPP_CASE(2)
276 BPP_CASE(3)
277 BPP_CASE(4)
278 BPP_CASE(6)
279 BPP_CASE(8)
280 BPP_CASE(12)
281 BPP_CASE(16)
282#undef BPP_CASE
283 default:
284 ASSERT_MSG(false, "Invalid bytes_per_pixel={}", bytes_per_pixel);
285 }
286}
287
288void SwizzleKepler(const u32 width, const u32 height, const u32 dst_x, const u32 dst_y,
289 const u32 block_height_bit, const std::size_t copy_size, const u8* source_data,
290 u8* swizzle_data) {
291 const u32 block_height = 1U << block_height_bit;
292 const u32 image_width_in_gobs{(width + GOB_SIZE_X - 1) / GOB_SIZE_X};
293 std::size_t count = 0;
294 for (std::size_t y = dst_y; y < height && count < copy_size; ++y) {
295 const std::size_t gob_address_y =
296 (y / (GOB_SIZE_Y * block_height)) * GOB_SIZE * block_height * image_width_in_gobs +
297 ((y % (GOB_SIZE_Y * block_height)) / GOB_SIZE_Y) * GOB_SIZE;
298 const u32 swizzled_y = pdep<SWIZZLE_Y_BITS>(static_cast<u32>(y));
299 u32 swizzled_x = pdep<SWIZZLE_X_BITS>(dst_x);
300 for (std::size_t x = dst_x; x < width && count < copy_size;
301 ++x, incrpdep<SWIZZLE_X_BITS, 1>(swizzled_x)) {
302 const std::size_t gob_address =
303 gob_address_y + (x / GOB_SIZE_X) * GOB_SIZE * block_height;
304 const std::size_t swizzled_offset = gob_address + (swizzled_x | swizzled_y);
305 const u8* source_line = source_data + count;
306 u8* dest_addr = swizzle_data + swizzled_offset;
307 count++;
308
309 *dest_addr = *source_line;
310 }
311 }
312}
313
314std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth, 246std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height, u32 depth,
315 u32 block_height, u32 block_depth) { 247 u32 block_height, u32 block_depth) {
316 if (tiled) { 248 if (tiled) {
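
Two ideas in this decoders.cpp rewrite deserve a gloss. First, SwizzleSubrectImpl addresses block-linear memory directly: a GOB is a 64-byte-wide, 8-row, 512-byte tile, and the pdep-based swizzled_x/swizzled_y values interleave the low coordinate bits inside it. A sketch of that per-GOB interleave, derived from the documented GOB geometry rather than copied from this file:

    #include <cstdint>

    // Byte offset of (x_bytes, y) inside one 512-byte GOB (64 bytes x 8 rows).
    // x contributes bits {0..3, 5, 8} and y bits {4, 6, 7}, the same pattern the
    // SWIZZLE_X_BITS / SWIZZLE_Y_BITS pdep masks expand one coordinate at a time.
    constexpr std::uint32_t GobOffset(std::uint32_t x_bytes, std::uint32_t y) {
        return ((x_bytes % 64) / 32) * 256 + ((y % 8) / 2) * 64 +
               ((x_bytes % 32) / 16) * 32 + (y % 2) * 16 + (x_bytes % 16);
    }

Second, UnswizzleTexture/SwizzleTexture now widen the copy unit before dispatching: for a 100-pixel RGBA8 row, width * bytes_per_pixel is 400, std::countr_zero(400) is 4 (already at the clamp of 4), so the row is reinterpreted as 25 sixteen-byte "pixels" and each memcpy moves 16 bytes instead of 4.
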
diff --git a/src/video_core/textures/decoders.h b/src/video_core/textures/decoders.h
index 31a11708f..e70407692 100644
--- a/src/video_core/textures/decoders.h
+++ b/src/video_core/textures/decoders.h
@@ -40,7 +40,6 @@ constexpr SwizzleTable MakeSwizzleTable() {
40 } 40 }
41 return table; 41 return table;
42} 42}
43constexpr SwizzleTable SWIZZLE_TABLE = MakeSwizzleTable();
44 43
45/// Unswizzles a block linear texture into linear memory. 44/// Unswizzles a block linear texture into linear memory.
46void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, 45void UnswizzleTexture(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
@@ -57,34 +56,14 @@ std::size_t CalculateSize(bool tiled, u32 bytes_per_pixel, u32 width, u32 height
57 u32 block_height, u32 block_depth); 56 u32 block_height, u32 block_depth);
58 57
59/// Copies an untiled subrectangle into a tiled surface. 58/// Copies an untiled subrectangle into a tiled surface.
60void SwizzleSubrect(u32 subrect_width, u32 subrect_height, u32 source_pitch, u32 swizzled_width, 59void SwizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel, u32 width,
61 u32 bytes_per_pixel, u8* swizzled_data, const u8* unswizzled_data, 60 u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x, u32 extent_y,
62 u32 block_height_bit, u32 offset_x, u32 offset_y); 61 u32 block_height, u32 block_depth, u32 pitch_linear);
63 62
64/// Copies a tiled subrectangle into a linear surface. 63/// Copies a tiled subrectangle into a linear surface.
65void UnswizzleSubrect(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 bytes_per_pixel, 64void UnswizzleSubrect(std::span<u8> output, std::span<const u8> input, u32 bytes_per_pixel,
66 u32 block_height, u32 origin_x, u32 origin_y, u8* output, const u8* input); 65 u32 width, u32 height, u32 depth, u32 origin_x, u32 origin_y, u32 extent_x,
67 66 u32 extent_y, u32 block_height, u32 block_depth, u32 pitch_linear);
68/// @brief Swizzles a 2D array of pixels into a 3D texture
69/// @param line_length_in Number of pixels per line
70/// @param line_count Number of lines
71/// @param pitch Number of bytes per line
72/// @param width Width of the swizzled texture
73/// @param height Height of the swizzled texture
74/// @param bytes_per_pixel Number of bytes used per pixel
75/// @param block_height Block height shift
76/// @param block_depth Block depth shift
77/// @param origin_x Column offset in pixels of the swizzled texture
78/// @param origin_y Row offset in pixels of the swizzled texture
79/// @param output Pointer to the pixels of the swizzled texture
80/// @param input Pointer to the 2D array of pixels used as input
81/// @pre input and output points to an array large enough to hold the number of bytes used
82void SwizzleSliceToVoxel(u32 line_length_in, u32 line_count, u32 pitch, u32 width, u32 height,
83 u32 bytes_per_pixel, u32 block_height, u32 block_depth, u32 origin_x,
84 u32 origin_y, u8* output, const u8* input);
85
86void SwizzleKepler(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
87 std::size_t copy_size, const u8* source_data, u8* swizzle_data);
88 67
89/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y' 68/// Obtains the offset of the gob for positions 'dst_x' & 'dst_y'
90u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height, 69u64 GetGOBOffset(u32 width, u32 height, u32 dst_x, u32 dst_y, u32 block_height,
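
With SwizzleSliceToVoxel and SwizzleKepler dropped in favor of the two remaining subrect entry points, a caller describes the rectangle once for either direction. A hypothetical use of the new UnswizzleSubrect signature, with every numeric value illustrative and the Tegra::Texture namespace assumed from this header's convention:

    #include <cstdint>
    #include <span>
    #include <vector>
    // #include "video_core/textures/decoders.h" // declares the prototype above

    void CopyOutSubrect(std::span<const std::uint8_t> tiled_surface) {
        constexpr std::uint32_t bpp = 4, width = 256, height = 256, depth = 1;
        constexpr std::uint32_t origin_x = 32, origin_y = 64;
        constexpr std::uint32_t extent_x = 16, extent_y = 16;
        constexpr std::uint32_t block_height = 4, block_depth = 0; // log2 GOB counts
        constexpr std::uint32_t pitch_linear = extent_x * bpp;     // tightly packed rows
        std::vector<std::uint8_t> linear(extent_y * pitch_linear);
        Tegra::Texture::UnswizzleSubrect(linear, tiled_surface, bpp, width, height, depth,
                                         origin_x, origin_y, extent_x, extent_y,
                                         block_height, block_depth, pitch_linear);
    }
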
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 795f16bfb..1b3f493bd 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -519,9 +519,7 @@ public:
519 dld{rhs.dld} {} 519 dld{rhs.dld} {}
520 520
521 /// Assign an allocation transferring ownership from another allocation. 521 /// Assign an allocation transferring ownership from another allocation.
522 /// Releases any previously held allocation.
523 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept { 522 PoolAllocations& operator=(PoolAllocations&& rhs) noexcept {
524 Release();
525 allocations = std::move(rhs.allocations); 523 allocations = std::move(rhs.allocations);
526 num = rhs.num; 524 num = rhs.num;
527 device = rhs.device; 525 device = rhs.device;
@@ -530,11 +528,6 @@ public:
530 return *this; 528 return *this;
531 } 529 }
532 530
533 /// Destroys any held allocation.
534 ~PoolAllocations() {
535 Release();
536 }
537
538 /// Returns the number of allocations. 531 /// Returns the number of allocations.
539 std::size_t size() const noexcept { 532 std::size_t size() const noexcept {
540 return num; 533 return num;
@@ -557,19 +550,6 @@ public:
557 } 550 }
558 551
559private: 552private:
560 /// Destroys the held allocations if they exist.
561 void Release() noexcept {
562 if (!allocations) {
563 return;
564 }
565 const Span<AllocationType> span(allocations.get(), num);
566 const VkResult result = Free(device, pool, span, *dld);
567 // There's no way to report errors from a destructor.
568 if (result != VK_SUCCESS) {
569 std::terminate();
570 }
571 }
572
573 std::unique_ptr<AllocationType[]> allocations; 553 std::unique_ptr<AllocationType[]> allocations;
574 std::size_t num = 0; 554 std::size_t num = 0;
575 VkDevice device = nullptr; 555 VkDevice device = nullptr;
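
The PoolAllocations hunks delete the destructor and the Release() helper outright, so the wrapper stops handing handles back to the driver one by one. The patch does not state the rationale; a plausible reading (an assumption here, not something the diff confirms) is that pool-allocated Vulkan handles are reclaimed wholesale when their pool is reset or destroyed, making per-object frees redundant. The type then degenerates into a plain move-only owner, roughly:

    #include <cstddef>
    #include <memory>
    #include <utility>

    // Sketch of the shape PoolAllocations is left with after the change: moves
    // transfer ownership, and nothing is released to the driver on assignment or
    // destruction, because the owning pool reclaims the handles itself.
    template <typename AllocationType>
    class PoolOwner {
    public:
        PoolOwner& operator=(PoolOwner&& rhs) noexcept {
            allocations = std::move(rhs.allocations); // old array is simply dropped
            num = std::exchange(rhs.num, 0);
            return *this;
        }

    private:
        std::unique_ptr<AllocationType[]> allocations;
        std::size_t num = 0;
    };
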
diff --git a/src/web_service/web_backend.cpp b/src/web_service/web_backend.cpp
index 378804c08..12a7e4922 100644
--- a/src/web_service/web_backend.cpp
+++ b/src/web_service/web_backend.cpp
@@ -111,7 +111,8 @@ struct Client::Impl {
111 httplib::Error error; 111 httplib::Error error;
112 112
113 if (!cli->send(request, response, error)) { 113 if (!cli->send(request, response, error)) {
114 LOG_ERROR(WebService, "{} to {} returned null", method, host + path); 114 LOG_ERROR(WebService, "{} to {} returned null (httplib Error: {})", method, host + path,
115 httplib::to_string(error));
115 return WebResult{WebResult::Code::LibError, "Null response", ""}; 116 return WebResult{WebResult::Code::LibError, "Null response", ""};
116 } 117 }
117 118
diff --git a/src/yuzu/applets/qt_controller.cpp b/src/yuzu/applets/qt_controller.cpp
index 1d8072243..12efdc216 100644
--- a/src/yuzu/applets/qt_controller.cpp
+++ b/src/yuzu/applets/qt_controller.cpp
@@ -291,7 +291,7 @@ bool QtControllerSelectorDialog::CheckIfParametersMet() {
291 // Here, we check and validate the current configuration against all applicable parameters. 291 // Here, we check and validate the current configuration against all applicable parameters.
292 const auto num_connected_players = static_cast<int>( 292 const auto num_connected_players = static_cast<int>(
293 std::count_if(player_groupboxes.begin(), player_groupboxes.end(), 293 std::count_if(player_groupboxes.begin(), player_groupboxes.end(),
294 [this](const QGroupBox* player) { return player->isChecked(); })); 294 [](const QGroupBox* player) { return player->isChecked(); }));
295 295
296 const auto min_supported_players = parameters.enable_single_mode ? 1 : parameters.min_players; 296 const auto min_supported_players = parameters.enable_single_mode ? 1 : parameters.min_players;
297 const auto max_supported_players = parameters.enable_single_mode ? 1 : parameters.max_players; 297 const auto max_supported_players = parameters.enable_single_mode ? 1 : parameters.max_players;
diff --git a/src/yuzu/bootmanager.cpp b/src/yuzu/bootmanager.cpp
index d3fbdb09d..24251247d 100644
--- a/src/yuzu/bootmanager.cpp
+++ b/src/yuzu/bootmanager.cpp
@@ -47,7 +47,7 @@ EmuThread::EmuThread(Core::System& system_) : system{system_} {}
47EmuThread::~EmuThread() = default; 47EmuThread::~EmuThread() = default;
48 48
49void EmuThread::run() { 49void EmuThread::run() {
50 std::string name = "yuzu:EmuControlThread"; 50 std::string name = "EmuControlThread";
51 MicroProfileOnThreadCreate(name.c_str()); 51 MicroProfileOnThreadCreate(name.c_str());
52 Common::SetCurrentThreadName(name.c_str()); 52 Common::SetCurrentThreadName(name.c_str());
53 53
diff --git a/src/yuzu/configuration/config.cpp b/src/yuzu/configuration/config.cpp
index a4ed68422..195074bf2 100644
--- a/src/yuzu/configuration/config.cpp
+++ b/src/yuzu/configuration/config.cpp
@@ -546,6 +546,7 @@ void Config::ReadDebuggingValues() {
546 ReadBasicSetting(Settings::values.use_auto_stub); 546 ReadBasicSetting(Settings::values.use_auto_stub);
547 ReadBasicSetting(Settings::values.enable_all_controllers); 547 ReadBasicSetting(Settings::values.enable_all_controllers);
548 ReadBasicSetting(Settings::values.create_crash_dumps); 548 ReadBasicSetting(Settings::values.create_crash_dumps);
549 ReadBasicSetting(Settings::values.perform_vulkan_check);
549 550
550 qt_config->endGroup(); 551 qt_config->endGroup();
551} 552}
@@ -1162,6 +1163,7 @@ void Config::SaveDebuggingValues() {
1162 WriteBasicSetting(Settings::values.disable_macro_jit); 1163 WriteBasicSetting(Settings::values.disable_macro_jit);
1163 WriteBasicSetting(Settings::values.enable_all_controllers); 1164 WriteBasicSetting(Settings::values.enable_all_controllers);
1164 WriteBasicSetting(Settings::values.create_crash_dumps); 1165 WriteBasicSetting(Settings::values.create_crash_dumps);
1166 WriteBasicSetting(Settings::values.perform_vulkan_check);
1165 1167
1166 qt_config->endGroup(); 1168 qt_config->endGroup();
1167} 1169}
diff --git a/src/yuzu/configuration/configure_debug.cpp b/src/yuzu/configuration/configure_debug.cpp
index 622808e94..dacc75a20 100644
--- a/src/yuzu/configuration/configure_debug.cpp
+++ b/src/yuzu/configuration/configure_debug.cpp
@@ -77,6 +77,7 @@ void ConfigureDebug::SetConfiguration() {
77 ui->disable_loop_safety_checks->setChecked( 77 ui->disable_loop_safety_checks->setChecked(
78 Settings::values.disable_shader_loop_safety_checks.GetValue()); 78 Settings::values.disable_shader_loop_safety_checks.GetValue());
79 ui->extended_logging->setChecked(Settings::values.extended_logging.GetValue()); 79 ui->extended_logging->setChecked(Settings::values.extended_logging.GetValue());
80 ui->perform_vulkan_check->setChecked(Settings::values.perform_vulkan_check.GetValue());
80 81
81#ifdef YUZU_USE_QT_WEB_ENGINE 82#ifdef YUZU_USE_QT_WEB_ENGINE
82 ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue()); 83 ui->disable_web_applet->setChecked(UISettings::values.disable_web_applet.GetValue());
@@ -117,6 +118,7 @@ void ConfigureDebug::ApplyConfiguration() {
117 ui->disable_loop_safety_checks->isChecked(); 118 ui->disable_loop_safety_checks->isChecked();
118 Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked(); 119 Settings::values.disable_macro_jit = ui->disable_macro_jit->isChecked();
119 Settings::values.extended_logging = ui->extended_logging->isChecked(); 120 Settings::values.extended_logging = ui->extended_logging->isChecked();
121 Settings::values.perform_vulkan_check = ui->perform_vulkan_check->isChecked();
120 UISettings::values.disable_web_applet = ui->disable_web_applet->isChecked(); 122 UISettings::values.disable_web_applet = ui->disable_web_applet->isChecked();
121 Debugger::ToggleConsole(); 123 Debugger::ToggleConsole();
122 Common::Log::Filter filter; 124 Common::Log::Filter filter;
diff --git a/src/yuzu/configuration/configure_debug.ui b/src/yuzu/configuration/configure_debug.ui
index 314d47af5..102c8c66c 100644
--- a/src/yuzu/configuration/configure_debug.ui
+++ b/src/yuzu/configuration/configure_debug.ui
@@ -313,6 +313,16 @@
313 </property> 313 </property>
314 </widget> 314 </widget>
315 </item> 315 </item>
316 <item row="3" column="0">
317 <widget class="QCheckBox" name="perform_vulkan_check">
318 <property name="toolTip">
319 <string>Enables yuzu to check for a working Vulkan environment when the program starts up. Disable this if it is causing issues with external programs seeing yuzu.</string>
320 </property>
321 <property name="text">
322 <string>Perform Startup Vulkan Check</string>
323 </property>
324 </widget>
325 </item>
316 </layout> 326 </layout>
317 </widget> 327 </widget>
318 </item> 328 </item>
diff --git a/src/yuzu/configuration/configure_graphics.cpp b/src/yuzu/configuration/configure_graphics.cpp
index 87e5d0f48..bd69d04a6 100644
--- a/src/yuzu/configuration/configure_graphics.cpp
+++ b/src/yuzu/configuration/configure_graphics.cpp
@@ -57,9 +57,10 @@ ConfigureGraphics::ConfigureGraphics(const Core::System& system_, QWidget* paren
57 UpdateBackgroundColorButton(new_bg_color); 57 UpdateBackgroundColorButton(new_bg_color);
58 }); 58 });
59 59
60 ui->api->setEnabled(!UISettings::values.has_broken_vulkan); 60 ui->api->setEnabled(!UISettings::values.has_broken_vulkan && ui->api->isEnabled());
61 ui->api_widget->setEnabled(!UISettings::values.has_broken_vulkan || 61 ui->api_widget->setEnabled(
62 Settings::IsConfiguringGlobal()); 62 (!UISettings::values.has_broken_vulkan || Settings::IsConfiguringGlobal()) &&
63 ui->api_widget->isEnabled());
63 ui->bg_label->setVisible(Settings::IsConfiguringGlobal()); 64 ui->bg_label->setVisible(Settings::IsConfiguringGlobal());
64 ui->bg_combobox->setVisible(!Settings::IsConfiguringGlobal()); 65 ui->bg_combobox->setVisible(!Settings::IsConfiguringGlobal());
65} 66}
diff --git a/src/yuzu/configuration/configure_input.cpp b/src/yuzu/configuration/configure_input.cpp
index cb55472c9..1db374d4a 100644
--- a/src/yuzu/configuration/configure_input.cpp
+++ b/src/yuzu/configuration/configure_input.cpp
@@ -163,10 +163,9 @@ void ConfigureInput::Initialize(InputCommon::InputSubsystem* input_subsystem,
163 [this, input_subsystem, &hid_core] { 163 [this, input_subsystem, &hid_core] {
164 CallConfigureDialog<ConfigureRingController>(*this, input_subsystem, hid_core); 164 CallConfigureDialog<ConfigureRingController>(*this, input_subsystem, hid_core);
165 }); 165 });
166 connect(advanced, &ConfigureInputAdvanced::CallCameraDialog, 166 connect(advanced, &ConfigureInputAdvanced::CallCameraDialog, [this, input_subsystem] {
167 [this, input_subsystem, &hid_core] { 167 CallConfigureDialog<ConfigureCamera>(*this, input_subsystem);
168 CallConfigureDialog<ConfigureCamera>(*this, input_subsystem); 168 });
169 });
170 169
171 connect(ui->vibrationButton, &QPushButton::clicked, 170 connect(ui->vibrationButton, &QPushButton::clicked,
172 [this, &hid_core] { CallConfigureDialog<ConfigureVibration>(*this, hid_core); }); 171 [this, &hid_core] { CallConfigureDialog<ConfigureVibration>(*this, hid_core); });
diff --git a/src/yuzu/configuration/input_profiles.cpp b/src/yuzu/configuration/input_profiles.cpp
index 807afbeb2..9bb69cab1 100644
--- a/src/yuzu/configuration/input_profiles.cpp
+++ b/src/yuzu/configuration/input_profiles.cpp
@@ -67,6 +67,8 @@ std::vector<std::string> InputProfiles::GetInputProfileNames() {
67 profile_names.push_back(profile_name); 67 profile_names.push_back(profile_name);
68 } 68 }
69 69
70 std::stable_sort(profile_names.begin(), profile_names.end());
71
70 return profile_names; 72 return profile_names;
71} 73}
72 74
diff --git a/src/yuzu/main.cpp b/src/yuzu/main.cpp
index 3c1bd19db..f45a25410 100644
--- a/src/yuzu/main.cpp
+++ b/src/yuzu/main.cpp
@@ -3,6 +3,7 @@
3 3
4#include <cinttypes> 4#include <cinttypes>
5#include <clocale> 5#include <clocale>
6#include <cmath>
6#include <memory> 7#include <memory>
7#include <thread> 8#include <thread>
8#ifdef __APPLE__ 9#ifdef __APPLE__
@@ -105,12 +106,12 @@ static FileSys::VirtualFile VfsDirectoryCreateFileWrapper(const FileSys::Virtual
105#include "core/hle/kernel/k_process.h" 106#include "core/hle/kernel/k_process.h"
106#include "core/hle/service/am/am.h" 107#include "core/hle/service/am/am.h"
107#include "core/hle/service/filesystem/filesystem.h" 108#include "core/hle/service/filesystem/filesystem.h"
108#include "core/hle/service/nfp/nfp.h"
109#include "core/hle/service/sm/sm.h" 109#include "core/hle/service/sm/sm.h"
110#include "core/loader/loader.h" 110#include "core/loader/loader.h"
111#include "core/perf_stats.h" 111#include "core/perf_stats.h"
112#include "core/telemetry_session.h" 112#include "core/telemetry_session.h"
113#include "input_common/drivers/tas_input.h" 113#include "input_common/drivers/tas_input.h"
114#include "input_common/drivers/virtual_amiibo.h"
114#include "input_common/main.h" 115#include "input_common/main.h"
115#include "ui_main.h" 116#include "ui_main.h"
116#include "util/overlay_dialog.h" 117#include "util/overlay_dialog.h"
@@ -261,6 +262,18 @@ static QString PrettyProductName() {
261 return QSysInfo::prettyProductName(); 262 return QSysInfo::prettyProductName();
262} 263}
263 264
265#ifdef _WIN32
266static void OverrideWindowsFont() {
267 // Qt5 chooses these fonts on Windows and they have fairly ugly alphanumeric/Cyrillic characters.
268 // Asking for "MS Shell Dlg 2" instead renders those characters better while leaving the Chinese characters intact.
269 const QString startup_font = QApplication::font().family();
270 const QStringList ugly_fonts = {QStringLiteral("SimSun"), QStringLiteral("PMingLiU")};
271 if (ugly_fonts.contains(startup_font)) {
272 QApplication::setFont(QFont(QStringLiteral("MS Shell Dlg 2"), 9, QFont::Normal));
273 }
274}
275#endif
276
264bool GMainWindow::CheckDarkMode() { 277bool GMainWindow::CheckDarkMode() {
265#ifdef __linux__ 278#ifdef __linux__
266 const QPalette test_palette(qApp->palette()); 279 const QPalette test_palette(qApp->palette());
@@ -899,8 +912,8 @@ void GMainWindow::InitializeWidgets() {
899 } 912 }
900 913
901 // TODO (flTobi): Add the widget when multiplayer is fully implemented 914 // TODO (flTobi): Add the widget when multiplayer is fully implemented
902 // statusBar()->addPermanentWidget(multiplayer_state->GetStatusText(), 0); 915 statusBar()->addPermanentWidget(multiplayer_state->GetStatusText(), 0);
903 // statusBar()->addPermanentWidget(multiplayer_state->GetStatusIcon(), 0); 916 statusBar()->addPermanentWidget(multiplayer_state->GetStatusIcon(), 0);
904 917
905 tas_label = new QLabel(); 918 tas_label = new QLabel();
906 tas_label->setObjectName(QStringLiteral("TASlabel")); 919 tas_label->setObjectName(QStringLiteral("TASlabel"));
@@ -1299,6 +1312,7 @@ void GMainWindow::ConnectMenuEvents() {
1299 &MultiplayerState::OnDirectConnectToRoom); 1312 &MultiplayerState::OnDirectConnectToRoom);
1300 connect(ui->action_Show_Room, &QAction::triggered, multiplayer_state, 1313 connect(ui->action_Show_Room, &QAction::triggered, multiplayer_state,
1301 &MultiplayerState::OnOpenNetworkRoom); 1314 &MultiplayerState::OnOpenNetworkRoom);
1315 connect(multiplayer_state, &MultiplayerState::SaveConfig, this, &GMainWindow::OnSaveConfig);
1302 1316
1303 // Tools 1317 // Tools
1304 connect_menu(ui->action_Rederive, std::bind(&GMainWindow::OnReinitializeKeys, this, 1318 connect_menu(ui->action_Rederive, std::bind(&GMainWindow::OnReinitializeKeys, this,
@@ -1339,6 +1353,8 @@ void GMainWindow::UpdateMenuState() {
1339 } else { 1353 } else {
1340 ui->action_Pause->setText(tr("&Pause")); 1354 ui->action_Pause->setText(tr("&Pause"));
1341 } 1355 }
1356
1357 multiplayer_state->UpdateNotificationStatus();
1342} 1358}
1343 1359
1344void GMainWindow::OnDisplayTitleBars(bool show) { 1360void GMainWindow::OnDisplayTitleBars(bool show) {
@@ -2000,7 +2016,7 @@ static bool RomFSRawCopy(QProgressDialog& dialog, const FileSys::VirtualDir& src
2000} 2016}
2001 2017
2002void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryType type) { 2018void GMainWindow::OnGameListRemoveInstalledEntry(u64 program_id, InstalledEntryType type) {
2003 const QString entry_type = [this, type] { 2019 const QString entry_type = [type] {
2004 switch (type) { 2020 switch (type) {
2005 case InstalledEntryType::Game: 2021 case InstalledEntryType::Game:
2006 return tr("Contents"); 2022 return tr("Contents");
@@ -2097,7 +2113,7 @@ void GMainWindow::RemoveAddOnContent(u64 program_id, const QString& entry_type)
2097 2113
2098void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget target, 2114void GMainWindow::OnGameListRemoveFile(u64 program_id, GameListRemoveTarget target,
2099 const std::string& game_path) { 2115 const std::string& game_path) {
2100 const QString question = [this, target] { 2116 const QString question = [target] {
2101 switch (target) { 2117 switch (target) {
2102 case GameListRemoveTarget::GlShaderCache: 2118 case GameListRemoveTarget::GlShaderCache:
2103 return tr("Delete OpenGL Transferable Shader Cache?"); 2119 return tr("Delete OpenGL Transferable Shader Cache?");
@@ -2770,6 +2786,11 @@ void GMainWindow::OnExit() {
2770 OnStopGame(); 2786 OnStopGame();
2771} 2787}
2772 2788
2789void GMainWindow::OnSaveConfig() {
2790 system->ApplySettings();
2791 config->Save();
2792}
2793
2773void GMainWindow::ErrorDisplayDisplayError(QString error_code, QString error_text) { 2794void GMainWindow::ErrorDisplayDisplayError(QString error_code, QString error_text) {
2774 OverlayDialog dialog(render_window, *system, error_code, error_text, QString{}, tr("OK"), 2795 OverlayDialog dialog(render_window, *system, error_code, error_text, QString{}, tr("OK"),
2775 Qt::AlignLeft | Qt::AlignVCenter); 2796 Qt::AlignLeft | Qt::AlignVCenter);
@@ -3211,21 +3232,16 @@ void GMainWindow::OnLoadAmiibo() {
3211 return; 3232 return;
3212 } 3233 }
3213 3234
3214 Service::SM::ServiceManager& sm = system->ServiceManager(); 3235 auto* virtual_amiibo = input_subsystem->GetVirtualAmiibo();
3215 auto nfc = sm.GetService<Service::NFP::Module::Interface>("nfp:user"); 3236
3216 if (nfc == nullptr) { 3237 // Remove amiibo if one is connected
3217 QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos")); 3238 if (virtual_amiibo->GetCurrentState() == InputCommon::VirtualAmiibo::State::AmiiboIsOpen) {
3218 return; 3239 virtual_amiibo->CloseAmiibo();
3219 }
3220 const auto nfc_state = nfc->GetCurrentState();
3221 if (nfc_state == Service::NFP::DeviceState::TagFound ||
3222 nfc_state == Service::NFP::DeviceState::TagMounted) {
3223 nfc->CloseAmiibo();
3224 QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed")); 3240 QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed"));
3225 return; 3241 return;
3226 } 3242 }
3227 3243
3228 if (nfc_state != Service::NFP::DeviceState::SearchingForTag) { 3244 if (virtual_amiibo->GetCurrentState() != InputCommon::VirtualAmiibo::State::WaitingForAmiibo) {
3229 QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos")); 3245 QMessageBox::warning(this, tr("Error"), tr("The current game is not looking for amiibos"));
3230 return; 3246 return;
3231 } 3247 }
@@ -3244,24 +3260,30 @@ void GMainWindow::OnLoadAmiibo() {
3244} 3260}
3245 3261
3246void GMainWindow::LoadAmiibo(const QString& filename) { 3262void GMainWindow::LoadAmiibo(const QString& filename) {
3247 Service::SM::ServiceManager& sm = system->ServiceManager(); 3263 auto* virtual_amiibo = input_subsystem->GetVirtualAmiibo();
3248 auto nfc = sm.GetService<Service::NFP::Module::Interface>("nfp:user"); 3264 const QString title = tr("Error loading Amiibo data");
3249 if (nfc == nullptr) {
3250 return;
3251 }
3252
3253 // Remove amiibo if one is connected 3265 // Remove amiibo if one is connected
3254 const auto nfc_state = nfc->GetCurrentState(); 3266 if (virtual_amiibo->GetCurrentState() == InputCommon::VirtualAmiibo::State::AmiiboIsOpen) {
3255 if (nfc_state == Service::NFP::DeviceState::TagFound || 3267 virtual_amiibo->CloseAmiibo();
3256 nfc_state == Service::NFP::DeviceState::TagMounted) {
3257 nfc->CloseAmiibo();
3258 QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed")); 3268 QMessageBox::warning(this, tr("Amiibo"), tr("The current amiibo has been removed"));
3259 return; 3269 return;
3260 } 3270 }
3261 3271
3262 if (!nfc->LoadAmiibo(filename.toStdString())) { 3272 switch (virtual_amiibo->LoadAmiibo(filename.toStdString())) {
3263 QMessageBox::warning(this, tr("Error loading Amiibo data"), 3273 case InputCommon::VirtualAmiibo::Info::NotAnAmiibo:
3264 tr("Unable to load Amiibo data.")); 3274 QMessageBox::warning(this, title, tr("The selected file is not a valid amiibo"));
3275 break;
3276 case InputCommon::VirtualAmiibo::Info::UnableToLoad:
3277 QMessageBox::warning(this, title, tr("The selected file is already on use"));
3278 break;
3279 case InputCommon::VirtualAmiibo::Info::WrongDeviceState:
3280 QMessageBox::warning(this, title, tr("The current game is not looking for amiibos"));
3281 break;
3282 case InputCommon::VirtualAmiibo::Info::Unknown:
3283 QMessageBox::warning(this, title, tr("An unkown error occured"));
3284 break;
3285 default:
3286 break;
3265 } 3287 }
3266} 3288}
3267 3289
@@ -3442,9 +3464,10 @@ void GMainWindow::UpdateStatusBar() {
3442 } 3464 }
3443 if (!Settings::values.use_speed_limit) { 3465 if (!Settings::values.use_speed_limit) {
3444 game_fps_label->setText( 3466 game_fps_label->setText(
3445 tr("Game: %1 FPS (Unlocked)").arg(results.average_game_fps, 0, 'f', 0)); 3467 tr("Game: %1 FPS (Unlocked)").arg(std::round(results.average_game_fps), 0, 'f', 0));
3446 } else { 3468 } else {
3447 game_fps_label->setText(tr("Game: %1 FPS").arg(results.average_game_fps, 0, 'f', 0)); 3469 game_fps_label->setText(
3470 tr("Game: %1 FPS").arg(std::round(results.average_game_fps), 0, 'f', 0));
3448 } 3471 }
3449 emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2)); 3472 emu_frametime_label->setText(tr("Frame: %1 ms").arg(results.frametime * 1000.0, 0, 'f', 2));
3450 3473
@@ -4086,7 +4109,8 @@ int main(int argc, char* argv[]) {
4086 } 4109 }
4087#endif 4110#endif
4088 4111
4089 if (StartupChecks(argv[0], &has_broken_vulkan)) { 4112 if (StartupChecks(argv[0], &has_broken_vulkan,
4113 Settings::values.perform_vulkan_check.GetValue())) {
4090 return 0; 4114 return 0;
4091 } 4115 }
4092 4116
@@ -4125,6 +4149,10 @@ int main(int argc, char* argv[]) {
4125 QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity); 4149 QCoreApplication::setAttribute(Qt::AA_DontCheckOpenGLContextThreadAffinity);
4126 QApplication app(argc, argv); 4150 QApplication app(argc, argv);
4127 4151
4152#ifdef _WIN32
4153 OverrideWindowsFont();
4154#endif
4155
4128 // Workaround for QTBUG-85409, for Suzhou numerals the number 1 is actually \u3021 4156 // Workaround for QTBUG-85409, for Suzhou numerals the number 1 is actually \u3021
4129 // so we can see if we get \u3008 instead 4157 // so we can see if we get \u3008 instead
4130 // TL;DR all other number formats are consecutive in unicode code points 4158 // TL;DR all other number formats are consecutive in unicode code points
diff --git a/src/yuzu/main.h b/src/yuzu/main.h
index 716aef063..f7aa8e417 100644
--- a/src/yuzu/main.h
+++ b/src/yuzu/main.h
@@ -169,6 +169,7 @@ public slots:
169 void OnLoadComplete(); 169 void OnLoadComplete();
170 void OnExecuteProgram(std::size_t program_index); 170 void OnExecuteProgram(std::size_t program_index);
171 void OnExit(); 171 void OnExit();
172 void OnSaveConfig();
172 void ControllerSelectorReconfigureControllers( 173 void ControllerSelectorReconfigureControllers(
173 const Core::Frontend::ControllerParameters& parameters); 174 const Core::Frontend::ControllerParameters& parameters);
174 void SoftwareKeyboardInitialize( 175 void SoftwareKeyboardInitialize(
diff --git a/src/yuzu/main.ui b/src/yuzu/main.ui
index cdf31b417..74d49dbd4 100644
--- a/src/yuzu/main.ui
+++ b/src/yuzu/main.ui
@@ -120,6 +120,20 @@
120 <addaction name="menu_Reset_Window_Size"/> 120 <addaction name="menu_Reset_Window_Size"/>
121 <addaction name="menu_View_Debugging"/> 121 <addaction name="menu_View_Debugging"/>
122 </widget> 122 </widget>
123 <widget class="QMenu" name="menu_Multiplayer">
124 <property name="enabled">
125 <bool>true</bool>
126 </property>
127 <property name="title">
128 <string>&amp;Multiplayer</string>
129 </property>
130 <addaction name="action_View_Lobby"/>
131 <addaction name="action_Start_Room"/>
132 <addaction name="action_Connect_To_Room"/>
133 <addaction name="separator"/>
134 <addaction name="action_Show_Room"/>
135 <addaction name="action_Leave_Room"/>
136 </widget>
123 <widget class="QMenu" name="menu_Tools"> 137 <widget class="QMenu" name="menu_Tools">
124 <property name="title"> 138 <property name="title">
125 <string>&amp;Tools</string> 139 <string>&amp;Tools</string>
@@ -251,7 +265,7 @@
251 <bool>true</bool> 265 <bool>true</bool>
252 </property> 266 </property>
253 <property name="text"> 267 <property name="text">
254 <string>Browse Public Game Lobby</string> 268 <string>&amp;Browse Public Game Lobby</string>
255 </property> 269 </property>
256 </action> 270 </action>
257 <action name="action_Start_Room"> 271 <action name="action_Start_Room">
@@ -259,7 +273,7 @@
259 <bool>true</bool> 273 <bool>true</bool>
260 </property> 274 </property>
261 <property name="text"> 275 <property name="text">
262 <string>Create Room</string> 276 <string>&amp;Create Room</string>
263 </property> 277 </property>
264 </action> 278 </action>
265 <action name="action_Leave_Room"> 279 <action name="action_Leave_Room">
@@ -267,12 +281,12 @@
267 <bool>false</bool> 281 <bool>false</bool>
268 </property> 282 </property>
269 <property name="text"> 283 <property name="text">
270 <string>Leave Room</string> 284 <string>&amp;Leave Room</string>
271 </property> 285 </property>
272 </action> 286 </action>
273 <action name="action_Connect_To_Room"> 287 <action name="action_Connect_To_Room">
274 <property name="text"> 288 <property name="text">
275 <string>Direct Connect to Room</string> 289 <string>&amp;Direct Connect to Room</string>
276 </property> 290 </property>
277 </action> 291 </action>
278 <action name="action_Show_Room"> 292 <action name="action_Show_Room">
@@ -280,7 +294,7 @@
280 <bool>false</bool> 294 <bool>false</bool>
281 </property> 295 </property>
282 <property name="text"> 296 <property name="text">
283 <string>Show Current Room</string> 297 <string>&amp;Show Current Room</string>
284 </property> 298 </property>
285 </action> 299 </action>
286 <action name="action_Fullscreen"> 300 <action name="action_Fullscreen">
diff --git a/src/yuzu/multiplayer/chat_room.cpp b/src/yuzu/multiplayer/chat_room.cpp
index 9e672f82e..dec9696c1 100644
--- a/src/yuzu/multiplayer/chat_room.cpp
+++ b/src/yuzu/multiplayer/chat_room.cpp
@@ -61,7 +61,10 @@ public:
61 61
62 /// Format the message using the player's color 62 /// Format the message using the player's color
63 QString GetPlayerChatMessage(u16 player) const { 63 QString GetPlayerChatMessage(u16 player) const {
64 auto color = player_color[player % 16]; 64 const bool is_dark_theme = QIcon::themeName().contains(QStringLiteral("dark")) ||
65 QIcon::themeName().contains(QStringLiteral("midnight"));
66 auto color =
67 is_dark_theme ? player_color_dark[player % 16] : player_color_default[player % 16];
65 QString name; 68 QString name;
66 if (username.isEmpty() || username == nickname) { 69 if (username.isEmpty() || username == nickname) {
67 name = nickname; 70 name = nickname;
@@ -84,9 +87,12 @@ public:
84 } 87 }
85 88
86private: 89private:
87 static constexpr std::array<const char*, 16> player_color = { 90 static constexpr std::array<const char*, 16> player_color_default = {
88 {"#0000FF", "#FF0000", "#8A2BE2", "#FF69B4", "#1E90FF", "#008000", "#00FF7F", "#B22222", 91 {"#0000FF", "#FF0000", "#8A2BE2", "#FF69B4", "#1E90FF", "#008000", "#00FF7F", "#B22222",
89 "#DAA520", "#FF4500", "#2E8B57", "#5F9EA0", "#D2691E", "#9ACD32", "#FF7F50", "FFFF00"}}; 92 "#DAA520", "#FF4500", "#2E8B57", "#5F9EA0", "#D2691E", "#9ACD32", "#FF7F50", "#FFFF00"}};
93 static constexpr std::array<const char*, 16> player_color_dark = {
94 {"#559AD1", "#4EC9A8", "#D69D85", "#C6C923", "#B975B5", "#D81F1F", "#7EAE39", "#4F8733",
95 "#F7CD8A", "#6FCACF", "#CE4897", "#8A2BE2", "#D2691E", "#9ACD32", "#FF7F50", "#152ccd"}};
90 static constexpr char ping_color[] = "#FFFF00"; 96 static constexpr char ping_color[] = "#FFFF00";
91 97
92 QString timestamp; 98 QString timestamp;
diff --git a/src/yuzu/multiplayer/client_room.cpp b/src/yuzu/multiplayer/client_room.cpp
index b34a8d004..caf34a414 100644
--- a/src/yuzu/multiplayer/client_room.cpp
+++ b/src/yuzu/multiplayer/client_room.cpp
@@ -97,8 +97,9 @@ void ClientRoomWindow::UpdateView() {
97 auto memberlist = member->GetMemberInformation(); 97 auto memberlist = member->GetMemberInformation();
98 ui->chat->SetPlayerList(memberlist); 98 ui->chat->SetPlayerList(memberlist);
99 const auto information = member->GetRoomInformation(); 99 const auto information = member->GetRoomInformation();
100 setWindowTitle(QString(tr("%1 (%2/%3 members) - connected")) 100 setWindowTitle(QString(tr("%1 - %2 (%3/%4 members) - connected"))
101 .arg(QString::fromStdString(information.name)) 101 .arg(QString::fromStdString(information.name))
102 .arg(QString::fromStdString(information.preferred_game.name))
102 .arg(memberlist.size()) 103 .arg(memberlist.size())
103 .arg(information.member_slots)); 104 .arg(information.member_slots));
104 ui->description->setText(QString::fromStdString(information.description)); 105 ui->description->setText(QString::fromStdString(information.description));
diff --git a/src/yuzu/multiplayer/direct_connect.cpp b/src/yuzu/multiplayer/direct_connect.cpp
index 017063074..10bf0a4fb 100644
--- a/src/yuzu/multiplayer/direct_connect.cpp
+++ b/src/yuzu/multiplayer/direct_connect.cpp
@@ -106,6 +106,8 @@ void DirectConnectWindow::Connect() {
106 UISettings::values.multiplayer_port = UISettings::values.multiplayer_port.GetDefault(); 106 UISettings::values.multiplayer_port = UISettings::values.multiplayer_port.GetDefault();
107 } 107 }
108 108
109 emit SaveConfig();
110
109 // attempt to connect in a different thread 111 // attempt to connect in a different thread
110 QFuture<void> f = QtConcurrent::run([&] { 112 QFuture<void> f = QtConcurrent::run([&] {
111 if (auto room_member = room_network.GetRoomMember().lock()) { 113 if (auto room_member = room_network.GetRoomMember().lock()) {
diff --git a/src/yuzu/multiplayer/direct_connect.h b/src/yuzu/multiplayer/direct_connect.h
index e39dd1e0d..b8f66cfb2 100644
--- a/src/yuzu/multiplayer/direct_connect.h
+++ b/src/yuzu/multiplayer/direct_connect.h
@@ -31,6 +31,7 @@ signals:
31 * connections that it might have. 31 * connections that it might have.
32 */ 32 */
33 void Closed(); 33 void Closed();
34 void SaveConfig();
34 35
35private slots: 36private slots:
36 void OnConnection(); 37 void OnConnection();
diff --git a/src/yuzu/multiplayer/host_room.cpp b/src/yuzu/multiplayer/host_room.cpp
index 0c6adfd04..a8faa5b24 100644
--- a/src/yuzu/multiplayer/host_room.cpp
+++ b/src/yuzu/multiplayer/host_room.cpp
@@ -232,6 +232,7 @@ void HostRoomWindow::Host() {
232 } 232 }
233 UISettings::values.multiplayer_room_description = ui->room_description->toPlainText(); 233 UISettings::values.multiplayer_room_description = ui->room_description->toPlainText();
234 ui->host->setEnabled(true); 234 ui->host->setEnabled(true);
235 emit SaveConfig();
235 close(); 236 close();
236 } 237 }
237} 238}
diff --git a/src/yuzu/multiplayer/host_room.h b/src/yuzu/multiplayer/host_room.h
index 034cb2eef..ae816e2e0 100644
--- a/src/yuzu/multiplayer/host_room.h
+++ b/src/yuzu/multiplayer/host_room.h
@@ -46,6 +46,9 @@ public:
46 void UpdateGameList(QStandardItemModel* list); 46 void UpdateGameList(QStandardItemModel* list);
47 void RetranslateUi(); 47 void RetranslateUi();
48 48
49signals:
50 void SaveConfig();
51
49private: 52private:
50 void Host(); 53 void Host();
51 std::unique_ptr<Network::VerifyUser::Backend> CreateVerifyBackend(bool use_validation) const; 54 std::unique_ptr<Network::VerifyUser::Backend> CreateVerifyBackend(bool use_validation) const;
diff --git a/src/yuzu/multiplayer/lobby.cpp b/src/yuzu/multiplayer/lobby.cpp
index 107d40547..08c275696 100644
--- a/src/yuzu/multiplayer/lobby.cpp
+++ b/src/yuzu/multiplayer/lobby.cpp
@@ -7,6 +7,7 @@
7#include "common/logging/log.h" 7#include "common/logging/log.h"
8#include "common/settings.h" 8#include "common/settings.h"
9#include "core/core.h" 9#include "core/core.h"
10#include "core/hle/service/acc/profile_manager.h"
10#include "core/internal_network/network_interface.h" 11#include "core/internal_network/network_interface.h"
11#include "network/network.h" 12#include "network/network.h"
12#include "ui_lobby.h" 13#include "ui_lobby.h"
@@ -26,9 +27,9 @@
26Lobby::Lobby(QWidget* parent, QStandardItemModel* list, 27Lobby::Lobby(QWidget* parent, QStandardItemModel* list,
27 std::shared_ptr<Core::AnnounceMultiplayerSession> session, Core::System& system_) 28 std::shared_ptr<Core::AnnounceMultiplayerSession> session, Core::System& system_)
28 : QDialog(parent, Qt::WindowTitleHint | Qt::WindowCloseButtonHint | Qt::WindowSystemMenuHint), 29 : QDialog(parent, Qt::WindowTitleHint | Qt::WindowCloseButtonHint | Qt::WindowSystemMenuHint),
29 ui(std::make_unique<Ui::Lobby>()), 30 ui(std::make_unique<Ui::Lobby>()), announce_multiplayer_session(session),
30 announce_multiplayer_session(session), system{system_}, room_network{ 31 profile_manager(std::make_unique<Service::Account::ProfileManager>()), system{system_},
31 system.GetRoomNetwork()} { 32 room_network{system.GetRoomNetwork()} {
32 ui->setupUi(this); 33 ui->setupUi(this);
33 34
34 // setup the watcher for background connections 35 // setup the watcher for background connections
@@ -60,9 +61,17 @@ Lobby::Lobby(QWidget* parent, QStandardItemModel* list,
60 61
61 ui->nickname->setValidator(validation.GetNickname()); 62 ui->nickname->setValidator(validation.GetNickname());
62 ui->nickname->setText(UISettings::values.multiplayer_nickname.GetValue()); 63 ui->nickname->setText(UISettings::values.multiplayer_nickname.GetValue());
63 if (ui->nickname->text().isEmpty() && !Settings::values.yuzu_username.GetValue().empty()) { 64
64 // Use yuzu Web Service user name as nickname by default 65 // Try to find the best nickname by default
65 ui->nickname->setText(QString::fromStdString(Settings::values.yuzu_username.GetValue())); 66 if (ui->nickname->text().isEmpty() || ui->nickname->text() == QStringLiteral("yuzu")) {
67 if (!Settings::values.yuzu_username.GetValue().empty()) {
68 ui->nickname->setText(
69 QString::fromStdString(Settings::values.yuzu_username.GetValue()));
70 } else if (!GetProfileUsername().empty()) {
71 ui->nickname->setText(QString::fromStdString(GetProfileUsername()));
72 } else {
73 ui->nickname->setText(QStringLiteral("yuzu"));
74 }
66 } 75 }
67 76
68 // UI Buttons 77 // UI Buttons
@@ -76,12 +85,6 @@ Lobby::Lobby(QWidget* parent, QStandardItemModel* list,
76 // Actions 85 // Actions
77 connect(&room_list_watcher, &QFutureWatcher<AnnounceMultiplayerRoom::RoomList>::finished, this, 86 connect(&room_list_watcher, &QFutureWatcher<AnnounceMultiplayerRoom::RoomList>::finished, this,
78 &Lobby::OnRefreshLobby); 87 &Lobby::OnRefreshLobby);
79
80 // manually start a refresh when the window is opening
81 // TODO(jroweboy): if this refresh is slow for people with bad internet, then don't do it as
82 // part of the constructor, but offload the refresh until after the window shown. perhaps emit a
83 // refreshroomlist signal from places that open the lobby
84 RefreshLobby();
85} 88}
86 89
87Lobby::~Lobby() = default; 90Lobby::~Lobby() = default;
@@ -96,6 +99,7 @@ void Lobby::UpdateGameList(QStandardItemModel* list) {
96 } 99 }
97 if (proxy) 100 if (proxy)
98 proxy->UpdateGameList(game_list); 101 proxy->UpdateGameList(game_list);
102 ui->room_list->sortByColumn(Column::GAME_NAME, Qt::AscendingOrder);
99} 103}
100 104
101void Lobby::RetranslateUi() { 105void Lobby::RetranslateUi() {
@@ -117,6 +121,11 @@ void Lobby::OnExpandRoom(const QModelIndex& index) {
117 121
118void Lobby::OnJoinRoom(const QModelIndex& source) { 122void Lobby::OnJoinRoom(const QModelIndex& source) {
119 if (!Network::GetSelectedNetworkInterface()) { 123 if (!Network::GetSelectedNetworkInterface()) {
124 LOG_INFO(WebService, "Automatically selected network interface for room network.");
125 Network::SelectFirstNetworkInterface();
126 }
127
128 if (!Network::GetSelectedNetworkInterface()) {
120 NetworkMessage::ErrorManager::ShowError( 129 NetworkMessage::ErrorManager::ShowError(
121 NetworkMessage::ErrorManager::NO_INTERFACE_SELECTED); 130 NetworkMessage::ErrorManager::NO_INTERFACE_SELECTED);
122 return; 131 return;
@@ -197,16 +206,16 @@ void Lobby::OnJoinRoom(const QModelIndex& source) {
197 proxy->data(connection_index, LobbyItemHost::HostIPRole).toString(); 206 proxy->data(connection_index, LobbyItemHost::HostIPRole).toString();
198 UISettings::values.multiplayer_port = 207 UISettings::values.multiplayer_port =
199 proxy->data(connection_index, LobbyItemHost::HostPortRole).toInt(); 208 proxy->data(connection_index, LobbyItemHost::HostPortRole).toInt();
209 emit SaveConfig();
200} 210}
201 211
202void Lobby::ResetModel() { 212void Lobby::ResetModel() {
203 model->clear(); 213 model->clear();
204 model->insertColumns(0, Column::TOTAL); 214 model->insertColumns(0, Column::TOTAL);
205 model->setHeaderData(Column::EXPAND, Qt::Horizontal, QString(), Qt::DisplayRole); 215 model->setHeaderData(Column::MEMBER, Qt::Horizontal, tr("Players"), Qt::DisplayRole);
206 model->setHeaderData(Column::ROOM_NAME, Qt::Horizontal, tr("Room Name"), Qt::DisplayRole); 216 model->setHeaderData(Column::ROOM_NAME, Qt::Horizontal, tr("Room Name"), Qt::DisplayRole);
207 model->setHeaderData(Column::GAME_NAME, Qt::Horizontal, tr("Preferred Game"), Qt::DisplayRole); 217 model->setHeaderData(Column::GAME_NAME, Qt::Horizontal, tr("Preferred Game"), Qt::DisplayRole);
208 model->setHeaderData(Column::HOST, Qt::Horizontal, tr("Host"), Qt::DisplayRole); 218 model->setHeaderData(Column::HOST, Qt::Horizontal, tr("Host"), Qt::DisplayRole);
209 model->setHeaderData(Column::MEMBER, Qt::Horizontal, tr("Players"), Qt::DisplayRole);
210} 219}
211 220
212void Lobby::RefreshLobby() { 221void Lobby::RefreshLobby() {
@@ -229,6 +238,7 @@ void Lobby::OnRefreshLobby() {
229 for (int r = 0; r < game_list->rowCount(); ++r) { 238 for (int r = 0; r < game_list->rowCount(); ++r) {
230 auto index = game_list->index(r, 0); 239 auto index = game_list->index(r, 0);
231 auto game_id = game_list->data(index, GameListItemPath::ProgramIdRole).toULongLong(); 240 auto game_id = game_list->data(index, GameListItemPath::ProgramIdRole).toULongLong();
241
232 if (game_id != 0 && room.information.preferred_game.id == game_id) { 242 if (game_id != 0 && room.information.preferred_game.id == game_id) {
233 smdh_icon = game_list->data(index, Qt::DecorationRole).value<QPixmap>(); 243 smdh_icon = game_list->data(index, Qt::DecorationRole).value<QPixmap>();
234 } 244 }
@@ -243,17 +253,16 @@ void Lobby::OnRefreshLobby() {
243 members.append(var); 253 members.append(var);
244 } 254 }
245 255
246 auto first_item = new LobbyItem(); 256 auto first_item = new LobbyItemGame(
257 room.information.preferred_game.id,
258 QString::fromStdString(room.information.preferred_game.name), smdh_icon);
247 auto row = QList<QStandardItem*>({ 259 auto row = QList<QStandardItem*>({
248 first_item, 260 first_item,
249 new LobbyItemName(room.has_password, QString::fromStdString(room.information.name)), 261 new LobbyItemName(room.has_password, QString::fromStdString(room.information.name)),
250 new LobbyItemGame(room.information.preferred_game.id, 262 new LobbyItemMemberList(members, room.information.member_slots),
251 QString::fromStdString(room.information.preferred_game.name),
252 smdh_icon),
253 new LobbyItemHost(QString::fromStdString(room.information.host_username), 263 new LobbyItemHost(QString::fromStdString(room.information.host_username),
254 QString::fromStdString(room.ip), room.information.port, 264 QString::fromStdString(room.ip), room.information.port,
255 QString::fromStdString(room.verify_uid)), 265 QString::fromStdString(room.verify_uid)),
256 new LobbyItemMemberList(members, room.information.member_slots),
257 }); 266 });
258 model->appendRow(row); 267 model->appendRow(row);
259 // To make the rows expandable, add the member data as a child of the first column of the 268 // To make the rows expandable, add the member data as a child of the first column of the
@@ -283,6 +292,26 @@ void Lobby::OnRefreshLobby() {
283 ui->room_list->setFirstColumnSpanned(j, proxy->index(i, 0), true); 292 ui->room_list->setFirstColumnSpanned(j, proxy->index(i, 0), true);
284 } 293 }
285 } 294 }
295
296 ui->room_list->sortByColumn(Column::GAME_NAME, Qt::AscendingOrder);
297}
298
299std::string Lobby::GetProfileUsername() {
300 const auto& current_user = profile_manager->GetUser(Settings::values.current_user.GetValue());
301 Service::Account::ProfileBase profile{};
302
303 if (!current_user.has_value()) {
304 return "";
305 }
306
307 if (!profile_manager->GetProfileBase(*current_user, profile)) {
308 return "";
309 }
310
311 const auto text = Common::StringFromFixedZeroTerminatedBuffer(
312 reinterpret_cast<const char*>(profile.username.data()), profile.username.size());
313
314 return text;
286} 315}
287 316
288LobbyFilterProxyModel::LobbyFilterProxyModel(QWidget* parent, QStandardItemModel* list) 317LobbyFilterProxyModel::LobbyFilterProxyModel(QWidget* parent, QStandardItemModel* list)
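
The GetProfileUsername helper above goes through Common::StringFromFixedZeroTerminatedBuffer because the profile's username field is a fixed-size byte array that is only NUL-terminated when the name is shorter than the buffer. A minimal sketch of that conversion, under the assumption that the real helper behaves like a bounded strlen (the function name matches the diff; the body here is illustrative, not yuzu's implementation):

    #include <algorithm>
    #include <cstddef>
    #include <string>

    // Copy at most max_len bytes, stopping at the first NUL if one exists.
    std::string StringFromFixedZeroTerminatedBuffer(const char* buffer, std::size_t max_len) {
        const char* end = std::find(buffer, buffer + max_len, '\0');
        return std::string(buffer, end);
    }
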
diff --git a/src/yuzu/multiplayer/lobby.h b/src/yuzu/multiplayer/lobby.h
index 2696aec21..300dad13e 100644
--- a/src/yuzu/multiplayer/lobby.h
+++ b/src/yuzu/multiplayer/lobby.h
@@ -24,6 +24,10 @@ namespace Core {
24class System; 24class System;
25} 25}
26 26
27namespace Service::Account {
28class ProfileManager;
29}
30
27/** 31/**
28 * Listing of all public games pulled from services. The lobby should be simple enough for users to 32 * Listing of all public games pulled from services. The lobby should be simple enough for users to
29 * find the game they want to play, and join it. 33 * find the game they want to play, and join it.
@@ -75,8 +79,11 @@ private slots:
75 79
76signals: 80signals:
77 void StateChanged(const Network::RoomMember::State&); 81 void StateChanged(const Network::RoomMember::State&);
82 void SaveConfig();
78 83
79private: 84private:
85 std::string GetProfileUsername();
86
80 /** 87 /**
81 * Removes all entries in the Lobby before refreshing. 88 * Removes all entries in the Lobby before refreshing.
82 */ 89 */
@@ -96,6 +103,7 @@ private:
96 103
97 QFutureWatcher<AnnounceMultiplayerRoom::RoomList> room_list_watcher; 104 QFutureWatcher<AnnounceMultiplayerRoom::RoomList> room_list_watcher;
98 std::weak_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session; 105 std::weak_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session;
106 std::unique_ptr<Service::Account::ProfileManager> profile_manager;
99 QFutureWatcher<void>* watcher; 107 QFutureWatcher<void>* watcher;
100 Validation validation; 108 Validation validation;
101 Core::System& system; 109 Core::System& system;
diff --git a/src/yuzu/multiplayer/lobby_p.h b/src/yuzu/multiplayer/lobby_p.h
index 8071cede4..068c95aca 100644
--- a/src/yuzu/multiplayer/lobby_p.h
+++ b/src/yuzu/multiplayer/lobby_p.h
@@ -11,11 +11,10 @@
11 11
12namespace Column { 12namespace Column {
13enum List { 13enum List {
14 EXPAND,
15 ROOM_NAME,
16 GAME_NAME, 14 GAME_NAME,
17 HOST, 15 ROOM_NAME,
18 MEMBER, 16 MEMBER,
17 HOST,
19 TOTAL, 18 TOTAL,
20}; 19};
21} 20}
@@ -91,6 +90,8 @@ public:
91 setData(game_name, GameNameRole); 90 setData(game_name, GameNameRole);
92 if (!smdh_icon.isNull()) { 91 if (!smdh_icon.isNull()) {
93 setData(smdh_icon, GameIconRole); 92 setData(smdh_icon, GameIconRole);
93 } else {
94 setData(QIcon::fromTheme(QStringLiteral("chip")).pixmap(32), GameIconRole);
94 } 95 }
95 } 96 }
96 97
@@ -98,7 +99,12 @@ public:
98 if (role == Qt::DecorationRole) { 99 if (role == Qt::DecorationRole) {
99 auto val = data(GameIconRole); 100 auto val = data(GameIconRole);
100 if (val.isValid()) { 101 if (val.isValid()) {
101 val = val.value<QPixmap>().scaled(16, 16, Qt::KeepAspectRatio); 102 val = val.value<QPixmap>().scaled(32, 32, Qt::KeepAspectRatio,
103 Qt::TransformationMode::SmoothTransformation);
104 } else {
105 auto blank_image = QPixmap(32, 32);
106 blank_image.fill(Qt::black);
107 val = blank_image;
102 } 108 }
103 return val; 109 return val;
104 } else if (role != Qt::DisplayRole) { 110 } else if (role != Qt::DisplayRole) {
@@ -191,8 +197,8 @@ public:
191 return LobbyItem::data(role); 197 return LobbyItem::data(role);
192 } 198 }
193 auto members = data(MemberListRole).toList(); 199 auto members = data(MemberListRole).toList();
194 return QStringLiteral("%1 / %2").arg(QString::number(members.size()), 200 return QStringLiteral("%1 / %2 ")
195 data(MaxPlayerRole).toString()); 201 .arg(QString::number(members.size()), data(MaxPlayerRole).toString());
196 } 202 }
197 203
198 bool operator<(const QStandardItem& other) const override { 204 bool operator<(const QStandardItem& other) const override {
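
The DecorationRole change above both upscales game icons (16x16 to 32x32, now with smooth filtering) and guarantees a pixmap is always returned; a condensed sketch of that branch, assuming a free function purely for illustration:

    #include <QPixmap>
    #include <QVariant>

    // Return a 32x32 decoration: the stored game icon, smoothly scaled,
    // or a solid black placeholder when no icon data was set.
    QVariant DecorationFor(const QVariant& icon_data) {
        if (icon_data.isValid()) {
            return QVariant::fromValue(icon_data.value<QPixmap>().scaled(
                32, 32, Qt::KeepAspectRatio, Qt::TransformationMode::SmoothTransformation));
        }
        QPixmap blank_image(32, 32);
        blank_image.fill(Qt::black);
        return QVariant::fromValue(blank_image);
    }
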
diff --git a/src/yuzu/multiplayer/message.cpp b/src/yuzu/multiplayer/message.cpp
index 758b5b731..6d8f18274 100644
--- a/src/yuzu/multiplayer/message.cpp
+++ b/src/yuzu/multiplayer/message.cpp
@@ -49,9 +49,9 @@ const ConnectionError ErrorManager::PERMISSION_DENIED(
49 QT_TR_NOOP("You do not have enough permission to perform this action.")); 49 QT_TR_NOOP("You do not have enough permission to perform this action."));
50const ConnectionError ErrorManager::NO_SUCH_USER(QT_TR_NOOP( 50const ConnectionError ErrorManager::NO_SUCH_USER(QT_TR_NOOP(
51 "The user you are trying to kick/ban could not be found.\nThey may have left the room.")); 51 "The user you are trying to kick/ban could not be found.\nThey may have left the room."));
52const ConnectionError ErrorManager::NO_INTERFACE_SELECTED( 52const ConnectionError ErrorManager::NO_INTERFACE_SELECTED(QT_TR_NOOP(
53 QT_TR_NOOP("No network interface is selected.\nPlease go to Configure -> System -> Network and " 53 "No valid network interface is selected.\nPlease go to Configure -> System -> Network and "
54 "make a selection.")); 54 "make a selection."));
55 55
56static bool WarnMessage(const std::string& title, const std::string& text) { 56static bool WarnMessage(const std::string& title, const std::string& text) {
57 return QMessageBox::Ok == QMessageBox::warning(nullptr, QObject::tr(title.c_str()), 57 return QMessageBox::Ok == QMessageBox::warning(nullptr, QObject::tr(title.c_str()),
diff --git a/src/yuzu/multiplayer/state.cpp b/src/yuzu/multiplayer/state.cpp
index 66e098296..ae2738ad4 100644
--- a/src/yuzu/multiplayer/state.cpp
+++ b/src/yuzu/multiplayer/state.cpp
@@ -44,9 +44,6 @@ MultiplayerState::MultiplayerState(QWidget* parent, QStandardItemModel* game_lis
44 44
45 status_text = new ClickableLabel(this); 45 status_text = new ClickableLabel(this);
46 status_icon = new ClickableLabel(this); 46 status_icon = new ClickableLabel(this);
47 status_text->setToolTip(tr("Current connection status"));
48 status_text->setText(tr("Not Connected. Click here to find a room!"));
49 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16));
50 47
51 connect(status_text, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom); 48 connect(status_text, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom);
52 connect(status_icon, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom); 49 connect(status_icon, &ClickableLabel::clicked, this, &MultiplayerState::OnOpenNetworkRoom);
@@ -57,6 +54,8 @@ MultiplayerState::MultiplayerState(QWidget* parent, QStandardItemModel* game_lis
57 HideNotification(); 54 HideNotification();
58 } 55 }
59 }); 56 });
57
58 retranslateUi();
60} 59}
61 60
62MultiplayerState::~MultiplayerState() = default; 61MultiplayerState::~MultiplayerState() = default;
@@ -90,14 +89,7 @@ void MultiplayerState::Close() {
90void MultiplayerState::retranslateUi() { 89void MultiplayerState::retranslateUi() {
91 status_text->setToolTip(tr("Current connection status")); 90 status_text->setToolTip(tr("Current connection status"));
92 91
93 if (current_state == Network::RoomMember::State::Uninitialized) { 92 UpdateNotificationStatus();
94 status_text->setText(tr("Not Connected. Click here to find a room!"));
95 } else if (current_state == Network::RoomMember::State::Joined ||
96 current_state == Network::RoomMember::State::Moderator) {
97 status_text->setText(tr("Connected"));
98 } else {
99 status_text->setText(tr("Not Connected"));
100 }
101 93
102 if (lobby) { 94 if (lobby) {
103 lobby->RetranslateUi(); 95 lobby->RetranslateUi();
@@ -113,21 +105,55 @@ void MultiplayerState::retranslateUi() {
113 } 105 }
114} 106}
115 107
108void MultiplayerState::SetNotificationStatus(NotificationStatus status) {
109 notification_status = status;
110 UpdateNotificationStatus();
111}
112
113void MultiplayerState::UpdateNotificationStatus() {
114 switch (notification_status) {
115 case NotificationStatus::Uninitialized:
116 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16));
117 status_text->setText(tr("Not Connected. Click here to find a room!"));
118 leave_room->setEnabled(false);
119 show_room->setEnabled(false);
120 break;
121 case NotificationStatus::Disconnected:
122 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16));
123 status_text->setText(tr("Not Connected"));
124 leave_room->setEnabled(false);
125 show_room->setEnabled(false);
126 break;
127 case NotificationStatus::Connected:
128 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16));
129 status_text->setText(tr("Connected"));
130 leave_room->setEnabled(true);
131 show_room->setEnabled(true);
132 break;
133 case NotificationStatus::Notification:
134 status_icon->setPixmap(
135 QIcon::fromTheme(QStringLiteral("connected_notification")).pixmap(16));
136 status_text->setText(tr("New Messages Received"));
137 leave_room->setEnabled(true);
138 show_room->setEnabled(true);
139 break;
140 }
141
142 // Clean up status bar if game is running
143 if (system.IsPoweredOn()) {
144 status_text->clear();
145 }
146}
147
116void MultiplayerState::OnNetworkStateChanged(const Network::RoomMember::State& state) { 148void MultiplayerState::OnNetworkStateChanged(const Network::RoomMember::State& state) {
117 LOG_DEBUG(Frontend, "Network State: {}", Network::GetStateStr(state)); 149 LOG_DEBUG(Frontend, "Network State: {}", Network::GetStateStr(state));
118 if (state == Network::RoomMember::State::Joined || 150 if (state == Network::RoomMember::State::Joined ||
119 state == Network::RoomMember::State::Moderator) { 151 state == Network::RoomMember::State::Moderator) {
120 152
121 OnOpenNetworkRoom(); 153 OnOpenNetworkRoom();
122 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16)); 154 SetNotificationStatus(NotificationStatus::Connected);
123 status_text->setText(tr("Connected"));
124 leave_room->setEnabled(true);
125 show_room->setEnabled(true);
126 } else { 155 } else {
127 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("disconnected")).pixmap(16)); 156 SetNotificationStatus(NotificationStatus::Disconnected);
128 status_text->setText(tr("Not Connected"));
129 leave_room->setEnabled(false);
130 show_room->setEnabled(false);
131 } 157 }
132 158
133 current_state = state; 159 current_state = state;
@@ -185,6 +211,10 @@ void MultiplayerState::OnAnnounceFailed(const WebService::WebResult& result) {
185 QMessageBox::Ok); 211 QMessageBox::Ok);
186} 212}
187 213
214void MultiplayerState::OnSaveConfig() {
215 emit SaveConfig();
216}
217
188void MultiplayerState::UpdateThemedIcons() { 218void MultiplayerState::UpdateThemedIcons() {
189 if (show_notification) { 219 if (show_notification) {
190 status_icon->setPixmap( 220 status_icon->setPixmap(
@@ -209,13 +239,16 @@ static void BringWidgetToFront(QWidget* widget) {
209void MultiplayerState::OnViewLobby() { 239void MultiplayerState::OnViewLobby() {
210 if (lobby == nullptr) { 240 if (lobby == nullptr) {
211 lobby = new Lobby(this, game_list_model, announce_multiplayer_session, system); 241 lobby = new Lobby(this, game_list_model, announce_multiplayer_session, system);
242 connect(lobby, &Lobby::SaveConfig, this, &MultiplayerState::OnSaveConfig);
212 } 243 }
244 lobby->RefreshLobby();
213 BringWidgetToFront(lobby); 245 BringWidgetToFront(lobby);
214} 246}
215 247
216void MultiplayerState::OnCreateRoom() { 248void MultiplayerState::OnCreateRoom() {
217 if (host_room == nullptr) { 249 if (host_room == nullptr) {
218 host_room = new HostRoomWindow(this, game_list_model, announce_multiplayer_session, system); 250 host_room = new HostRoomWindow(this, game_list_model, announce_multiplayer_session, system);
251 connect(host_room, &HostRoomWindow::SaveConfig, this, &MultiplayerState::OnSaveConfig);
219 } 252 }
220 BringWidgetToFront(host_room); 253 BringWidgetToFront(host_room);
221} 254}
@@ -249,14 +282,13 @@ void MultiplayerState::ShowNotification() {
249 return; // Do not show notification if the chat window currently has focus 282 return; // Do not show notification if the chat window currently has focus
250 show_notification = true; 283 show_notification = true;
251 QApplication::alert(nullptr); 284 QApplication::alert(nullptr);
252 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected_notification")).pixmap(16)); 285 QApplication::beep();
253 status_text->setText(tr("New Messages Received")); 286 SetNotificationStatus(NotificationStatus::Notification);
254} 287}
255 288
256void MultiplayerState::HideNotification() { 289void MultiplayerState::HideNotification() {
257 show_notification = false; 290 show_notification = false;
258 status_icon->setPixmap(QIcon::fromTheme(QStringLiteral("connected")).pixmap(16)); 291 SetNotificationStatus(NotificationStatus::Connected);
259 status_text->setText(tr("Connected"));
260} 292}
261 293
262void MultiplayerState::OnOpenNetworkRoom() { 294void MultiplayerState::OnOpenNetworkRoom() {
@@ -279,6 +311,8 @@ void MultiplayerState::OnOpenNetworkRoom() {
279void MultiplayerState::OnDirectConnectToRoom() { 311void MultiplayerState::OnDirectConnectToRoom() {
280 if (direct_connect == nullptr) { 312 if (direct_connect == nullptr) {
281 direct_connect = new DirectConnectWindow(system, this); 313 direct_connect = new DirectConnectWindow(system, this);
314 connect(direct_connect, &DirectConnectWindow::SaveConfig, this,
315 &MultiplayerState::OnSaveConfig);
282 } 316 }
283 BringWidgetToFront(direct_connect); 317 BringWidgetToFront(direct_connect);
284} 318}
diff --git a/src/yuzu/multiplayer/state.h b/src/yuzu/multiplayer/state.h
index c92496413..5d681c5c6 100644
--- a/src/yuzu/multiplayer/state.h
+++ b/src/yuzu/multiplayer/state.h
@@ -22,6 +22,13 @@ class MultiplayerState : public QWidget {
22 Q_OBJECT; 22 Q_OBJECT;
23 23
24public: 24public:
25 enum class NotificationStatus {
26 Uninitialized,
27 Disconnected,
28 Connected,
29 Notification,
30 };
31
25 explicit MultiplayerState(QWidget* parent, QStandardItemModel* game_list, QAction* leave_room, 32 explicit MultiplayerState(QWidget* parent, QStandardItemModel* game_list, QAction* leave_room,
26 QAction* show_room, Core::System& system_); 33 QAction* show_room, Core::System& system_);
27 ~MultiplayerState(); 34 ~MultiplayerState();
@@ -31,6 +38,10 @@ public:
31 */ 38 */
32 void Close(); 39 void Close();
33 40
41 void SetNotificationStatus(NotificationStatus state);
42
43 void UpdateNotificationStatus();
44
34 ClickableLabel* GetStatusText() const { 45 ClickableLabel* GetStatusText() const {
35 return status_text; 46 return status_text;
36 } 47 }
@@ -64,6 +75,7 @@ public slots:
64 void OnOpenNetworkRoom(); 75 void OnOpenNetworkRoom();
65 void OnDirectConnectToRoom(); 76 void OnDirectConnectToRoom();
66 void OnAnnounceFailed(const WebService::WebResult&); 77 void OnAnnounceFailed(const WebService::WebResult&);
78 void OnSaveConfig();
67 void UpdateThemedIcons(); 79 void UpdateThemedIcons();
68 void ShowNotification(); 80 void ShowNotification();
69 void HideNotification(); 81 void HideNotification();
@@ -72,6 +84,7 @@ signals:
72 void NetworkStateChanged(const Network::RoomMember::State&); 84 void NetworkStateChanged(const Network::RoomMember::State&);
73 void NetworkError(const Network::RoomMember::Error&); 85 void NetworkError(const Network::RoomMember::Error&);
74 void AnnounceFailed(const WebService::WebResult&); 86 void AnnounceFailed(const WebService::WebResult&);
87 void SaveConfig();
75 88
76private: 89private:
77 Lobby* lobby = nullptr; 90 Lobby* lobby = nullptr;
@@ -85,6 +98,7 @@ private:
85 QAction* show_room; 98 QAction* show_room;
86 std::shared_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session; 99 std::shared_ptr<Core::AnnounceMultiplayerSession> announce_multiplayer_session;
87 Network::RoomMember::State current_state = Network::RoomMember::State::Uninitialized; 100 Network::RoomMember::State current_state = Network::RoomMember::State::Uninitialized;
101 NotificationStatus notification_status = NotificationStatus::Uninitialized;
88 bool has_mod_perms = false; 102 bool has_mod_perms = false;
89 Network::RoomMember::CallbackHandle<Network::RoomMember::State> state_callback_handle; 103 Network::RoomMember::CallbackHandle<Network::RoomMember::State> state_callback_handle;
90 Network::RoomMember::CallbackHandle<Network::RoomMember::Error> error_callback_handle; 104 Network::RoomMember::CallbackHandle<Network::RoomMember::Error> error_callback_handle;
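
Taken together, the SaveConfig additions across these files form one forwarding chain: each multiplayer dialog emits SaveConfig when the user commits connection settings, MultiplayerState::OnSaveConfig re-emits it, and the main window is expected to connect to that signal and persist UISettings. A condensed sketch of the wiring (the connect calls and slot body match the hunks above; the surrounding class scaffolding is omitted):

    // In MultiplayerState, when each dialog is created:
    connect(lobby, &Lobby::SaveConfig, this, &MultiplayerState::OnSaveConfig);
    connect(host_room, &HostRoomWindow::SaveConfig, this, &MultiplayerState::OnSaveConfig);
    connect(direct_connect, &DirectConnectWindow::SaveConfig, this,
            &MultiplayerState::OnSaveConfig);

    // The slot simply re-emits, so the main window needs only one connection:
    void MultiplayerState::OnSaveConfig() {
        emit SaveConfig();
    }
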
diff --git a/src/yuzu/startup_checks.cpp b/src/yuzu/startup_checks.cpp
index 29b87da05..fc2693f9d 100644
--- a/src/yuzu/startup_checks.cpp
+++ b/src/yuzu/startup_checks.cpp
@@ -57,7 +57,7 @@ bool CheckEnvVars(bool* is_child) {
57 return false; 57 return false;
58} 58}
59 59
60bool StartupChecks(const char* arg0, bool* has_broken_vulkan) { 60bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulkan_check) {
61#ifdef _WIN32 61#ifdef _WIN32
62 // Set the startup variable for child processes 62 // Set the startup variable for child processes
63 const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, ENV_VAR_ENABLED_TEXT); 63 const bool env_var_set = SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, ENV_VAR_ENABLED_TEXT);
@@ -67,29 +67,32 @@ bool StartupChecks(const char* arg0, bool* has_broken_vulkan) {
67 return false; 67 return false;
68 } 68 }
69 69
70 PROCESS_INFORMATION process_info; 70 if (perform_vulkan_check) {
71 std::memset(&process_info, '\0', sizeof(process_info)); 71 // Spawn child process that performs Vulkan check
72 72 PROCESS_INFORMATION process_info;
73 if (!SpawnChild(arg0, &process_info, 0)) { 73 std::memset(&process_info, '\0', sizeof(process_info));
74 return false; 74
75 } 75 if (!SpawnChild(arg0, &process_info, 0)) {
76 76 return false;
77 // Wait until the process exits and get exit code from it 77 }
78 WaitForSingleObject(process_info.hProcess, INFINITE); 78
79 DWORD exit_code = STILL_ACTIVE; 79 // Wait until the process exits and get exit code from it
80 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code); 80 WaitForSingleObject(process_info.hProcess, INFINITE);
81 if (err == 0) { 81 DWORD exit_code = STILL_ACTIVE;
82 std::fprintf(stderr, "GetExitCodeProcess failed with error %d\n", GetLastError()); 82 const int err = GetExitCodeProcess(process_info.hProcess, &exit_code);
83 } 83 if (err == 0) {
84 84 std::fprintf(stderr, "GetExitCodeProcess failed with error %d\n", GetLastError());
85 // Vulkan is broken if the child crashed (return value is not zero) 85 }
86 *has_broken_vulkan = (exit_code != 0); 86
87 87 // Vulkan is broken if the child crashed (return value is not zero)
88 if (CloseHandle(process_info.hProcess) == 0) { 88 *has_broken_vulkan = (exit_code != 0);
89 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); 89
90 } 90 if (CloseHandle(process_info.hProcess) == 0) {
91 if (CloseHandle(process_info.hThread) == 0) { 91 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError());
92 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError()); 92 }
93 if (CloseHandle(process_info.hThread) == 0) {
94 std::fprintf(stderr, "CloseHandle failed with error %d\n", GetLastError());
95 }
93 } 96 }
94 97
95 if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) { 98 if (!SetEnvironmentVariableA(STARTUP_CHECK_ENV_VAR, nullptr)) {
@@ -98,26 +101,28 @@ bool StartupChecks(const char* arg0, bool* has_broken_vulkan) {
98 } 101 }
99 102
100#elif defined(YUZU_UNIX) 103#elif defined(YUZU_UNIX)
101 const pid_t pid = fork(); 104 if (perform_vulkan_check) {
102 if (pid == 0) { 105 const pid_t pid = fork();
103 CheckVulkan(); 106 if (pid == 0) {
104 return true; 107 CheckVulkan();
105 } else if (pid == -1) { 108 return true;
106 const int err = errno; 109 } else if (pid == -1) {
107 std::fprintf(stderr, "fork failed with error %d\n", err); 110 const int err = errno;
108 return false; 111 std::fprintf(stderr, "fork failed with error %d\n", err);
109 } 112 return false;
110 113 }
111 // Get exit code from child process 114
112 int status; 115 // Get exit code from child process
113 const int r_val = wait(&status); 116 int status;
114 if (r_val == -1) { 117 const int r_val = wait(&status);
115 const int err = errno; 118 if (r_val == -1) {
116 std::fprintf(stderr, "wait failed with error %d\n", err); 119 const int err = errno;
117 return false; 120 std::fprintf(stderr, "wait failed with error %d\n", err);
121 return false;
122 }
123 // Vulkan is broken if the child crashed (return value is not zero)
124 *has_broken_vulkan = (status != 0);
118 } 125 }
119 // Vulkan is broken if the child crashed (return value is not zero)
120 *has_broken_vulkan = (status != 0);
121#endif 126#endif
122 return false; 127 return false;
123} 128}
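
On the UNIX side the change simply gates the existing fork/wait probe behind perform_vulkan_check; a self-contained sketch of that branch (signature simplified to drop arg0, which only the Windows path needs in order to spawn a child by executable path):

    #include <cstdio>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    // Stand-in for the real probe, which may crash inside a broken driver.
    void CheckVulkan() { /* create and destroy a Vulkan instance */ }

    // Returns true only in the child process, telling the caller to exit.
    bool StartupChecksUnix(bool* has_broken_vulkan, bool perform_vulkan_check) {
        if (perform_vulkan_check) {
            const pid_t pid = fork();
            if (pid == 0) {
                CheckVulkan();
                return true;
            }
            if (pid == -1) {
                std::perror("fork");
                return false;
            }
            int status = 0;
            if (wait(&status) == -1) {
                std::perror("wait");
                return false;
            }
            // Vulkan is considered broken if the child crashed (nonzero status).
            *has_broken_vulkan = (status != 0);
        }
        return false;
    }
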
diff --git a/src/yuzu/startup_checks.h b/src/yuzu/startup_checks.h
index f2fc2d9d4..d8e563be6 100644
--- a/src/yuzu/startup_checks.h
+++ b/src/yuzu/startup_checks.h
@@ -13,7 +13,7 @@ constexpr char ENV_VAR_ENABLED_TEXT[] = "ON";
13 13
14void CheckVulkan(); 14void CheckVulkan();
15bool CheckEnvVars(bool* is_child); 15bool CheckEnvVars(bool* is_child);
16bool StartupChecks(const char* arg0, bool* has_broken_vulkan); 16bool StartupChecks(const char* arg0, bool* has_broken_vulkan, bool perform_vulkan_check);
17 17
18#ifdef _WIN32 18#ifdef _WIN32
19bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags); 19bool SpawnChild(const char* arg0, PROCESS_INFORMATION* pi, int flags);
diff --git a/src/yuzu/uisettings.h b/src/yuzu/uisettings.h
index e12d414d9..753797efc 100644
--- a/src/yuzu/uisettings.h
+++ b/src/yuzu/uisettings.h
@@ -102,7 +102,7 @@ struct Values {
102 Settings::Setting<uint32_t> callout_flags{0, "calloutFlags"}; 102 Settings::Setting<uint32_t> callout_flags{0, "calloutFlags"};
103 103
104 // multiplayer settings 104 // multiplayer settings
105 Settings::Setting<QString> multiplayer_nickname{QStringLiteral("yuzu"), "nickname"}; 105 Settings::Setting<QString> multiplayer_nickname{{}, "nickname"};
106 Settings::Setting<QString> multiplayer_ip{{}, "ip"}; 106 Settings::Setting<QString> multiplayer_ip{{}, "ip"};
107 Settings::SwitchableSetting<uint, true> multiplayer_port{24872, 0, UINT16_MAX, "port"}; 107 Settings::SwitchableSetting<uint, true> multiplayer_port{24872, 0, UINT16_MAX, "port"};
108 Settings::Setting<QString> multiplayer_room_nickname{{}, "room_nickname"}; 108 Settings::Setting<QString> multiplayer_room_nickname{{}, "room_nickname"};